1 | /* |
2 | * Generic process-grouping system. |
3 | * |
4 | * Based originally on the cpuset system, extracted by Paul Menage |
5 | * Copyright (C) 2006 Google, Inc |
6 | * |
7 | * Notifications support |
8 | * Copyright (C) 2009 Nokia Corporation |
9 | * Author: Kirill A. Shutemov |
10 | * |
11 | * Copyright notices from the original cpuset code: |
12 | * -------------------------------------------------- |
13 | * Copyright (C) 2003 BULL SA. |
14 | * Copyright (C) 2004-2006 Silicon Graphics, Inc. |
15 | * |
16 | * Portions derived from Patrick Mochel's sysfs code. |
17 | * sysfs is Copyright (c) 2001-3 Patrick Mochel |
18 | * |
19 | * 2003-10-10 Written by Simon Derr. |
20 | * 2003-10-22 Updates by Stephen Hemminger. |
21 | * 2004 May-July Rework by Paul Jackson. |
22 | * --------------------------------------------------- |
23 | * |
24 | * This file is subject to the terms and conditions of the GNU General Public |
25 | * License. See the file COPYING in the main directory of the Linux |
26 | * distribution for more details. |
27 | */ |
28 | |
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
30 | |
31 | #include "cgroup-internal.h" |
32 | |
33 | #include <linux/bpf-cgroup.h> |
34 | #include <linux/cred.h> |
35 | #include <linux/errno.h> |
36 | #include <linux/init_task.h> |
37 | #include <linux/kernel.h> |
38 | #include <linux/magic.h> |
39 | #include <linux/mutex.h> |
40 | #include <linux/mount.h> |
41 | #include <linux/pagemap.h> |
42 | #include <linux/proc_fs.h> |
43 | #include <linux/rcupdate.h> |
44 | #include <linux/sched.h> |
45 | #include <linux/sched/task.h> |
46 | #include <linux/slab.h> |
47 | #include <linux/spinlock.h> |
48 | #include <linux/percpu-rwsem.h> |
49 | #include <linux/string.h> |
50 | #include <linux/hashtable.h> |
51 | #include <linux/idr.h> |
52 | #include <linux/kthread.h> |
53 | #include <linux/atomic.h> |
54 | #include <linux/cpuset.h> |
55 | #include <linux/proc_ns.h> |
56 | #include <linux/nsproxy.h> |
57 | #include <linux/file.h> |
58 | #include <linux/fs_parser.h> |
59 | #include <linux/sched/cputime.h> |
60 | #include <linux/sched/deadline.h> |
61 | #include <linux/psi.h> |
62 | #include <net/sock.h> |
63 | |
64 | #define CREATE_TRACE_POINTS |
65 | #include <trace/events/cgroup.h> |
66 | |
67 | #define CGROUP_FILE_NAME_MAX (MAX_CGROUP_TYPE_NAMELEN + \ |
68 | MAX_CFTYPE_NAME + 2) |
69 | /* let's not notify more than 100 times per second */ |
70 | #define CGROUP_FILE_NOTIFY_MIN_INTV DIV_ROUND_UP(HZ, 100) |
71 | |
72 | /* |
73 | * To avoid confusing the compiler (and generating warnings) with code |
74 | * that attempts to access what would be a 0-element array (i.e. sized |
75 | * to a potentially empty array when CGROUP_SUBSYS_COUNT == 0), this |
76 | * constant expression can be added. |
77 | */ |
78 | #define CGROUP_HAS_SUBSYS_CONFIG (CGROUP_SUBSYS_COUNT > 0) |
79 | |
80 | /* |
81 | * cgroup_mutex is the master lock. Any modification to cgroup or its |
82 | * hierarchy must be performed while holding it. |
83 | * |
84 | * css_set_lock protects task->cgroups pointer, the list of css_set |
85 | * objects, and the chain of tasks off each css_set. |
86 | * |
87 | * These locks are exported if CONFIG_PROVE_RCU so that accessors in |
88 | * cgroup.h can use them for lockdep annotations. |
89 | */ |
90 | DEFINE_MUTEX(cgroup_mutex); |
91 | DEFINE_SPINLOCK(css_set_lock); |
92 | |
93 | #ifdef CONFIG_PROVE_RCU |
94 | EXPORT_SYMBOL_GPL(cgroup_mutex); |
95 | EXPORT_SYMBOL_GPL(css_set_lock); |
96 | #endif |
97 | |
98 | DEFINE_SPINLOCK(trace_cgroup_path_lock); |
99 | char trace_cgroup_path[TRACE_CGROUP_PATH_LEN]; |
100 | static bool cgroup_debug __read_mostly; |
101 | |
102 | /* |
103 | * Protects cgroup_idr and css_idr so that IDs can be released without |
104 | * grabbing cgroup_mutex. |
105 | */ |
106 | static DEFINE_SPINLOCK(cgroup_idr_lock); |
107 | |
108 | /* |
109 | * Protects cgroup_file->kn for !self csses. It synchronizes notifications |
110 | * against file removal/re-creation across css hiding. |
111 | */ |
112 | static DEFINE_SPINLOCK(cgroup_file_kn_lock); |
113 | |
114 | DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem); |
115 | |
116 | #define cgroup_assert_mutex_or_rcu_locked() \ |
117 | RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ |
118 | !lockdep_is_held(&cgroup_mutex), \ |
119 | "cgroup_mutex or RCU read lock required"); |
120 | |
121 | /* |
122 | * cgroup destruction makes heavy use of work items and there can be a lot |
123 | * of concurrent destructions. Use a separate workqueue so that cgroup |
124 | * destruction work items don't end up filling up max_active of system_wq |
125 | * which may lead to deadlock. |
126 | */ |
127 | static struct workqueue_struct *cgroup_destroy_wq; |
128 | |
129 | /* generate an array of cgroup subsystem pointers */ |
130 | #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys, |
131 | struct cgroup_subsys *cgroup_subsys[] = { |
132 | #include <linux/cgroup_subsys.h> |
133 | }; |
134 | #undef SUBSYS |
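/*
 * Illustrative expansion sketch: assuming cgroup_subsys.h contains, for
 * example, "SUBSYS(cpuset)", the include above expands to
 *
 *	[cpuset_cgrp_id] = &cpuset_cgrp_subsys,
 *
 * i.e. each SUBSYS() entry contributes one array slot keyed by its
 * compile-time subsystem ID. The same x-macro trick builds the name and
 * static-key arrays below.
 */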
135 | |
136 | /* array of cgroup subsystem names */ |
137 | #define SUBSYS(_x) [_x ## _cgrp_id] = #_x, |
138 | static const char *cgroup_subsys_name[] = { |
139 | #include <linux/cgroup_subsys.h> |
140 | }; |
141 | #undef SUBSYS |
142 | |
143 | /* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */ |
144 | #define SUBSYS(_x) \ |
145 | DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key); \ |
146 | DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key); \ |
147 | EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key); \ |
148 | EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key); |
149 | #include <linux/cgroup_subsys.h> |
150 | #undef SUBSYS |
151 | |
152 | #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key, |
153 | static struct static_key_true *cgroup_subsys_enabled_key[] = { |
154 | #include <linux/cgroup_subsys.h> |
155 | }; |
156 | #undef SUBSYS |
157 | |
158 | #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key, |
159 | static struct static_key_true *cgroup_subsys_on_dfl_key[] = { |
160 | #include <linux/cgroup_subsys.h> |
161 | }; |
162 | #undef SUBSYS |
163 | |
164 | static DEFINE_PER_CPU(struct cgroup_rstat_cpu, cgrp_dfl_root_rstat_cpu); |
165 | |
166 | /* the default hierarchy */ |
167 | struct cgroup_root cgrp_dfl_root = { .cgrp.rstat_cpu = &cgrp_dfl_root_rstat_cpu }; |
168 | EXPORT_SYMBOL_GPL(cgrp_dfl_root); |
169 | |
170 | /* |
171 | * The default hierarchy always exists but is hidden until mounted for the |
172 | * first time. This is for backward compatibility. |
173 | */ |
174 | static bool cgrp_dfl_visible; |
175 | |
176 | /* some controllers are not supported in the default hierarchy */ |
177 | static u16 cgrp_dfl_inhibit_ss_mask; |
178 | |
179 | /* some controllers are implicitly enabled on the default hierarchy */ |
180 | static u16 cgrp_dfl_implicit_ss_mask; |
181 | |
182 | /* some controllers can be threaded on the default hierarchy */ |
183 | static u16 cgrp_dfl_threaded_ss_mask; |
184 | |
185 | /* The list of hierarchy roots */ |
186 | LIST_HEAD(cgroup_roots); |
187 | static int cgroup_root_count; |
188 | |
189 | /* hierarchy ID allocation and mapping, protected by cgroup_mutex */ |
190 | static DEFINE_IDR(cgroup_hierarchy_idr); |
191 | |
192 | /* |
193 | * Assign a monotonically increasing serial number to csses. It guarantees |
194 | * cgroups with bigger numbers are newer than those with smaller numbers. |
195 | * Also, as csses are always appended to the parent's ->children list, it |
196 | * guarantees that sibling csses are always sorted in the ascending serial |
197 | * number order on the list. Protected by cgroup_mutex. |
198 | */ |
199 | static u64 css_serial_nr_next = 1; |
200 | |
201 | /* |
202 | * These bitmasks identify subsystems with specific features to avoid |
203 | * having to do iterative checks repeatedly. |
204 | */ |
205 | static u16 have_fork_callback __read_mostly; |
206 | static u16 have_exit_callback __read_mostly; |
207 | static u16 have_release_callback __read_mostly; |
208 | static u16 have_canfork_callback __read_mostly; |
209 | |
210 | static bool have_favordynmods __ro_after_init = IS_ENABLED(CONFIG_CGROUP_FAVOR_DYNMODS); |
211 | |
212 | /* cgroup namespace for init task */ |
213 | struct cgroup_namespace init_cgroup_ns = { |
214 | .ns.count = REFCOUNT_INIT(2), |
215 | .user_ns = &init_user_ns, |
216 | .ns.ops = &cgroupns_operations, |
217 | .ns.inum = PROC_CGROUP_INIT_INO, |
218 | .root_cset = &init_css_set, |
219 | }; |
220 | |
221 | static struct file_system_type cgroup2_fs_type; |
222 | static struct cftype cgroup_base_files[]; |
223 | static struct cftype cgroup_psi_files[]; |
224 | |
225 | /* cgroup optional features */ |
226 | enum cgroup_opt_features { |
227 | #ifdef CONFIG_PSI |
228 | OPT_FEATURE_PRESSURE, |
229 | #endif |
230 | OPT_FEATURE_COUNT |
231 | }; |
232 | |
233 | static const char *cgroup_opt_feature_names[OPT_FEATURE_COUNT] = { |
#ifdef CONFIG_PSI
	"pressure",
#endif
237 | }; |
238 | |
239 | static u16 cgroup_feature_disable_mask __read_mostly; |
240 | |
241 | static int cgroup_apply_control(struct cgroup *cgrp); |
242 | static void cgroup_finalize_control(struct cgroup *cgrp, int ret); |
243 | static void css_task_iter_skip(struct css_task_iter *it, |
244 | struct task_struct *task); |
245 | static int cgroup_destroy_locked(struct cgroup *cgrp); |
246 | static struct cgroup_subsys_state *css_create(struct cgroup *cgrp, |
247 | struct cgroup_subsys *ss); |
248 | static void css_release(struct percpu_ref *ref); |
249 | static void kill_css(struct cgroup_subsys_state *css); |
250 | static int cgroup_addrm_files(struct cgroup_subsys_state *css, |
251 | struct cgroup *cgrp, struct cftype cfts[], |
252 | bool is_add); |
253 | |
254 | #ifdef CONFIG_DEBUG_CGROUP_REF |
255 | #define CGROUP_REF_FN_ATTRS noinline |
256 | #define CGROUP_REF_EXPORT(fn) EXPORT_SYMBOL_GPL(fn); |
257 | #include <linux/cgroup_refcnt.h> |
258 | #endif |
259 | |
260 | /** |
261 | * cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID |
262 | * @ssid: subsys ID of interest |
263 | * |
 * cgroup_subsys_enabled() can only be used with literal subsys names which
 * is fine for individual subsystems but unsuitable for cgroup core. This
 * is a slower static_key_enabled() based test indexed by @ssid.
267 | */ |
268 | bool cgroup_ssid_enabled(int ssid) |
269 | { |
270 | if (!CGROUP_HAS_SUBSYS_CONFIG) |
271 | return false; |
272 | |
273 | return static_key_enabled(cgroup_subsys_enabled_key[ssid]); |
274 | } |
275 | |
276 | /** |
277 | * cgroup_on_dfl - test whether a cgroup is on the default hierarchy |
278 | * @cgrp: the cgroup of interest |
279 | * |
280 | * The default hierarchy is the v2 interface of cgroup and this function |
281 | * can be used to test whether a cgroup is on the default hierarchy for |
282 | * cases where a subsystem should behave differently depending on the |
283 | * interface version. |
284 | * |
285 | * List of changed behaviors: |
286 | * |
287 | * - Mount options "noprefix", "xattr", "clone_children", "release_agent" |
288 | * and "name" are disallowed. |
289 | * |
290 | * - When mounting an existing superblock, mount options should match. |
291 | * |
292 | * - rename(2) is disallowed. |
293 | * |
294 | * - "tasks" is removed. Everything should be at process granularity. Use |
295 | * "cgroup.procs" instead. |
296 | * |
297 | * - "cgroup.procs" is not sorted. pids will be unique unless they got |
298 | * recycled in-between reads. |
299 | * |
300 | * - "release_agent" and "notify_on_release" are removed. Replacement |
301 | * notification mechanism will be implemented. |
302 | * |
303 | * - "cgroup.clone_children" is removed. |
304 | * |
305 | * - "cgroup.subtree_populated" is available. Its value is 0 if the cgroup |
306 | * and its descendants contain no task; otherwise, 1. The file also |
307 | * generates kernfs notification which can be monitored through poll and |
308 | * [di]notify when the value of the file changes. |
309 | * |
310 | * - cpuset: tasks will be kept in empty cpusets when hotplug happens and |
311 | * take masks of ancestors with non-empty cpus/mems, instead of being |
312 | * moved to an ancestor. |
313 | * |
314 | * - cpuset: a task can be moved into an empty cpuset, and again it takes |
315 | * masks of ancestors. |
316 | * |
317 | * - blkcg: blk-throttle becomes properly hierarchical. |
318 | */ |
319 | bool cgroup_on_dfl(const struct cgroup *cgrp) |
320 | { |
321 | return cgrp->root == &cgrp_dfl_root; |
322 | } |
323 | |
324 | /* IDR wrappers which synchronize using cgroup_idr_lock */ |
325 | static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end, |
326 | gfp_t gfp_mask) |
327 | { |
328 | int ret; |
329 | |
330 | idr_preload(gfp_mask); |
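	/*
	 * The preload above has already set aside the memory; mask out
	 * __GFP_DIRECT_RECLAIM so that the allocation below cannot sleep
	 * while the BH-disabled spinlock is held.
	 */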
	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
	spin_unlock_bh(&cgroup_idr_lock);
334 | idr_preload_end(); |
335 | return ret; |
336 | } |
337 | |
338 | static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id) |
339 | { |
340 | void *ret; |
341 | |
	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_replace(idr, ptr, id);
	spin_unlock_bh(&cgroup_idr_lock);
345 | return ret; |
346 | } |
347 | |
348 | static void cgroup_idr_remove(struct idr *idr, int id) |
349 | { |
	spin_lock_bh(&cgroup_idr_lock);
	idr_remove(idr, id);
	spin_unlock_bh(&cgroup_idr_lock);
353 | } |
354 | |
355 | static bool cgroup_has_tasks(struct cgroup *cgrp) |
356 | { |
357 | return cgrp->nr_populated_csets; |
358 | } |
359 | |
360 | static bool cgroup_is_threaded(struct cgroup *cgrp) |
361 | { |
362 | return cgrp->dom_cgrp != cgrp; |
363 | } |
364 | |
365 | /* can @cgrp host both domain and threaded children? */ |
366 | static bool cgroup_is_mixable(struct cgroup *cgrp) |
367 | { |
368 | /* |
369 | * Root isn't under domain level resource control exempting it from |
370 | * the no-internal-process constraint, so it can serve as a thread |
371 | * root and a parent of resource domains at the same time. |
372 | */ |
373 | return !cgroup_parent(cgrp); |
374 | } |
375 | |
376 | /* can @cgrp become a thread root? Should always be true for a thread root */ |
377 | static bool cgroup_can_be_thread_root(struct cgroup *cgrp) |
378 | { |
379 | /* mixables don't care */ |
380 | if (cgroup_is_mixable(cgrp)) |
381 | return true; |
382 | |
383 | /* domain roots can't be nested under threaded */ |
384 | if (cgroup_is_threaded(cgrp)) |
385 | return false; |
386 | |
387 | /* can only have either domain or threaded children */ |
388 | if (cgrp->nr_populated_domain_children) |
389 | return false; |
390 | |
391 | /* and no domain controllers can be enabled */ |
392 | if (cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask) |
393 | return false; |
394 | |
395 | return true; |
396 | } |
397 | |
398 | /* is @cgrp root of a threaded subtree? */ |
399 | static bool cgroup_is_thread_root(struct cgroup *cgrp) |
400 | { |
401 | /* thread root should be a domain */ |
402 | if (cgroup_is_threaded(cgrp)) |
403 | return false; |
404 | |
405 | /* a domain w/ threaded children is a thread root */ |
406 | if (cgrp->nr_threaded_children) |
407 | return true; |
408 | |
409 | /* |
410 | * A domain which has tasks and explicit threaded controllers |
411 | * enabled is a thread root. |
412 | */ |
413 | if (cgroup_has_tasks(cgrp) && |
414 | (cgrp->subtree_control & cgrp_dfl_threaded_ss_mask)) |
415 | return true; |
416 | |
417 | return false; |
418 | } |
419 | |
/* a domain which isn't connected to the root w/o breakage can't be used */
421 | static bool cgroup_is_valid_domain(struct cgroup *cgrp) |
422 | { |
423 | /* the cgroup itself can be a thread root */ |
424 | if (cgroup_is_threaded(cgrp)) |
425 | return false; |
426 | |
427 | /* but the ancestors can't be unless mixable */ |
428 | while ((cgrp = cgroup_parent(cgrp))) { |
429 | if (!cgroup_is_mixable(cgrp) && cgroup_is_thread_root(cgrp)) |
430 | return false; |
431 | if (cgroup_is_threaded(cgrp)) |
432 | return false; |
433 | } |
434 | |
435 | return true; |
436 | } |
437 | |
438 | /* subsystems visibly enabled on a cgroup */ |
439 | static u16 cgroup_control(struct cgroup *cgrp) |
440 | { |
441 | struct cgroup *parent = cgroup_parent(cgrp); |
442 | u16 root_ss_mask = cgrp->root->subsys_mask; |
443 | |
444 | if (parent) { |
445 | u16 ss_mask = parent->subtree_control; |
446 | |
447 | /* threaded cgroups can only have threaded controllers */ |
448 | if (cgroup_is_threaded(cgrp)) |
449 | ss_mask &= cgrp_dfl_threaded_ss_mask; |
450 | return ss_mask; |
451 | } |
452 | |
453 | if (cgroup_on_dfl(cgrp)) |
454 | root_ss_mask &= ~(cgrp_dfl_inhibit_ss_mask | |
455 | cgrp_dfl_implicit_ss_mask); |
456 | return root_ss_mask; |
457 | } |
458 | |
459 | /* subsystems enabled on a cgroup */ |
460 | static u16 cgroup_ss_mask(struct cgroup *cgrp) |
461 | { |
462 | struct cgroup *parent = cgroup_parent(cgrp); |
463 | |
464 | if (parent) { |
465 | u16 ss_mask = parent->subtree_ss_mask; |
466 | |
467 | /* threaded cgroups can only have threaded controllers */ |
468 | if (cgroup_is_threaded(cgrp)) |
469 | ss_mask &= cgrp_dfl_threaded_ss_mask; |
470 | return ss_mask; |
471 | } |
472 | |
473 | return cgrp->root->subsys_mask; |
474 | } |
475 | |
476 | /** |
477 | * cgroup_css - obtain a cgroup's css for the specified subsystem |
478 | * @cgrp: the cgroup of interest |
479 | * @ss: the subsystem of interest (%NULL returns @cgrp->self) |
480 | * |
481 | * Return @cgrp's css (cgroup_subsys_state) associated with @ss. This |
482 | * function must be called either under cgroup_mutex or rcu_read_lock() and |
483 | * the caller is responsible for pinning the returned css if it wants to |
484 | * keep accessing it outside the said locks. This function may return |
485 | * %NULL if @cgrp doesn't have @subsys_id enabled. |
486 | */ |
487 | static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp, |
488 | struct cgroup_subsys *ss) |
489 | { |
490 | if (CGROUP_HAS_SUBSYS_CONFIG && ss) |
491 | return rcu_dereference_check(cgrp->subsys[ss->id], |
492 | lockdep_is_held(&cgroup_mutex)); |
493 | else |
494 | return &cgrp->self; |
495 | } |
496 | |
497 | /** |
498 | * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss |
499 | * @cgrp: the cgroup of interest |
500 | * @ss: the subsystem of interest (%NULL returns @cgrp->self) |
501 | * |
502 | * Similar to cgroup_css() but returns the effective css, which is defined |
503 | * as the matching css of the nearest ancestor including self which has @ss |
504 | * enabled. If @ss is associated with the hierarchy @cgrp is on, this |
505 | * function is guaranteed to return non-NULL css. |
506 | */ |
507 | static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp, |
508 | struct cgroup_subsys *ss) |
509 | { |
510 | lockdep_assert_held(&cgroup_mutex); |
511 | |
512 | if (!ss) |
513 | return &cgrp->self; |
514 | |
515 | /* |
516 | * This function is used while updating css associations and thus |
517 | * can't test the csses directly. Test ss_mask. |
518 | */ |
519 | while (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) { |
520 | cgrp = cgroup_parent(cgrp); |
521 | if (!cgrp) |
522 | return NULL; |
523 | } |
524 | |
525 | return cgroup_css(cgrp, ss); |
526 | } |
527 | |
528 | /** |
529 | * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem |
530 | * @cgrp: the cgroup of interest |
531 | * @ss: the subsystem of interest |
532 | * |
533 | * Find and get the effective css of @cgrp for @ss. The effective css is |
534 | * defined as the matching css of the nearest ancestor including self which |
535 | * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on, |
536 | * the root css is returned, so this function always returns a valid css. |
537 | * |
 * The returned css is not guaranteed to be online, and therefore it is the
 * caller's responsibility to try to get a reference for it.
540 | */ |
541 | struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp, |
542 | struct cgroup_subsys *ss) |
543 | { |
544 | struct cgroup_subsys_state *css; |
545 | |
546 | if (!CGROUP_HAS_SUBSYS_CONFIG) |
547 | return NULL; |
548 | |
549 | do { |
550 | css = cgroup_css(cgrp, ss); |
551 | |
552 | if (css) |
553 | return css; |
554 | cgrp = cgroup_parent(cgrp); |
555 | } while (cgrp); |
556 | |
557 | return init_css_set.subsys[ss->id]; |
558 | } |
559 | |
560 | /** |
561 | * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem |
562 | * @cgrp: the cgroup of interest |
563 | * @ss: the subsystem of interest |
564 | * |
565 | * Find and get the effective css of @cgrp for @ss. The effective css is |
566 | * defined as the matching css of the nearest ancestor including self which |
567 | * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on, |
568 | * the root css is returned, so this function always returns a valid css. |
569 | * The returned css must be put using css_put(). |
570 | */ |
571 | struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp, |
572 | struct cgroup_subsys *ss) |
573 | { |
574 | struct cgroup_subsys_state *css; |
575 | |
576 | if (!CGROUP_HAS_SUBSYS_CONFIG) |
577 | return NULL; |
578 | |
579 | rcu_read_lock(); |
580 | |
581 | do { |
582 | css = cgroup_css(cgrp, ss); |
583 | |
584 | if (css && css_tryget_online(css)) |
585 | goto out_unlock; |
586 | cgrp = cgroup_parent(cgrp); |
587 | } while (cgrp); |
588 | |
589 | css = init_css_set.subsys[ss->id]; |
590 | css_get(css); |
591 | out_unlock: |
592 | rcu_read_unlock(); |
593 | return css; |
594 | } |
595 | EXPORT_SYMBOL_GPL(cgroup_get_e_css); |
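/*
 * Illustrative caller sketch (assumed usage; cpuset is just an example
 * subsystem): the css returned by cgroup_get_e_css() is pinned and must
 * be balanced with css_put(). NULL is only possible when no subsystem
 * is configured at all.
 *
 *	struct cgroup_subsys_state *css;
 *
 *	css = cgroup_get_e_css(cgrp, &cpuset_cgrp_subsys);
 *	if (css) {
 *		pr_debug("effective cpuset css: %p\n", css);
 *		css_put(css);
 *	}
 */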
596 | |
597 | static void cgroup_get_live(struct cgroup *cgrp) |
598 | { |
599 | WARN_ON_ONCE(cgroup_is_dead(cgrp)); |
600 | cgroup_get(cgrp); |
601 | } |
602 | |
603 | /** |
 * __cgroup_task_count - count the number of tasks in a cgroup
 * @cgrp: the cgroup in question
 *
 * The caller is responsible for taking css_set_lock.
 */
608 | int __cgroup_task_count(const struct cgroup *cgrp) |
609 | { |
610 | int count = 0; |
611 | struct cgrp_cset_link *link; |
612 | |
613 | lockdep_assert_held(&css_set_lock); |
614 | |
615 | list_for_each_entry(link, &cgrp->cset_links, cset_link) |
616 | count += link->cset->nr_tasks; |
617 | |
618 | return count; |
619 | } |
620 | |
621 | /** |
622 | * cgroup_task_count - count the number of tasks in a cgroup. |
623 | * @cgrp: the cgroup in question |
624 | */ |
625 | int cgroup_task_count(const struct cgroup *cgrp) |
626 | { |
627 | int count; |
628 | |
	spin_lock_irq(&css_set_lock);
	count = __cgroup_task_count(cgrp);
	spin_unlock_irq(&css_set_lock);
632 | |
633 | return count; |
634 | } |
635 | |
636 | struct cgroup_subsys_state *of_css(struct kernfs_open_file *of) |
637 | { |
638 | struct cgroup *cgrp = of->kn->parent->priv; |
639 | struct cftype *cft = of_cft(of); |
640 | |
641 | /* |
642 | * This is open and unprotected implementation of cgroup_css(). |
643 | * seq_css() is only called from a kernfs file operation which has |
644 | * an active reference on the file. Because all the subsystem |
645 | * files are drained before a css is disassociated with a cgroup, |
646 | * the matching css from the cgroup's subsys table is guaranteed to |
647 | * be and stay valid until the enclosing operation is complete. |
648 | */ |
649 | if (CGROUP_HAS_SUBSYS_CONFIG && cft->ss) |
650 | return rcu_dereference_raw(cgrp->subsys[cft->ss->id]); |
651 | else |
652 | return &cgrp->self; |
653 | } |
654 | EXPORT_SYMBOL_GPL(of_css); |
655 | |
656 | /** |
657 | * for_each_css - iterate all css's of a cgroup |
658 | * @css: the iteration cursor |
659 | * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end |
660 | * @cgrp: the target cgroup to iterate css's of |
661 | * |
662 | * Should be called under cgroup_mutex. |
663 | */ |
664 | #define for_each_css(css, ssid, cgrp) \ |
665 | for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \ |
666 | if (!((css) = rcu_dereference_check( \ |
667 | (cgrp)->subsys[(ssid)], \ |
668 | lockdep_is_held(&cgroup_mutex)))) { } \ |
669 | else |
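/*
 * Illustrative sketch (assumed caller-side usage, not from this file):
 *
 *	struct cgroup_subsys_state *css;
 *	int ssid;
 *
 *	lockdep_assert_held(&cgroup_mutex);
 *	for_each_css(css, ssid, cgrp)
 *		pr_debug("subsys %d has a css on this cgroup\n", ssid);
 */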
670 | |
671 | /** |
672 | * do_each_subsys_mask - filter for_each_subsys with a bitmask |
673 | * @ss: the iteration cursor |
674 | * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end |
675 | * @ss_mask: the bitmask |
676 | * |
677 | * The block will only run for cases where the ssid-th bit (1 << ssid) of |
678 | * @ss_mask is set. |
679 | */ |
680 | #define do_each_subsys_mask(ss, ssid, ss_mask) do { \ |
681 | unsigned long __ss_mask = (ss_mask); \ |
682 | if (!CGROUP_HAS_SUBSYS_CONFIG) { \ |
683 | (ssid) = 0; \ |
684 | break; \ |
685 | } \ |
686 | for_each_set_bit(ssid, &__ss_mask, CGROUP_SUBSYS_COUNT) { \ |
687 | (ss) = cgroup_subsys[ssid]; \ |
688 | { |
689 | |
690 | #define while_each_subsys_mask() \ |
691 | } \ |
692 | } \ |
693 | } while (false) |
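/*
 * Illustrative sketch: walking only the subsystems whose bits are set in
 * a mask, e.g. the fork-callback mask defined earlier in this file:
 *
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	do_each_subsys_mask(ss, ssid, have_fork_callback) {
 *		pr_debug("%s implements ->fork()\n", ss->name);
 *	} while_each_subsys_mask();
 */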
694 | |
695 | /* iterate over child cgrps, lock should be held throughout iteration */ |
696 | #define cgroup_for_each_live_child(child, cgrp) \ |
697 | list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \ |
698 | if (({ lockdep_assert_held(&cgroup_mutex); \ |
699 | cgroup_is_dead(child); })) \ |
700 | ; \ |
701 | else |
702 | |
703 | /* walk live descendants in pre order */ |
704 | #define cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) \ |
705 | css_for_each_descendant_pre((d_css), cgroup_css((cgrp), NULL)) \ |
706 | if (({ lockdep_assert_held(&cgroup_mutex); \ |
707 | (dsct) = (d_css)->cgroup; \ |
708 | cgroup_is_dead(dsct); })) \ |
709 | ; \ |
710 | else |
711 | |
712 | /* walk live descendants in postorder */ |
713 | #define cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) \ |
714 | css_for_each_descendant_post((d_css), cgroup_css((cgrp), NULL)) \ |
715 | if (({ lockdep_assert_held(&cgroup_mutex); \ |
716 | (dsct) = (d_css)->cgroup; \ |
717 | cgroup_is_dead(dsct); })) \ |
718 | ; \ |
719 | else |
720 | |
721 | /* |
722 | * The default css_set - used by init and its children prior to any |
723 | * hierarchies being mounted. It contains a pointer to the root state |
724 | * for each subsystem. Also used to anchor the list of css_sets. Not |
725 | * reference-counted, to improve performance when child cgroups |
726 | * haven't been created. |
727 | */ |
728 | struct css_set init_css_set = { |
729 | .refcount = REFCOUNT_INIT(1), |
730 | .dom_cset = &init_css_set, |
731 | .tasks = LIST_HEAD_INIT(init_css_set.tasks), |
732 | .mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks), |
733 | .dying_tasks = LIST_HEAD_INIT(init_css_set.dying_tasks), |
734 | .task_iters = LIST_HEAD_INIT(init_css_set.task_iters), |
735 | .threaded_csets = LIST_HEAD_INIT(init_css_set.threaded_csets), |
736 | .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links), |
737 | .mg_src_preload_node = LIST_HEAD_INIT(init_css_set.mg_src_preload_node), |
738 | .mg_dst_preload_node = LIST_HEAD_INIT(init_css_set.mg_dst_preload_node), |
739 | .mg_node = LIST_HEAD_INIT(init_css_set.mg_node), |
740 | |
741 | /* |
742 | * The following field is re-initialized when this cset gets linked |
743 | * in cgroup_init(). However, let's initialize the field |
744 | * statically too so that the default cgroup can be accessed safely |
745 | * early during boot. |
746 | */ |
747 | .dfl_cgrp = &cgrp_dfl_root.cgrp, |
748 | }; |
749 | |
750 | static int css_set_count = 1; /* 1 for init_css_set */ |
751 | |
752 | static bool css_set_threaded(struct css_set *cset) |
753 | { |
754 | return cset->dom_cset != cset; |
755 | } |
756 | |
757 | /** |
758 | * css_set_populated - does a css_set contain any tasks? |
759 | * @cset: target css_set |
760 | * |
761 | * css_set_populated() should be the same as !!cset->nr_tasks at steady |
762 | * state. However, css_set_populated() can be called while a task is being |
763 | * added to or removed from the linked list before the nr_tasks is |
764 | * properly updated. Hence, we can't just look at ->nr_tasks here. |
765 | */ |
766 | static bool css_set_populated(struct css_set *cset) |
767 | { |
768 | lockdep_assert_held(&css_set_lock); |
769 | |
	return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
771 | } |
772 | |
773 | /** |
774 | * cgroup_update_populated - update the populated count of a cgroup |
775 | * @cgrp: the target cgroup |
776 | * @populated: inc or dec populated count |
777 | * |
778 | * One of the css_sets associated with @cgrp is either getting its first |
779 | * task or losing the last. Update @cgrp->nr_populated_* accordingly. The |
780 | * count is propagated towards root so that a given cgroup's |
781 | * nr_populated_children is zero iff none of its descendants contain any |
782 | * tasks. |
783 | * |
784 | * @cgrp's interface file "cgroup.populated" is zero if both |
785 | * @cgrp->nr_populated_csets and @cgrp->nr_populated_children are zero and |
786 | * 1 otherwise. When the sum changes from or to zero, userland is notified |
787 | * that the content of the interface file has changed. This can be used to |
788 | * detect when @cgrp and its descendants become populated or empty. |
789 | */ |
790 | static void cgroup_update_populated(struct cgroup *cgrp, bool populated) |
791 | { |
792 | struct cgroup *child = NULL; |
793 | int adj = populated ? 1 : -1; |
794 | |
795 | lockdep_assert_held(&css_set_lock); |
796 | |
797 | do { |
798 | bool was_populated = cgroup_is_populated(cgrp); |
799 | |
800 | if (!child) { |
801 | cgrp->nr_populated_csets += adj; |
802 | } else { |
			if (cgroup_is_threaded(child))
804 | cgrp->nr_populated_threaded_children += adj; |
805 | else |
806 | cgrp->nr_populated_domain_children += adj; |
807 | } |
808 | |
809 | if (was_populated == cgroup_is_populated(cgrp)) |
810 | break; |
811 | |
812 | cgroup1_check_for_release(cgrp); |
813 | TRACE_CGROUP_PATH(notify_populated, cgrp, |
814 | cgroup_is_populated(cgrp)); |
		cgroup_file_notify(&cgrp->events_file);
816 | |
817 | child = cgrp; |
818 | cgrp = cgroup_parent(cgrp); |
819 | } while (cgrp); |
820 | } |
821 | |
822 | /** |
823 | * css_set_update_populated - update populated state of a css_set |
824 | * @cset: target css_set |
825 | * @populated: whether @cset is populated or depopulated |
826 | * |
827 | * @cset is either getting the first task or losing the last. Update the |
828 | * populated counters of all associated cgroups accordingly. |
829 | */ |
830 | static void css_set_update_populated(struct css_set *cset, bool populated) |
831 | { |
832 | struct cgrp_cset_link *link; |
833 | |
834 | lockdep_assert_held(&css_set_lock); |
835 | |
836 | list_for_each_entry(link, &cset->cgrp_links, cgrp_link) |
		cgroup_update_populated(link->cgrp, populated);
838 | } |
839 | |
840 | /* |
841 | * @task is leaving, advance task iterators which are pointing to it so |
842 | * that they can resume at the next position. Advancing an iterator might |
843 | * remove it from the list, use safe walk. See css_task_iter_skip() for |
844 | * details. |
845 | */ |
846 | static void css_set_skip_task_iters(struct css_set *cset, |
847 | struct task_struct *task) |
848 | { |
849 | struct css_task_iter *it, *pos; |
850 | |
851 | list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node) |
852 | css_task_iter_skip(it, task); |
853 | } |
854 | |
855 | /** |
856 | * css_set_move_task - move a task from one css_set to another |
857 | * @task: task being moved |
858 | * @from_cset: css_set @task currently belongs to (may be NULL) |
859 | * @to_cset: new css_set @task is being moved to (may be NULL) |
860 | * @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks |
861 | * |
862 | * Move @task from @from_cset to @to_cset. If @task didn't belong to any |
863 | * css_set, @from_cset can be NULL. If @task is being disassociated |
864 | * instead of moved, @to_cset can be NULL. |
865 | * |
866 | * This function automatically handles populated counter updates and |
867 | * css_task_iter adjustments but the caller is responsible for managing |
868 | * @from_cset and @to_cset's reference counts. |
869 | */ |
870 | static void css_set_move_task(struct task_struct *task, |
871 | struct css_set *from_cset, struct css_set *to_cset, |
872 | bool use_mg_tasks) |
873 | { |
874 | lockdep_assert_held(&css_set_lock); |
875 | |
	if (to_cset && !css_set_populated(to_cset))
		css_set_update_populated(to_cset, true);
878 | |
879 | if (from_cset) { |
880 | WARN_ON_ONCE(list_empty(&task->cg_list)); |
881 | |
		css_set_skip_task_iters(from_cset, task);
		list_del_init(&task->cg_list);
		if (!css_set_populated(from_cset))
			css_set_update_populated(from_cset, false);
886 | } else { |
887 | WARN_ON_ONCE(!list_empty(&task->cg_list)); |
888 | } |
889 | |
890 | if (to_cset) { |
891 | /* |
892 | * We are synchronized through cgroup_threadgroup_rwsem |
893 | * against PF_EXITING setting such that we can't race |
894 | * against cgroup_exit()/cgroup_free() dropping the css_set. |
895 | */ |
896 | WARN_ON_ONCE(task->flags & PF_EXITING); |
897 | |
		cgroup_move_task(task, to_cset);
		list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
900 | &to_cset->tasks); |
901 | } |
902 | } |
903 | |
904 | /* |
 * hash table for cgroup groups. This improves the performance of finding
 * an existing css_set. This hash doesn't (currently) take into
907 | * account cgroups in empty hierarchies. |
908 | */ |
909 | #define CSS_SET_HASH_BITS 7 |
910 | static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS); |
911 | |
912 | static unsigned long css_set_hash(struct cgroup_subsys_state **css) |
913 | { |
914 | unsigned long key = 0UL; |
915 | struct cgroup_subsys *ss; |
916 | int i; |
917 | |
918 | for_each_subsys(ss, i) |
919 | key += (unsigned long)css[i]; |
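	/* fold the upper bits back in to compensate for pointer alignment */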
920 | key = (key >> 16) ^ key; |
921 | |
922 | return key; |
923 | } |
924 | |
925 | void put_css_set_locked(struct css_set *cset) |
926 | { |
927 | struct cgrp_cset_link *link, *tmp_link; |
928 | struct cgroup_subsys *ss; |
929 | int ssid; |
930 | |
931 | lockdep_assert_held(&css_set_lock); |
932 | |
	if (!refcount_dec_and_test(&cset->refcount))
934 | return; |
935 | |
936 | WARN_ON_ONCE(!list_empty(&cset->threaded_csets)); |
937 | |
938 | /* This css_set is dead. Unlink it and release cgroup and css refs */ |
939 | for_each_subsys(ss, ssid) { |
		list_del(&cset->e_cset_node[ssid]);
		css_put(cset->subsys[ssid]);
	}
	hash_del(&cset->hlist);
944 | css_set_count--; |
945 | |
946 | list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) { |
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		if (cgroup_parent(link->cgrp))
			cgroup_put(link->cgrp);
		kfree(link);
952 | } |
953 | |
954 | if (css_set_threaded(cset)) { |
		list_del(&cset->threaded_csets_node);
		put_css_set_locked(cset->dom_cset);
957 | } |
958 | |
959 | kfree_rcu(cset, rcu_head); |
960 | } |
961 | |
962 | /** |
963 | * compare_css_sets - helper function for find_existing_css_set(). |
964 | * @cset: candidate css_set being tested |
965 | * @old_cset: existing css_set for a task |
966 | * @new_cgrp: cgroup that's being entered by the task |
967 | * @template: desired set of css pointers in css_set (pre-calculated) |
968 | * |
969 | * Returns true if "cset" matches "old_cset" except for the hierarchy |
970 | * which "new_cgrp" belongs to, for which it should match "new_cgrp". |
971 | */ |
972 | static bool compare_css_sets(struct css_set *cset, |
973 | struct css_set *old_cset, |
974 | struct cgroup *new_cgrp, |
975 | struct cgroup_subsys_state *template[]) |
976 | { |
977 | struct cgroup *new_dfl_cgrp; |
978 | struct list_head *l1, *l2; |
979 | |
980 | /* |
981 | * On the default hierarchy, there can be csets which are |
982 | * associated with the same set of cgroups but different csses. |
983 | * Let's first ensure that csses match. |
984 | */ |
	if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
		return false;

989 | /* @cset's domain should match the default cgroup's */ |
	if (cgroup_on_dfl(new_cgrp))
991 | new_dfl_cgrp = new_cgrp; |
992 | else |
993 | new_dfl_cgrp = old_cset->dfl_cgrp; |
994 | |
995 | if (new_dfl_cgrp->dom_cgrp != cset->dom_cset->dfl_cgrp) |
996 | return false; |
997 | |
998 | /* |
999 | * Compare cgroup pointers in order to distinguish between |
1000 | * different cgroups in hierarchies. As different cgroups may |
1001 | * share the same effective css, this comparison is always |
1002 | * necessary. |
1003 | */ |
1004 | l1 = &cset->cgrp_links; |
1005 | l2 = &old_cset->cgrp_links; |
1006 | while (1) { |
1007 | struct cgrp_cset_link *link1, *link2; |
1008 | struct cgroup *cgrp1, *cgrp2; |
1009 | |
1010 | l1 = l1->next; |
1011 | l2 = l2->next; |
1012 | /* See if we reached the end - both lists are equal length. */ |
1013 | if (l1 == &cset->cgrp_links) { |
1014 | BUG_ON(l2 != &old_cset->cgrp_links); |
1015 | break; |
1016 | } else { |
1017 | BUG_ON(l2 == &old_cset->cgrp_links); |
1018 | } |
1019 | /* Locate the cgroups associated with these links. */ |
1020 | link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link); |
1021 | link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link); |
1022 | cgrp1 = link1->cgrp; |
1023 | cgrp2 = link2->cgrp; |
1024 | /* Hierarchies should be linked in the same order. */ |
1025 | BUG_ON(cgrp1->root != cgrp2->root); |
1026 | |
1027 | /* |
1028 | * If this hierarchy is the hierarchy of the cgroup |
1029 | * that's changing, then we need to check that this |
1030 | * css_set points to the new cgroup; if it's any other |
1031 | * hierarchy, then this css_set should point to the |
1032 | * same cgroup as the old css_set. |
1033 | */ |
1034 | if (cgrp1->root == new_cgrp->root) { |
1035 | if (cgrp1 != new_cgrp) |
1036 | return false; |
1037 | } else { |
1038 | if (cgrp1 != cgrp2) |
1039 | return false; |
1040 | } |
1041 | } |
1042 | return true; |
1043 | } |
1044 | |
1045 | /** |
1046 | * find_existing_css_set - init css array and find the matching css_set |
1047 | * @old_cset: the css_set that we're using before the cgroup transition |
1048 | * @cgrp: the cgroup that we're moving into |
1049 | * @template: out param for the new set of csses, should be clear on entry |
1050 | */ |
1051 | static struct css_set *find_existing_css_set(struct css_set *old_cset, |
1052 | struct cgroup *cgrp, |
1053 | struct cgroup_subsys_state **template) |
1054 | { |
1055 | struct cgroup_root *root = cgrp->root; |
1056 | struct cgroup_subsys *ss; |
1057 | struct css_set *cset; |
1058 | unsigned long key; |
1059 | int i; |
1060 | |
1061 | /* |
1062 | * Build the set of subsystem state objects that we want to see in the |
1063 | * new css_set. While subsystems can change globally, the entries here |
1064 | * won't change, so no need for locking. |
1065 | */ |
1066 | for_each_subsys(ss, i) { |
1067 | if (root->subsys_mask & (1UL << i)) { |
1068 | /* |
1069 | * @ss is in this hierarchy, so we want the |
1070 | * effective css from @cgrp. |
1071 | */ |
1072 | template[i] = cgroup_e_css_by_mask(cgrp, ss); |
1073 | } else { |
1074 | /* |
1075 | * @ss is not in this hierarchy, so we don't want |
1076 | * to change the css. |
1077 | */ |
1078 | template[i] = old_cset->subsys[i]; |
1079 | } |
1080 | } |
1081 | |
	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cset, hlist, key) {
		if (!compare_css_sets(cset, old_cset, cgrp, template))
1085 | continue; |
1086 | |
1087 | /* This css_set matches what we need */ |
1088 | return cset; |
1089 | } |
1090 | |
1091 | /* No existing cgroup group matched */ |
1092 | return NULL; |
1093 | } |
1094 | |
1095 | static void free_cgrp_cset_links(struct list_head *links_to_free) |
1096 | { |
1097 | struct cgrp_cset_link *link, *tmp_link; |
1098 | |
1099 | list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) { |
		list_del(&link->cset_link);
		kfree(link);
1102 | } |
1103 | } |
1104 | |
1105 | /** |
1106 | * allocate_cgrp_cset_links - allocate cgrp_cset_links |
1107 | * @count: the number of links to allocate |
1108 | * @tmp_links: list_head the allocated links are put on |
1109 | * |
1110 | * Allocate @count cgrp_cset_link structures and chain them on @tmp_links |
1111 | * through ->cset_link. Returns 0 on success or -errno. |
1112 | */ |
1113 | static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links) |
1114 | { |
1115 | struct cgrp_cset_link *link; |
1116 | int i; |
1117 | |
	INIT_LIST_HEAD(tmp_links);
1119 | |
1120 | for (i = 0; i < count; i++) { |
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cgrp_cset_links(tmp_links);
			return -ENOMEM;
		}
		list_add(&link->cset_link, tmp_links);
1127 | } |
1128 | return 0; |
1129 | } |
1130 | |
1131 | /** |
1132 | * link_css_set - a helper function to link a css_set to a cgroup |
1133 | * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links() |
1134 | * @cset: the css_set to be linked |
1135 | * @cgrp: the destination cgroup |
1136 | */ |
1137 | static void link_css_set(struct list_head *tmp_links, struct css_set *cset, |
1138 | struct cgroup *cgrp) |
1139 | { |
1140 | struct cgrp_cset_link *link; |
1141 | |
1142 | BUG_ON(list_empty(tmp_links)); |
1143 | |
1144 | if (cgroup_on_dfl(cgrp)) |
1145 | cset->dfl_cgrp = cgrp; |
1146 | |
1147 | link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link); |
1148 | link->cset = cset; |
1149 | link->cgrp = cgrp; |
1150 | |
1151 | /* |
1152 | * Always add links to the tail of the lists so that the lists are |
1153 | * in chronological order. |
1154 | */ |
	list_move_tail(&link->cset_link, &cgrp->cset_links);
	list_add_tail(&link->cgrp_link, &cset->cgrp_links);
1157 | |
1158 | if (cgroup_parent(cgrp)) |
1159 | cgroup_get_live(cgrp); |
1160 | } |
1161 | |
1162 | /** |
1163 | * find_css_set - return a new css_set with one cgroup updated |
1164 | * @old_cset: the baseline css_set |
1165 | * @cgrp: the cgroup to be updated |
1166 | * |
1167 | * Return a new css_set that's equivalent to @old_cset, but with @cgrp |
1168 | * substituted into the appropriate hierarchy. |
1169 | */ |
1170 | static struct css_set *find_css_set(struct css_set *old_cset, |
1171 | struct cgroup *cgrp) |
1172 | { |
1173 | struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { }; |
1174 | struct css_set *cset; |
1175 | struct list_head tmp_links; |
1176 | struct cgrp_cset_link *link; |
1177 | struct cgroup_subsys *ss; |
1178 | unsigned long key; |
1179 | int ssid; |
1180 | |
1181 | lockdep_assert_held(&cgroup_mutex); |
1182 | |
	/*
	 * First see if we already have a cgroup group that matches
	 * the desired set.
	 */
	spin_lock_irq(&css_set_lock);
	cset = find_existing_css_set(old_cset, cgrp, template);
	if (cset)
		get_css_set(cset);
	spin_unlock_irq(&css_set_lock);
1190 | |
1191 | if (cset) |
1192 | return cset; |
1193 | |
	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
1195 | if (!cset) |
1196 | return NULL; |
1197 | |
1198 | /* Allocate all the cgrp_cset_link objects that we'll need */ |
	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
		kfree(cset);
1201 | return NULL; |
1202 | } |
1203 | |
	refcount_set(&cset->refcount, 1);
	cset->dom_cset = cset;
	INIT_LIST_HEAD(&cset->tasks);
	INIT_LIST_HEAD(&cset->mg_tasks);
	INIT_LIST_HEAD(&cset->dying_tasks);
	INIT_LIST_HEAD(&cset->task_iters);
	INIT_LIST_HEAD(&cset->threaded_csets);
	INIT_HLIST_NODE(&cset->hlist);
	INIT_LIST_HEAD(&cset->cgrp_links);
	INIT_LIST_HEAD(&cset->mg_src_preload_node);
	INIT_LIST_HEAD(&cset->mg_dst_preload_node);
	INIT_LIST_HEAD(&cset->mg_node);
1216 | |
	/*
	 * Copy the set of subsystem state objects generated in
	 * find_existing_css_set().
	 */
1219 | memcpy(cset->subsys, template, sizeof(cset->subsys)); |
1220 | |
	spin_lock_irq(&css_set_lock);
1222 | /* Add reference counts and links from the new css_set. */ |
1223 | list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) { |
1224 | struct cgroup *c = link->cgrp; |
1225 | |
1226 | if (c->root == cgrp->root) |
1227 | c = cgrp; |
		link_css_set(&tmp_links, cset, c);
1229 | } |
1230 | |
1231 | BUG_ON(!list_empty(&tmp_links)); |
1232 | |
1233 | css_set_count++; |
1234 | |
1235 | /* Add @cset to the hash table */ |
	key = css_set_hash(cset->subsys);
1237 | hash_add(css_set_table, &cset->hlist, key); |
1238 | |
1239 | for_each_subsys(ss, ssid) { |
1240 | struct cgroup_subsys_state *css = cset->subsys[ssid]; |
1241 | |
		list_add_tail(&cset->e_cset_node[ssid],
			      &css->cgroup->e_csets[ssid]);
1244 | css_get(css); |
1245 | } |
1246 | |
	spin_unlock_irq(&css_set_lock);
1248 | |
1249 | /* |
1250 | * If @cset should be threaded, look up the matching dom_cset and |
1251 | * link them up. We first fully initialize @cset then look for the |
1252 | * dom_cset. It's simpler this way and safe as @cset is guaranteed |
1253 | * to stay empty until we return. |
1254 | */ |
	if (cgroup_is_threaded(cset->dfl_cgrp)) {
1256 | struct css_set *dcset; |
1257 | |
		dcset = find_css_set(cset, cset->dfl_cgrp->dom_cgrp);
1259 | if (!dcset) { |
1260 | put_css_set(cset); |
1261 | return NULL; |
1262 | } |
1263 | |
		spin_lock_irq(&css_set_lock);
		cset->dom_cset = dcset;
		list_add_tail(&cset->threaded_csets_node,
			      &dcset->threaded_csets);
		spin_unlock_irq(&css_set_lock);
1269 | } |
1270 | |
1271 | return cset; |
1272 | } |
1273 | |
1274 | struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root) |
1275 | { |
	struct cgroup *root_cgrp = kernfs_root_to_node(kf_root)->priv;
1277 | |
1278 | return root_cgrp->root; |
1279 | } |
1280 | |
1281 | void cgroup_favor_dynmods(struct cgroup_root *root, bool favor) |
1282 | { |
1283 | bool favoring = root->flags & CGRP_ROOT_FAVOR_DYNMODS; |
1284 | |
1285 | /* see the comment above CGRP_ROOT_FAVOR_DYNMODS definition */ |
1286 | if (favor && !favoring) { |
1287 | rcu_sync_enter(&cgroup_threadgroup_rwsem.rss); |
1288 | root->flags |= CGRP_ROOT_FAVOR_DYNMODS; |
1289 | } else if (!favor && favoring) { |
1290 | rcu_sync_exit(&cgroup_threadgroup_rwsem.rss); |
1291 | root->flags &= ~CGRP_ROOT_FAVOR_DYNMODS; |
1292 | } |
1293 | } |
1294 | |
1295 | static int cgroup_init_root_id(struct cgroup_root *root) |
1296 | { |
1297 | int id; |
1298 | |
1299 | lockdep_assert_held(&cgroup_mutex); |
1300 | |
	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
1302 | if (id < 0) |
1303 | return id; |
1304 | |
1305 | root->hierarchy_id = id; |
1306 | return 0; |
1307 | } |
1308 | |
1309 | static void cgroup_exit_root_id(struct cgroup_root *root) |
1310 | { |
1311 | lockdep_assert_held(&cgroup_mutex); |
1312 | |
	idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
1314 | } |
1315 | |
1316 | void cgroup_free_root(struct cgroup_root *root) |
1317 | { |
1318 | kfree_rcu(root, rcu); |
1319 | } |
1320 | |
1321 | static void cgroup_destroy_root(struct cgroup_root *root) |
1322 | { |
1323 | struct cgroup *cgrp = &root->cgrp; |
1324 | struct cgrp_cset_link *link, *tmp_link; |
1325 | |
1326 | trace_cgroup_destroy_root(root); |
1327 | |
	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
1329 | |
1330 | BUG_ON(atomic_read(&root->nr_cgrps)); |
1331 | BUG_ON(!list_empty(&cgrp->self.children)); |
1332 | |
1333 | /* Rebind all subsystems back to the default hierarchy */ |
1334 | WARN_ON(rebind_subsystems(&cgrp_dfl_root, root->subsys_mask)); |
1335 | |
1336 | /* |
1337 | * Release all the links from cset_links to this hierarchy's |
1338 | * root cgroup |
1339 | */ |
	spin_lock_irq(&css_set_lock);
1341 | |
1342 | list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) { |
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		kfree(link);
1346 | } |
1347 | |
	spin_unlock_irq(&css_set_lock);
1349 | |
1350 | WARN_ON_ONCE(list_empty(&root->root_list)); |
	list_del_rcu(&root->root_list);
1352 | cgroup_root_count--; |
1353 | |
1354 | if (!have_favordynmods) |
		cgroup_favor_dynmods(root, false);
1356 | |
1357 | cgroup_exit_root_id(root); |
1358 | |
1359 | cgroup_unlock(); |
1360 | |
1361 | cgroup_rstat_exit(cgrp); |
	kernfs_destroy_root(root->kf_root);
1363 | cgroup_free_root(root); |
1364 | } |
1365 | |
1366 | /* |
1367 | * Returned cgroup is without refcount but it's valid as long as cset pins it. |
1368 | */ |
1369 | static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset, |
1370 | struct cgroup_root *root) |
1371 | { |
1372 | struct cgroup *res_cgroup = NULL; |
1373 | |
1374 | if (cset == &init_css_set) { |
1375 | res_cgroup = &root->cgrp; |
1376 | } else if (root == &cgrp_dfl_root) { |
1377 | res_cgroup = cset->dfl_cgrp; |
1378 | } else { |
		struct cgrp_cset_link *link;

		lockdep_assert_held(&css_set_lock);
1381 | |
1382 | list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { |
1383 | struct cgroup *c = link->cgrp; |
1384 | |
1385 | if (c->root == root) { |
1386 | res_cgroup = c; |
1387 | break; |
1388 | } |
1389 | } |
1390 | } |
1391 | |
1392 | /* |
1393 | * If cgroup_mutex is not held, the cgrp_cset_link will be freed |
1394 | * before we remove the cgroup root from the root_list. Consequently, |
1395 | * when accessing a cgroup root, the cset_link may have already been |
1396 | * freed, resulting in a NULL res_cgroup. However, by holding the |
1397 | * cgroup_mutex, we ensure that res_cgroup can't be NULL. |
1398 | * If we don't hold cgroup_mutex in the caller, we must do the NULL |
1399 | * check. |
1400 | */ |
1401 | return res_cgroup; |
1402 | } |
1403 | |
1404 | /* |
1405 | * look up cgroup associated with current task's cgroup namespace on the |
1406 | * specified hierarchy |
1407 | */ |
1408 | static struct cgroup * |
1409 | current_cgns_cgroup_from_root(struct cgroup_root *root) |
1410 | { |
1411 | struct cgroup *res = NULL; |
1412 | struct css_set *cset; |
1413 | |
1414 | lockdep_assert_held(&css_set_lock); |
1415 | |
1416 | rcu_read_lock(); |
1417 | |
1418 | cset = current->nsproxy->cgroup_ns->root_cset; |
1419 | res = __cset_cgroup_from_root(cset, root); |
1420 | |
1421 | rcu_read_unlock(); |
1422 | |
1423 | /* |
1424 | * The namespace_sem is held by current, so the root cgroup can't |
	 * be umounted. Therefore, res is guaranteed to be non-NULL.
1426 | */ |
1427 | WARN_ON_ONCE(!res); |
1428 | return res; |
1429 | } |
1430 | |
1431 | /* |
1432 | * Look up cgroup associated with current task's cgroup namespace on the default |
1433 | * hierarchy. |
1434 | * |
1435 | * Unlike current_cgns_cgroup_from_root(), this doesn't need locks: |
1436 | * - Internal rcu_read_lock is unnecessary because we don't dereference any rcu |
1437 | * pointers. |
1438 | * - css_set_lock is not needed because we just read cset->dfl_cgrp. |
1439 | * - As a bonus returned cgrp is pinned with the current because it cannot |
1440 | * switch cgroup_ns asynchronously. |
1441 | */ |
1442 | static struct cgroup *current_cgns_cgroup_dfl(void) |
1443 | { |
1444 | struct css_set *cset; |
1445 | |
1446 | if (current->nsproxy) { |
1447 | cset = current->nsproxy->cgroup_ns->root_cset; |
		return __cset_cgroup_from_root(cset, &cgrp_dfl_root);
1449 | } else { |
1450 | /* |
1451 | * NOTE: This function may be called from bpf_cgroup_from_id() |
1452 | * on a task which has already passed exit_task_namespaces() and |
1453 | * nsproxy == NULL. Fall back to cgrp_dfl_root which will make all |
1454 | * cgroups visible for lookups. |
1455 | */ |
1456 | return &cgrp_dfl_root.cgrp; |
1457 | } |
1458 | } |
1459 | |
1460 | /* look up cgroup associated with given css_set on the specified hierarchy */ |
1461 | static struct cgroup *cset_cgroup_from_root(struct css_set *cset, |
1462 | struct cgroup_root *root) |
1463 | { |
1464 | lockdep_assert_held(&css_set_lock); |
1465 | |
1466 | return __cset_cgroup_from_root(cset, root); |
1467 | } |
1468 | |
1469 | /* |
1470 | * Return the cgroup for "task" from the given hierarchy. Must be |
1471 | * called with css_set_lock held to prevent task's groups from being modified. |
1472 | * Must be called with either cgroup_mutex or rcu read lock to prevent the |
1473 | * cgroup root from being destroyed. |
1474 | */ |
1475 | struct cgroup *task_cgroup_from_root(struct task_struct *task, |
1476 | struct cgroup_root *root) |
1477 | { |
1478 | /* |
1479 | * No need to lock the task - since we hold css_set_lock the |
1480 | * task can't change groups. |
1481 | */ |
	return cset_cgroup_from_root(task_css_set(task), root);
1483 | } |
1484 | |
1485 | /* |
1486 | * A task must hold cgroup_mutex to modify cgroups. |
1487 | * |
1488 | * Any task can increment and decrement the count field without lock. |
1489 | * So in general, code holding cgroup_mutex can't rely on the count |
1490 | * field not changing. However, if the count goes to zero, then only |
1491 | * cgroup_attach_task() can increment it again. Because a count of zero |
1492 | * means that no tasks are currently attached, therefore there is no |
1493 | * way a task attached to that cgroup can fork (the other way to |
1494 | * increment the count). So code holding cgroup_mutex can safely |
1495 | * assume that if the count is zero, it will stay zero. Similarly, if |
1496 | * a task holds cgroup_mutex on a cgroup with zero count, it |
1497 | * knows that the cgroup won't be removed, as cgroup_rmdir() |
1498 | * needs that mutex. |
1499 | * |
1500 | * A cgroup can only be deleted if both its 'count' of using tasks |
1501 | * is zero, and its list of 'children' cgroups is empty. Since all |
1502 | * tasks in the system use _some_ cgroup, and since there is always at |
 * least one task in the system (init, pid == 1), the root cgroup
 * always has either child cgroups and/or attached tasks. So we don't
1505 | * need a special hack to ensure that root cgroup cannot be deleted. |
1506 | * |
1507 | * P.S. One more locking exception. RCU is used to guard the |
 * update of a task's cgroup pointer by cgroup_attach_task()
1509 | */ |
1510 | |
1511 | static struct kernfs_syscall_ops cgroup_kf_syscall_ops; |
1512 | |
1513 | static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft, |
1514 | char *buf) |
1515 | { |
1516 | struct cgroup_subsys *ss = cft->ss; |
1517 | |
1518 | if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) && |
1519 | !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) { |
		const char *dbg = (cft->flags & CFTYPE_DEBUG) ? ".__DEBUG__." : "";
1521 | |
		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s%s.%s",
1523 | dbg, cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name, |
1524 | cft->name); |
1525 | } else { |
1526 | strscpy(buf, cft->name, CGROUP_FILE_NAME_MAX); |
1527 | } |
1528 | return buf; |
1529 | } |
1530 | |
1531 | /** |
1532 | * cgroup_file_mode - deduce file mode of a control file |
1533 | * @cft: the control file in question |
1534 | * |
1535 | * S_IRUGO for read, S_IWUSR for write. |
1536 | */ |
1537 | static umode_t cgroup_file_mode(const struct cftype *cft) |
1538 | { |
1539 | umode_t mode = 0; |
1540 | |
1541 | if (cft->read_u64 || cft->read_s64 || cft->seq_show) |
1542 | mode |= S_IRUGO; |
1543 | |
1544 | if (cft->write_u64 || cft->write_s64 || cft->write) { |
1545 | if (cft->flags & CFTYPE_WORLD_WRITABLE) |
1546 | mode |= S_IWUGO; |
1547 | else |
1548 | mode |= S_IWUSR; |
1549 | } |
1550 | |
1551 | return mode; |
1552 | } |
1553 | |
1554 | /** |
1555 | * cgroup_calc_subtree_ss_mask - calculate subtree_ss_mask |
1556 | * @subtree_control: the new subtree_control mask to consider |
1557 | * @this_ss_mask: available subsystems |
1558 | * |
1559 | * On the default hierarchy, a subsystem may request other subsystems to be |
1560 | * enabled together through its ->depends_on mask. In such cases, more |
1561 | * subsystems than specified in "cgroup.subtree_control" may be enabled. |
1562 | * |
1563 | * This function calculates which subsystems need to be enabled if |
1564 | * @subtree_control is to be applied while restricted to @this_ss_mask. |
1565 | */ |
1566 | static u16 cgroup_calc_subtree_ss_mask(u16 subtree_control, u16 this_ss_mask) |
1567 | { |
1568 | u16 cur_ss_mask = subtree_control; |
1569 | struct cgroup_subsys *ss; |
1570 | int ssid; |
1571 | |
1572 | lockdep_assert_held(&cgroup_mutex); |
1573 | |
1574 | cur_ss_mask |= cgrp_dfl_implicit_ss_mask; |
1575 | |
1576 | while (true) { |
1577 | u16 new_ss_mask = cur_ss_mask; |
1578 | |
1579 | do_each_subsys_mask(ss, ssid, cur_ss_mask) { |
1580 | new_ss_mask |= ss->depends_on; |
1581 | } while_each_subsys_mask(); |
1582 | |
1583 | /* |
1584 | * Mask out subsystems which aren't available. This can |
1585 | * happen only if some depended-upon subsystems were bound |
1586 | * to non-default hierarchies. |
1587 | */ |
1588 | new_ss_mask &= this_ss_mask; |
1589 | |
1590 | if (new_ss_mask == cur_ss_mask) |
1591 | break; |
1592 | cur_ss_mask = new_ss_mask; |
1593 | } |
1594 | |
1595 | return cur_ss_mask; |
1596 | } |
1597 | |
1598 | /** |
1599 | * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods |
1600 | * @kn: the kernfs_node being serviced |
1601 | * |
1602 | * This helper undoes cgroup_kn_lock_live() and should be invoked before |
1603 | * the method finishes if locking succeeded. Note that once this function |
 * returns, the cgroup returned by cgroup_kn_lock_live() may become
 * inaccessible at any time. If the caller intends to continue to access the
1606 | * cgroup, it should pin it before invoking this function. |
1607 | */ |
1608 | void cgroup_kn_unlock(struct kernfs_node *kn) |
1609 | { |
1610 | struct cgroup *cgrp; |
1611 | |
1612 | if (kernfs_type(kn) == KERNFS_DIR) |
1613 | cgrp = kn->priv; |
1614 | else |
1615 | cgrp = kn->parent->priv; |
1616 | |
1617 | cgroup_unlock(); |
1618 | |
1619 | kernfs_unbreak_active_protection(kn); |
1620 | cgroup_put(cgrp); |
1621 | } |
1622 | |
1623 | /** |
1624 | * cgroup_kn_lock_live - locking helper for cgroup kernfs methods |
1625 | * @kn: the kernfs_node being serviced |
1626 | * @drain_offline: perform offline draining on the cgroup |
1627 | * |
1628 | * This helper is to be used by a cgroup kernfs method currently servicing |
1629 | * @kn. It breaks the active protection, performs cgroup locking and |
1630 | * verifies that the associated cgroup is alive. Returns the cgroup if |
1631 | * alive; otherwise, %NULL. A successful return should be undone by a |
1632 | * matching cgroup_kn_unlock() invocation. If @drain_offline is %true, the |
1633 | * cgroup is drained of offlining csses before return. |
1634 | * |
1635 | * Any cgroup kernfs method implementation which requires locking the |
1636 | * associated cgroup should use this helper. It avoids nesting cgroup |
1637 | * locking under kernfs active protection and allows all kernfs operations |
1638 | * including self-removal. |
1639 | */ |
1640 | struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline) |
1641 | { |
1642 | struct cgroup *cgrp; |
1643 | |
1644 | if (kernfs_type(kn) == KERNFS_DIR) |
1645 | cgrp = kn->priv; |
1646 | else |
1647 | cgrp = kn->parent->priv; |
1648 | |
1649 | /* |
1650 | * We're gonna grab cgroup_mutex which nests outside kernfs |
	 * active_ref. cgroup liveness check alone provides enough
1652 | * protection against removal. Ensure @cgrp stays accessible and |
1653 | * break the active_ref protection. |
1654 | */ |
1655 | if (!cgroup_tryget(cgrp)) |
1656 | return NULL; |
1657 | kernfs_break_active_protection(kn); |
1658 | |
1659 | if (drain_offline) |
1660 | cgroup_lock_and_drain_offline(cgrp); |
1661 | else |
1662 | cgroup_lock(); |
1663 | |
1664 | if (!cgroup_is_dead(cgrp)) |
1665 | return cgrp; |
1666 | |
1667 | cgroup_kn_unlock(kn); |
1668 | return NULL; |
1669 | } |
1670 | |
1671 | static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft) |
1672 | { |
1673 | char name[CGROUP_FILE_NAME_MAX]; |
1674 | |
1675 | lockdep_assert_held(&cgroup_mutex); |
1676 | |
1677 | if (cft->file_offset) { |
		struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
		struct cgroup_file *cfile = (void *)css + cft->file_offset;

		spin_lock_irq(&cgroup_file_kn_lock);
		cfile->kn = NULL;
		spin_unlock_irq(&cgroup_file_kn_lock);

		del_timer_sync(&cfile->notify_timer);
	}

	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
1689 | } |
1690 | |
1691 | /** |
1692 | * css_clear_dir - remove subsys files in a cgroup directory |
1693 | * @css: target css |
1694 | */ |
1695 | static void css_clear_dir(struct cgroup_subsys_state *css) |
1696 | { |
1697 | struct cgroup *cgrp = css->cgroup; |
1698 | struct cftype *cfts; |
1699 | |
1700 | if (!(css->flags & CSS_VISIBLE)) |
1701 | return; |
1702 | |
1703 | css->flags &= ~CSS_VISIBLE; |
1704 | |
1705 | if (!css->ss) { |
1706 | if (cgroup_on_dfl(cgrp)) { |
			cgroup_addrm_files(css, cgrp,
					   cgroup_base_files, false);
			if (cgroup_psi_enabled())
				cgroup_addrm_files(css, cgrp,
						   cgroup_psi_files, false);
		} else {
			cgroup_addrm_files(css, cgrp,
					   cgroup1_base_files, false);
		}
	} else {
		list_for_each_entry(cfts, &css->ss->cfts, node)
			cgroup_addrm_files(css, cgrp, cfts, false);
1719 | } |
1720 | } |
1721 | |
1722 | /** |
1723 | * css_populate_dir - create subsys files in a cgroup directory |
1724 | * @css: target css |
1725 | * |
1726 | * On failure, no file is added. |
1727 | */ |
1728 | static int css_populate_dir(struct cgroup_subsys_state *css) |
1729 | { |
1730 | struct cgroup *cgrp = css->cgroup; |
1731 | struct cftype *cfts, *failed_cfts; |
1732 | int ret; |
1733 | |
1734 | if (css->flags & CSS_VISIBLE) |
1735 | return 0; |
1736 | |
1737 | if (!css->ss) { |
1738 | if (cgroup_on_dfl(cgrp)) { |
			ret = cgroup_addrm_files(css, cgrp,
						 cgroup_base_files, true);
			if (ret < 0)
				return ret;

			if (cgroup_psi_enabled()) {
				ret = cgroup_addrm_files(css, cgrp,
							 cgroup_psi_files, true);
				if (ret < 0)
					return ret;
			}
		} else {
			ret = cgroup_addrm_files(css, cgrp,
						 cgroup1_base_files, true);
			if (ret < 0)
				return ret;
		}
	} else {
		list_for_each_entry(cfts, &css->ss->cfts, node) {
			ret = cgroup_addrm_files(css, cgrp, cfts, true);
1759 | if (ret < 0) { |
1760 | failed_cfts = cfts; |
1761 | goto err; |
1762 | } |
1763 | } |
1764 | } |
1765 | |
1766 | css->flags |= CSS_VISIBLE; |
1767 | |
1768 | return 0; |
1769 | err: |
1770 | list_for_each_entry(cfts, &css->ss->cfts, node) { |
1771 | if (cfts == failed_cfts) |
1772 | break; |
		cgroup_addrm_files(css, cgrp, cfts, false);
1774 | } |
1775 | return ret; |
1776 | } |
1777 | |
1778 | int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask) |
1779 | { |
1780 | struct cgroup *dcgrp = &dst_root->cgrp; |
1781 | struct cgroup_subsys *ss; |
1782 | int ssid, ret; |
1783 | u16 dfl_disable_ss_mask = 0; |
1784 | |
1785 | lockdep_assert_held(&cgroup_mutex); |
1786 | |
1787 | do_each_subsys_mask(ss, ssid, ss_mask) { |
1788 | /* |
1789 | * If @ss has non-root csses attached to it, can't move. |
1790 | * If @ss is an implicit controller, it is exempt from this |
1791 | * rule and can be stolen. |
1792 | */ |
		if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)) &&
1794 | !ss->implicit_on_dfl) |
1795 | return -EBUSY; |
1796 | |
1797 | /* can't move between two non-dummy roots either */ |
1798 | if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root) |
1799 | return -EBUSY; |
1800 | |
1801 | /* |
1802 | * Collect ssid's that need to be disabled from default |
1803 | * hierarchy. |
1804 | */ |
1805 | if (ss->root == &cgrp_dfl_root) |
1806 | dfl_disable_ss_mask |= 1 << ssid; |
1807 | |
1808 | } while_each_subsys_mask(); |
1809 | |
1810 | if (dfl_disable_ss_mask) { |
1811 | struct cgroup *scgrp = &cgrp_dfl_root.cgrp; |
1812 | |
1813 | /* |
1814 | * Controllers from default hierarchy that need to be rebound |
1815 | * are all disabled together in one go. |
1816 | */ |
1817 | cgrp_dfl_root.subsys_mask &= ~dfl_disable_ss_mask; |
1818 | WARN_ON(cgroup_apply_control(scgrp)); |
		cgroup_finalize_control(scgrp, 0);
1820 | } |
1821 | |
1822 | do_each_subsys_mask(ss, ssid, ss_mask) { |
1823 | struct cgroup_root *src_root = ss->root; |
1824 | struct cgroup *scgrp = &src_root->cgrp; |
		struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
1826 | struct css_set *cset, *cset_pos; |
1827 | struct css_task_iter *it; |
1828 | |
1829 | WARN_ON(!css || cgroup_css(dcgrp, ss)); |
1830 | |
1831 | if (src_root != &cgrp_dfl_root) { |
1832 | /* disable from the source */ |
1833 | src_root->subsys_mask &= ~(1 << ssid); |
1834 | WARN_ON(cgroup_apply_control(scgrp)); |
			cgroup_finalize_control(scgrp, 0);
1836 | } |
1837 | |
1838 | /* rebind */ |
1839 | RCU_INIT_POINTER(scgrp->subsys[ssid], NULL); |
1840 | rcu_assign_pointer(dcgrp->subsys[ssid], css); |
1841 | ss->root = dst_root; |
1842 | css->cgroup = dcgrp; |
1843 | |
		spin_lock_irq(&css_set_lock);
		WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
		list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
					 e_cset_node[ss->id]) {
			list_move_tail(&cset->e_cset_node[ss->id],
				       &dcgrp->e_csets[ss->id]);
			/*
			 * All css_sets of scgrp are moved to dcgrp in the
			 * same order; patch in-flight iterators to preserve
			 * correct iteration. Since an iterator is always
			 * advanced right away and finishes when it->cset_pos
			 * meets it->cset_head, updating it->cset_head here
			 * is enough.
			 */
			list_for_each_entry(it, &cset->task_iters, iters_node)
				if (it->cset_head == &scgrp->e_csets[ss->id])
					it->cset_head = &dcgrp->e_csets[ss->id];
		}
		spin_unlock_irq(&css_set_lock);
1862 | |
1863 | if (ss->css_rstat_flush) { |
			list_del_rcu(&css->rstat_css_node);
			synchronize_rcu();
			list_add_rcu(&css->rstat_css_node,
				     &dcgrp->rstat_css_list);
1868 | } |
1869 | |
1870 | /* default hierarchy doesn't enable controllers by default */ |
1871 | dst_root->subsys_mask |= 1 << ssid; |
1872 | if (dst_root == &cgrp_dfl_root) { |
1873 | static_branch_enable(cgroup_subsys_on_dfl_key[ssid]); |
1874 | } else { |
1875 | dcgrp->subtree_control |= 1 << ssid; |
1876 | static_branch_disable(cgroup_subsys_on_dfl_key[ssid]); |
1877 | } |
1878 | |
		ret = cgroup_apply_control(dcgrp);
		if (ret)
			pr_warn("partial failure to rebind %s controller (err=%d)\n",
				ss->name, ret);
1883 | |
1884 | if (ss->bind) |
1885 | ss->bind(css); |
1886 | } while_each_subsys_mask(); |
1887 | |
	kernfs_activate(dcgrp->kn);
1889 | return 0; |
1890 | } |
1891 | |
1892 | int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node, |
1893 | struct kernfs_root *kf_root) |
1894 | { |
1895 | int len = 0; |
1896 | char *buf = NULL; |
1897 | struct cgroup_root *kf_cgroot = cgroup_root_from_kf(kf_root); |
1898 | struct cgroup *ns_cgroup; |
1899 | |
1900 | buf = kmalloc(PATH_MAX, GFP_KERNEL); |
1901 | if (!buf) |
1902 | return -ENOMEM; |
1903 | |
	spin_lock_irq(&css_set_lock);
	ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
	len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
	spin_unlock_irq(&css_set_lock);

	if (len == -E2BIG)
		len = -ERANGE;
	else if (len > 0) {
		seq_escape(sf, buf, " \t\n\\");
		len = 0;
	}
	kfree(buf);
1916 | return len; |
1917 | } |
1918 | |
1919 | enum cgroup2_param { |
1920 | Opt_nsdelegate, |
1921 | Opt_favordynmods, |
1922 | Opt_memory_localevents, |
1923 | Opt_memory_recursiveprot, |
1924 | Opt_memory_hugetlb_accounting, |
1925 | nr__cgroup2_params |
1926 | }; |
1927 | |
1928 | static const struct fs_parameter_spec cgroup2_fs_parameters[] = { |
1929 | fsparam_flag("nsdelegate" , Opt_nsdelegate), |
1930 | fsparam_flag("favordynmods" , Opt_favordynmods), |
1931 | fsparam_flag("memory_localevents" , Opt_memory_localevents), |
1932 | fsparam_flag("memory_recursiveprot" , Opt_memory_recursiveprot), |
1933 | fsparam_flag("memory_hugetlb_accounting" , Opt_memory_hugetlb_accounting), |
1934 | {} |
1935 | }; |
1936 | |
1937 | static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param) |
1938 | { |
1939 | struct cgroup_fs_context *ctx = cgroup_fc2context(fc); |
1940 | struct fs_parse_result result; |
1941 | int opt; |
1942 | |
	opt = fs_parse(fc, cgroup2_fs_parameters, param, &result);
1944 | if (opt < 0) |
1945 | return opt; |
1946 | |
1947 | switch (opt) { |
1948 | case Opt_nsdelegate: |
1949 | ctx->flags |= CGRP_ROOT_NS_DELEGATE; |
1950 | return 0; |
1951 | case Opt_favordynmods: |
1952 | ctx->flags |= CGRP_ROOT_FAVOR_DYNMODS; |
1953 | return 0; |
1954 | case Opt_memory_localevents: |
1955 | ctx->flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS; |
1956 | return 0; |
1957 | case Opt_memory_recursiveprot: |
1958 | ctx->flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT; |
1959 | return 0; |
1960 | case Opt_memory_hugetlb_accounting: |
1961 | ctx->flags |= CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING; |
1962 | return 0; |
1963 | } |
1964 | return -EINVAL; |
1965 | } |
1966 | |
1967 | static void apply_cgroup_root_flags(unsigned int root_flags) |
1968 | { |
1969 | if (current->nsproxy->cgroup_ns == &init_cgroup_ns) { |
1970 | if (root_flags & CGRP_ROOT_NS_DELEGATE) |
1971 | cgrp_dfl_root.flags |= CGRP_ROOT_NS_DELEGATE; |
1972 | else |
1973 | cgrp_dfl_root.flags &= ~CGRP_ROOT_NS_DELEGATE; |
1974 | |
		cgroup_favor_dynmods(&cgrp_dfl_root,
				     root_flags & CGRP_ROOT_FAVOR_DYNMODS);
1977 | |
1978 | if (root_flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS) |
1979 | cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS; |
1980 | else |
1981 | cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_LOCAL_EVENTS; |
1982 | |
1983 | if (root_flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT) |
1984 | cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT; |
1985 | else |
1986 | cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_RECURSIVE_PROT; |
1987 | |
1988 | if (root_flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING) |
1989 | cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING; |
1990 | else |
1991 | cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING; |
1992 | } |
1993 | } |
1994 | |
1995 | static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root) |
1996 | { |
	if (cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE)
		seq_puts(seq, ",nsdelegate");
	if (cgrp_dfl_root.flags & CGRP_ROOT_FAVOR_DYNMODS)
		seq_puts(seq, ",favordynmods");
	if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
		seq_puts(seq, ",memory_localevents");
	if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)
		seq_puts(seq, ",memory_recursiveprot");
	if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING)
		seq_puts(seq, ",memory_hugetlb_accounting");
2007 | return 0; |
2008 | } |
2009 | |
2010 | static int cgroup_reconfigure(struct fs_context *fc) |
2011 | { |
2012 | struct cgroup_fs_context *ctx = cgroup_fc2context(fc); |
2013 | |
	apply_cgroup_root_flags(ctx->flags);
2015 | return 0; |
2016 | } |
2017 | |
2018 | static void init_cgroup_housekeeping(struct cgroup *cgrp) |
2019 | { |
2020 | struct cgroup_subsys *ss; |
2021 | int ssid; |
2022 | |
	INIT_LIST_HEAD(&cgrp->self.sibling);
	INIT_LIST_HEAD(&cgrp->self.children);
	INIT_LIST_HEAD(&cgrp->cset_links);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	cgrp->self.cgroup = cgrp;
	cgrp->self.flags |= CSS_ONLINE;
	cgrp->dom_cgrp = cgrp;
	cgrp->max_descendants = INT_MAX;
	cgrp->max_depth = INT_MAX;
	INIT_LIST_HEAD(&cgrp->rstat_css_list);
	prev_cputime_init(&cgrp->prev_cputime);

	for_each_subsys(ss, ssid)
		INIT_LIST_HEAD(&cgrp->e_csets[ssid]);
2038 | |
2039 | init_waitqueue_head(&cgrp->offline_waitq); |
2040 | INIT_WORK(&cgrp->release_agent_work, cgroup1_release_agent); |
2041 | } |
2042 | |
2043 | void init_cgroup_root(struct cgroup_fs_context *ctx) |
2044 | { |
2045 | struct cgroup_root *root = ctx->root; |
2046 | struct cgroup *cgrp = &root->cgrp; |
2047 | |
	INIT_LIST_HEAD_RCU(&root->root_list);
	atomic_set(&root->nr_cgrps, 1);
	cgrp->root = root;
	init_cgroup_housekeeping(cgrp);

	/* DYNMODS must be modified through cgroup_favor_dynmods() */
	root->flags = ctx->flags & ~CGRP_ROOT_FAVOR_DYNMODS;
	if (ctx->release_agent)
		strscpy(root->release_agent_path, ctx->release_agent, PATH_MAX);
	if (ctx->name)
		strscpy(root->name, ctx->name, MAX_CGROUP_ROOT_NAMELEN);
	if (ctx->cpuset_clone_children)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
2061 | } |
2062 | |
2063 | int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask) |
2064 | { |
2065 | LIST_HEAD(tmp_links); |
2066 | struct cgroup *root_cgrp = &root->cgrp; |
2067 | struct kernfs_syscall_ops *kf_sops; |
2068 | struct css_set *cset; |
2069 | int i, ret; |
2070 | |
2071 | lockdep_assert_held(&cgroup_mutex); |
2072 | |
	ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release,
			      0, GFP_KERNEL);
2075 | if (ret) |
2076 | goto out; |
2077 | |
2078 | /* |
2079 | * We're accessing css_set_count without locking css_set_lock here, |
2080 | * but that's OK - it can only be increased by someone holding |
2081 | * cgroup_lock, and that's us. Later rebinding may disable |
2082 | * controllers on the default hierarchy and thus create new csets, |
2083 | * which can't be more than the existing ones. Allocate 2x. |
2084 | */ |
	ret = allocate_cgrp_cset_links(2 * css_set_count, &tmp_links);
2086 | if (ret) |
2087 | goto cancel_ref; |
2088 | |
2089 | ret = cgroup_init_root_id(root); |
2090 | if (ret) |
2091 | goto cancel_ref; |
2092 | |
2093 | kf_sops = root == &cgrp_dfl_root ? |
2094 | &cgroup_kf_syscall_ops : &cgroup1_kf_syscall_ops; |
2095 | |
	root->kf_root = kernfs_create_root(kf_sops,
					   KERNFS_ROOT_CREATE_DEACTIVATED |
					   KERNFS_ROOT_SUPPORT_EXPORTOP |
					   KERNFS_ROOT_SUPPORT_USER_XATTR,
					   root_cgrp);
	if (IS_ERR(root->kf_root)) {
		ret = PTR_ERR(root->kf_root);
		goto exit_root_id;
	}
	root_cgrp->kn = kernfs_root_to_node(root->kf_root);
	WARN_ON_ONCE(cgroup_ino(root_cgrp) != 1);
	root_cgrp->ancestors[0] = root_cgrp;

	ret = css_populate_dir(&root_cgrp->self);
2110 | if (ret) |
2111 | goto destroy_root; |
2112 | |
	ret = cgroup_rstat_init(root_cgrp);
2114 | if (ret) |
2115 | goto destroy_root; |
2116 | |
	ret = rebind_subsystems(root, ss_mask);
2118 | if (ret) |
2119 | goto exit_stats; |
2120 | |
	ret = cgroup_bpf_inherit(root_cgrp);
2122 | WARN_ON_ONCE(ret); |
2123 | |
2124 | trace_cgroup_setup_root(root); |
2125 | |
2126 | /* |
2127 | * There must be no failure case after here, since rebinding takes |
2128 | * care of subsystems' refcounts, which are explicitly dropped in |
2129 | * the failure exit path. |
2130 | */ |
	list_add_rcu(&root->root_list, &cgroup_roots);
2132 | cgroup_root_count++; |
2133 | |
2134 | /* |
2135 | * Link the root cgroup in this hierarchy into all the css_set |
2136 | * objects. |
2137 | */ |
	spin_lock_irq(&css_set_lock);
	hash_for_each(css_set_table, i, cset, hlist) {
		link_css_set(&tmp_links, cset, root_cgrp);
		if (css_set_populated(cset))
			cgroup_update_populated(root_cgrp, true);
	}
	spin_unlock_irq(&css_set_lock);
2145 | |
2146 | BUG_ON(!list_empty(&root_cgrp->self.children)); |
2147 | BUG_ON(atomic_read(&root->nr_cgrps) != 1); |
2148 | |
2149 | ret = 0; |
2150 | goto out; |
2151 | |
exit_stats:
	cgroup_rstat_exit(root_cgrp);
destroy_root:
	kernfs_destroy_root(root->kf_root);
	root->kf_root = NULL;
exit_root_id:
	cgroup_exit_root_id(root);
cancel_ref:
	percpu_ref_exit(&root_cgrp->self.refcnt);
out:
	free_cgrp_cset_links(&tmp_links);
2163 | return ret; |
2164 | } |
2165 | |
2166 | int cgroup_do_get_tree(struct fs_context *fc) |
2167 | { |
2168 | struct cgroup_fs_context *ctx = cgroup_fc2context(fc); |
2169 | int ret; |
2170 | |
2171 | ctx->kfc.root = ctx->root->kf_root; |
2172 | if (fc->fs_type == &cgroup2_fs_type) |
2173 | ctx->kfc.magic = CGROUP2_SUPER_MAGIC; |
2174 | else |
2175 | ctx->kfc.magic = CGROUP_SUPER_MAGIC; |
2176 | ret = kernfs_get_tree(fc); |
2177 | |
2178 | /* |
2179 | * In non-init cgroup namespace, instead of root cgroup's dentry, |
2180 | * we return the dentry corresponding to the cgroupns->root_cgrp. |
2181 | */ |
2182 | if (!ret && ctx->ns != &init_cgroup_ns) { |
2183 | struct dentry *nsdentry; |
2184 | struct super_block *sb = fc->root->d_sb; |
2185 | struct cgroup *cgrp; |
2186 | |
2187 | cgroup_lock(); |
		spin_lock_irq(&css_set_lock);

		cgrp = cset_cgroup_from_root(ctx->ns->root_cset, ctx->root);

		spin_unlock_irq(&css_set_lock);
		cgroup_unlock();

		nsdentry = kernfs_node_dentry(cgrp->kn, sb);
		dput(fc->root);
		if (IS_ERR(nsdentry)) {
			deactivate_locked_super(sb);
			ret = PTR_ERR(nsdentry);
2200 | nsdentry = NULL; |
2201 | } |
2202 | fc->root = nsdentry; |
2203 | } |
2204 | |
2205 | if (!ctx->kfc.new_sb_created) |
		cgroup_put(&ctx->root->cgrp);
2207 | |
2208 | return ret; |
2209 | } |
2210 | |
2211 | /* |
2212 | * Destroy a cgroup filesystem context. |
2213 | */ |
2214 | static void cgroup_fs_context_free(struct fs_context *fc) |
2215 | { |
2216 | struct cgroup_fs_context *ctx = cgroup_fc2context(fc); |
2217 | |
	kfree(ctx->name);
	kfree(ctx->release_agent);
	put_cgroup_ns(ctx->ns);
	kernfs_free_fs_context(fc);
	kfree(ctx);
2223 | } |
2224 | |
2225 | static int cgroup_get_tree(struct fs_context *fc) |
2226 | { |
2227 | struct cgroup_fs_context *ctx = cgroup_fc2context(fc); |
2228 | int ret; |
2229 | |
2230 | WRITE_ONCE(cgrp_dfl_visible, true); |
	cgroup_get_live(&cgrp_dfl_root.cgrp);
2232 | ctx->root = &cgrp_dfl_root; |
2233 | |
2234 | ret = cgroup_do_get_tree(fc); |
2235 | if (!ret) |
		apply_cgroup_root_flags(ctx->flags);
2237 | return ret; |
2238 | } |
2239 | |
2240 | static const struct fs_context_operations cgroup_fs_context_ops = { |
2241 | .free = cgroup_fs_context_free, |
2242 | .parse_param = cgroup2_parse_param, |
2243 | .get_tree = cgroup_get_tree, |
2244 | .reconfigure = cgroup_reconfigure, |
2245 | }; |
2246 | |
2247 | static const struct fs_context_operations cgroup1_fs_context_ops = { |
2248 | .free = cgroup_fs_context_free, |
2249 | .parse_param = cgroup1_parse_param, |
2250 | .get_tree = cgroup1_get_tree, |
2251 | .reconfigure = cgroup1_reconfigure, |
2252 | }; |
2253 | |
2254 | /* |
2255 | * Initialise the cgroup filesystem creation/reconfiguration context. Notably, |
2256 | * we select the namespace we're going to use. |
2257 | */ |
2258 | static int cgroup_init_fs_context(struct fs_context *fc) |
2259 | { |
2260 | struct cgroup_fs_context *ctx; |
2261 | |
	ctx = kzalloc(sizeof(struct cgroup_fs_context), GFP_KERNEL);
2263 | if (!ctx) |
2264 | return -ENOMEM; |
2265 | |
2266 | ctx->ns = current->nsproxy->cgroup_ns; |
	get_cgroup_ns(ctx->ns);
2268 | fc->fs_private = &ctx->kfc; |
2269 | if (fc->fs_type == &cgroup2_fs_type) |
2270 | fc->ops = &cgroup_fs_context_ops; |
2271 | else |
2272 | fc->ops = &cgroup1_fs_context_ops; |
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ns->user_ns);
2275 | fc->global = true; |
2276 | |
2277 | if (have_favordynmods) |
2278 | ctx->flags |= CGRP_ROOT_FAVOR_DYNMODS; |
2279 | |
2280 | return 0; |
2281 | } |
2282 | |
2283 | static void cgroup_kill_sb(struct super_block *sb) |
2284 | { |
2285 | struct kernfs_root *kf_root = kernfs_root_from_sb(sb); |
2286 | struct cgroup_root *root = cgroup_root_from_kf(kf_root); |
2287 | |
2288 | /* |
2289 | * If @root doesn't have any children, start killing it. |
2290 | * This prevents new mounts by disabling percpu_ref_tryget_live(). |
2291 | * |
2292 | * And don't kill the default root. |
2293 | */ |
	if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root &&
	    !percpu_ref_is_dying(&root->cgrp.self.refcnt)) {
		cgroup_bpf_offline(&root->cgrp);
		percpu_ref_kill(&root->cgrp.self.refcnt);
	}
	cgroup_put(&root->cgrp);
2300 | kernfs_kill_sb(sb); |
2301 | } |
2302 | |
2303 | struct file_system_type cgroup_fs_type = { |
	.name = "cgroup",
2305 | .init_fs_context = cgroup_init_fs_context, |
2306 | .parameters = cgroup1_fs_parameters, |
2307 | .kill_sb = cgroup_kill_sb, |
2308 | .fs_flags = FS_USERNS_MOUNT, |
2309 | }; |
2310 | |
2311 | static struct file_system_type cgroup2_fs_type = { |
	.name = "cgroup2",
2313 | .init_fs_context = cgroup_init_fs_context, |
2314 | .parameters = cgroup2_fs_parameters, |
2315 | .kill_sb = cgroup_kill_sb, |
2316 | .fs_flags = FS_USERNS_MOUNT, |
2317 | }; |
2318 | |
2319 | #ifdef CONFIG_CPUSETS |
2320 | static const struct fs_context_operations cpuset_fs_context_ops = { |
2321 | .get_tree = cgroup1_get_tree, |
2322 | .free = cgroup_fs_context_free, |
2323 | }; |
2324 | |
2325 | /* |
2326 | * This is ugly, but preserves the userspace API for existing cpuset |
2327 | * users. If someone tries to mount the "cpuset" filesystem, we |
 * silently switch it to mount "cgroup" instead.
2329 | */ |
2330 | static int cpuset_init_fs_context(struct fs_context *fc) |
2331 | { |
	char *agent = kstrdup("/sbin/cpuset_release_agent", GFP_USER);
2333 | struct cgroup_fs_context *ctx; |
2334 | int err; |
2335 | |
2336 | err = cgroup_init_fs_context(fc); |
2337 | if (err) { |
		kfree(agent);
2339 | return err; |
2340 | } |
2341 | |
2342 | fc->ops = &cpuset_fs_context_ops; |
2343 | |
2344 | ctx = cgroup_fc2context(fc); |
2345 | ctx->subsys_mask = 1 << cpuset_cgrp_id; |
2346 | ctx->flags |= CGRP_ROOT_NOPREFIX; |
2347 | ctx->release_agent = agent; |
2348 | |
	get_filesystem(&cgroup_fs_type);
	put_filesystem(fc->fs_type);
2351 | fc->fs_type = &cgroup_fs_type; |
2352 | |
2353 | return 0; |
2354 | } |
2355 | |
2356 | static struct file_system_type cpuset_fs_type = { |
	.name = "cpuset",
2358 | .init_fs_context = cpuset_init_fs_context, |
2359 | .fs_flags = FS_USERNS_MOUNT, |
2360 | }; |
2361 | #endif |
2362 | |
2363 | int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen, |
2364 | struct cgroup_namespace *ns) |
2365 | { |
	struct cgroup *root = cset_cgroup_from_root(ns->root_cset, cgrp->root);

	return kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen);
2369 | } |
2370 | |
2371 | int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen, |
2372 | struct cgroup_namespace *ns) |
2373 | { |
2374 | int ret; |
2375 | |
2376 | cgroup_lock(); |
	spin_lock_irq(&css_set_lock);

	ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);

	spin_unlock_irq(&css_set_lock);
2382 | cgroup_unlock(); |
2383 | |
2384 | return ret; |
2385 | } |
2386 | EXPORT_SYMBOL_GPL(cgroup_path_ns); |
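
/*
 * Sketch of a caller (illustrative, not from this file): format @cgrp's
 * path as seen from the current task's cgroup namespace.
 *
 *	char buf[PATH_MAX];
 *
 *	if (cgroup_path_ns(cgrp, buf, sizeof(buf),
 *			   current->nsproxy->cgroup_ns) < 0)
 *		... handle -ERANGE etc. ...
 */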
2387 | |
2388 | /** |
2389 | * cgroup_attach_lock - Lock for ->attach() |
2390 | * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem |
2391 | * |
2392 | * cgroup migration sometimes needs to stabilize threadgroups against forks and |
2393 | * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach() |
2394 | * implementations (e.g. cpuset), also need to disable CPU hotplug. |
2395 | * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can |
2396 | * lead to deadlocks. |
2397 | * |
2398 | * Bringing up a CPU may involve creating and destroying tasks which requires |
2399 | * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside |
2400 | * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while |
2401 | * write-locking threadgroup_rwsem, the locking order is reversed and we end up |
2402 | * waiting for an on-going CPU hotplug operation which in turn is waiting for |
2403 | * the threadgroup_rwsem to be released to create new tasks. For more details: |
2404 | * |
2405 | * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu |
2406 | * |
2407 | * Resolve the situation by always acquiring cpus_read_lock() before optionally |
2408 | * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that |
2409 | * CPU hotplug is disabled on entry. |
2410 | */ |
2411 | void cgroup_attach_lock(bool lock_threadgroup) |
2412 | { |
2413 | cpus_read_lock(); |
2414 | if (lock_threadgroup) |
2415 | percpu_down_write(&cgroup_threadgroup_rwsem); |
2416 | } |
2417 | |
2418 | /** |
2419 | * cgroup_attach_unlock - Undo cgroup_attach_lock() |
2420 | * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem |
2421 | */ |
2422 | void cgroup_attach_unlock(bool lock_threadgroup) |
2423 | { |
2424 | if (lock_threadgroup) |
2425 | percpu_up_write(&cgroup_threadgroup_rwsem); |
2426 | cpus_read_unlock(); |
2427 | } |
2428 | |
2429 | /** |
2430 | * cgroup_migrate_add_task - add a migration target task to a migration context |
2431 | * @task: target task |
2432 | * @mgctx: target migration context |
2433 | * |
2434 | * Add @task, which is a migration target, to @mgctx->tset. This function |
 * becomes a noop if @task doesn't need to be migrated. @task's css_set
 * should have been added as a migration source and @task->cg_list will be
 * moved from the css_set's tasks list to the mg_tasks one.
2438 | */ |
2439 | static void cgroup_migrate_add_task(struct task_struct *task, |
2440 | struct cgroup_mgctx *mgctx) |
2441 | { |
2442 | struct css_set *cset; |
2443 | |
2444 | lockdep_assert_held(&css_set_lock); |
2445 | |
2446 | /* @task either already exited or can't exit until the end */ |
2447 | if (task->flags & PF_EXITING) |
2448 | return; |
2449 | |
2450 | /* cgroup_threadgroup_rwsem protects racing against forks */ |
2451 | WARN_ON_ONCE(list_empty(&task->cg_list)); |
2452 | |
2453 | cset = task_css_set(task); |
2454 | if (!cset->mg_src_cgrp) |
2455 | return; |
2456 | |
2457 | mgctx->tset.nr_tasks++; |
2458 | |
	list_move_tail(&task->cg_list, &cset->mg_tasks);
	if (list_empty(&cset->mg_node))
		list_add_tail(&cset->mg_node,
			      &mgctx->tset.src_csets);
	if (list_empty(&cset->mg_dst_cset->mg_node))
		list_add_tail(&cset->mg_dst_cset->mg_node,
			      &mgctx->tset.dst_csets);
2466 | } |
2467 | |
2468 | /** |
2469 | * cgroup_taskset_first - reset taskset and return the first task |
2470 | * @tset: taskset of interest |
2471 | * @dst_cssp: output variable for the destination css |
2472 | * |
2473 | * @tset iteration is initialized and the first task is returned. |
2474 | */ |
2475 | struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset, |
2476 | struct cgroup_subsys_state **dst_cssp) |
2477 | { |
2478 | tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node); |
2479 | tset->cur_task = NULL; |
2480 | |
2481 | return cgroup_taskset_next(tset, dst_cssp); |
2482 | } |
2483 | |
2484 | /** |
2485 | * cgroup_taskset_next - iterate to the next task in taskset |
2486 | * @tset: taskset of interest |
2487 | * @dst_cssp: output variable for the destination css |
2488 | * |
2489 | * Return the next task in @tset. Iteration must have been initialized |
2490 | * with cgroup_taskset_first(). |
2491 | */ |
2492 | struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset, |
2493 | struct cgroup_subsys_state **dst_cssp) |
2494 | { |
2495 | struct css_set *cset = tset->cur_cset; |
2496 | struct task_struct *task = tset->cur_task; |
2497 | |
2498 | while (CGROUP_HAS_SUBSYS_CONFIG && &cset->mg_node != tset->csets) { |
2499 | if (!task) |
2500 | task = list_first_entry(&cset->mg_tasks, |
2501 | struct task_struct, cg_list); |
2502 | else |
2503 | task = list_next_entry(task, cg_list); |
2504 | |
2505 | if (&task->cg_list != &cset->mg_tasks) { |
2506 | tset->cur_cset = cset; |
2507 | tset->cur_task = task; |
2508 | |
2509 | /* |
2510 | * This function may be called both before and |
2511 | * after cgroup_migrate_execute(). The two cases |
2512 | * can be distinguished by looking at whether @cset |
2513 | * has its ->mg_dst_cset set. |
2514 | */ |
2515 | if (cset->mg_dst_cset) |
2516 | *dst_cssp = cset->mg_dst_cset->subsys[tset->ssid]; |
2517 | else |
2518 | *dst_cssp = cset->subsys[tset->ssid]; |
2519 | |
2520 | return task; |
2521 | } |
2522 | |
2523 | cset = list_next_entry(cset, mg_node); |
2524 | task = NULL; |
2525 | } |
2526 | |
2527 | return NULL; |
2528 | } |
2529 | |
2530 | /** |
2531 | * cgroup_migrate_execute - migrate a taskset |
2532 | * @mgctx: migration context |
2533 | * |
2534 | * Migrate tasks in @mgctx as setup by migration preparation functions. |
2535 | * This function fails iff one of the ->can_attach callbacks fails and |
2536 | * guarantees that either all or none of the tasks in @mgctx are migrated. |
2537 | * @mgctx is consumed regardless of success. |
2538 | */ |
2539 | static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) |
2540 | { |
2541 | struct cgroup_taskset *tset = &mgctx->tset; |
2542 | struct cgroup_subsys *ss; |
2543 | struct task_struct *task, *tmp_task; |
2544 | struct css_set *cset, *tmp_cset; |
2545 | int ssid, failed_ssid, ret; |
2546 | |
2547 | /* check that we can legitimately attach to the cgroup */ |
2548 | if (tset->nr_tasks) { |
2549 | do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { |
2550 | if (ss->can_attach) { |
2551 | tset->ssid = ssid; |
2552 | ret = ss->can_attach(tset); |
2553 | if (ret) { |
2554 | failed_ssid = ssid; |
2555 | goto out_cancel_attach; |
2556 | } |
2557 | } |
2558 | } while_each_subsys_mask(); |
2559 | } |
2560 | |
2561 | /* |
2562 | * Now that we're guaranteed success, proceed to move all tasks to |
2563 | * the new cgroup. There are no failure cases after here, so this |
2564 | * is the commit point. |
2565 | */ |
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(cset, &tset->src_csets, mg_node) {
		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
			struct css_set *from_cset = task_css_set(task);
			struct css_set *to_cset = cset->mg_dst_cset;

			get_css_set(to_cset);
			to_cset->nr_tasks++;
			css_set_move_task(task, from_cset, to_cset, true);
			from_cset->nr_tasks--;
			/*
			 * If the source or destination cgroup is frozen,
			 * the task might need to change its frozen state.
			 */
			cgroup_freezer_migrate_task(task, from_cset->dfl_cgrp,
						    to_cset->dfl_cgrp);
			put_css_set_locked(from_cset);
		}
	}
	spin_unlock_irq(&css_set_lock);
2587 | |
2588 | /* |
2589 | * Migration is committed, all target tasks are now on dst_csets. |
2590 | * Nothing is sensitive to fork() after this point. Notify |
2591 | * controllers that migration is complete. |
2592 | */ |
2593 | tset->csets = &tset->dst_csets; |
2594 | |
2595 | if (tset->nr_tasks) { |
2596 | do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { |
2597 | if (ss->attach) { |
2598 | tset->ssid = ssid; |
2599 | ss->attach(tset); |
2600 | } |
2601 | } while_each_subsys_mask(); |
2602 | } |
2603 | |
2604 | ret = 0; |
2605 | goto out_release_tset; |
2606 | |
2607 | out_cancel_attach: |
2608 | if (tset->nr_tasks) { |
2609 | do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { |
2610 | if (ssid == failed_ssid) |
2611 | break; |
2612 | if (ss->cancel_attach) { |
2613 | tset->ssid = ssid; |
2614 | ss->cancel_attach(tset); |
2615 | } |
2616 | } while_each_subsys_mask(); |
2617 | } |
2618 | out_release_tset: |
	spin_lock_irq(&css_set_lock);
	list_splice_init(&tset->dst_csets, &tset->src_csets);
	list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
		list_del_init(&cset->mg_node);
	}
	spin_unlock_irq(&css_set_lock);
2626 | |
2627 | /* |
2628 | * Re-initialize the cgroup_taskset structure in case it is reused |
2629 | * again in another cgroup_migrate_add_task()/cgroup_migrate_execute() |
2630 | * iteration. |
2631 | */ |
2632 | tset->nr_tasks = 0; |
2633 | tset->csets = &tset->src_csets; |
2634 | return ret; |
2635 | } |
2636 | |
2637 | /** |
2638 | * cgroup_migrate_vet_dst - verify whether a cgroup can be migration destination |
2639 | * @dst_cgrp: destination cgroup to test |
2640 | * |
2641 | * On the default hierarchy, except for the mixable, (possible) thread root |
2642 | * and threaded cgroups, subtree_control must be zero for migration |
2643 | * destination cgroups with tasks so that child cgroups don't compete |
2644 | * against tasks. |
2645 | */ |
2646 | int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp) |
2647 | { |
2648 | /* v1 doesn't have any restriction */ |
	if (!cgroup_on_dfl(dst_cgrp))
		return 0;

	/* verify @dst_cgrp can host resources */
	if (!cgroup_is_valid_domain(dst_cgrp->dom_cgrp))
		return -EOPNOTSUPP;

	/*
	 * If @dst_cgrp is already or can become a thread root or is
	 * threaded, it doesn't matter.
	 */
	if (cgroup_can_be_thread_root(dst_cgrp) || cgroup_is_threaded(dst_cgrp))
		return 0;
2661 | return 0; |
2662 | |
2663 | /* apply no-internal-process constraint */ |
2664 | if (dst_cgrp->subtree_control) |
2665 | return -EBUSY; |
2666 | |
2667 | return 0; |
2668 | } |
2669 | |
2670 | /** |
2671 | * cgroup_migrate_finish - cleanup after attach |
2672 | * @mgctx: migration context |
2673 | * |
2674 | * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst(). See |
2675 | * those functions for details. |
2676 | */ |
2677 | void cgroup_migrate_finish(struct cgroup_mgctx *mgctx) |
2678 | { |
2679 | struct css_set *cset, *tmp_cset; |
2680 | |
2681 | lockdep_assert_held(&cgroup_mutex); |
2682 | |
	spin_lock_irq(&css_set_lock);

	list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_src_csets,
				 mg_src_preload_node) {
		cset->mg_src_cgrp = NULL;
		cset->mg_dst_cgrp = NULL;
		cset->mg_dst_cset = NULL;
		list_del_init(&cset->mg_src_preload_node);
		put_css_set_locked(cset);
	}

	list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_dst_csets,
				 mg_dst_preload_node) {
		cset->mg_src_cgrp = NULL;
		cset->mg_dst_cgrp = NULL;
		cset->mg_dst_cset = NULL;
		list_del_init(&cset->mg_dst_preload_node);
		put_css_set_locked(cset);
	}

	spin_unlock_irq(&css_set_lock);
2704 | } |
2705 | |
2706 | /** |
2707 | * cgroup_migrate_add_src - add a migration source css_set |
2708 | * @src_cset: the source css_set to add |
2709 | * @dst_cgrp: the destination cgroup |
2710 | * @mgctx: migration context |
2711 | * |
2712 | * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp. Pin |
2713 | * @src_cset and add it to @mgctx->src_csets, which should later be cleaned |
2714 | * up by cgroup_migrate_finish(). |
2715 | * |
2716 | * This function may be called without holding cgroup_threadgroup_rwsem |
2717 | * even if the target is a process. Threads may be created and destroyed |
2718 | * but as long as cgroup_mutex is not dropped, no new css_set can be put |
2719 | * into play and the preloaded css_sets are guaranteed to cover all |
2720 | * migrations. |
2721 | */ |
2722 | void cgroup_migrate_add_src(struct css_set *src_cset, |
2723 | struct cgroup *dst_cgrp, |
2724 | struct cgroup_mgctx *mgctx) |
2725 | { |
2726 | struct cgroup *src_cgrp; |
2727 | |
2728 | lockdep_assert_held(&cgroup_mutex); |
2729 | lockdep_assert_held(&css_set_lock); |
2730 | |
2731 | /* |
	 * If ->dead, @src_cset is associated with one or more dead cgroups
2733 | * and doesn't contain any migratable tasks. Ignore it early so |
2734 | * that the rest of migration path doesn't get confused by it. |
2735 | */ |
2736 | if (src_cset->dead) |
2737 | return; |
2738 | |
	if (!list_empty(&src_cset->mg_src_preload_node))
2740 | return; |
2741 | |
	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
2743 | |
2744 | WARN_ON(src_cset->mg_src_cgrp); |
2745 | WARN_ON(src_cset->mg_dst_cgrp); |
2746 | WARN_ON(!list_empty(&src_cset->mg_tasks)); |
2747 | WARN_ON(!list_empty(&src_cset->mg_node)); |
2748 | |
2749 | src_cset->mg_src_cgrp = src_cgrp; |
2750 | src_cset->mg_dst_cgrp = dst_cgrp; |
	get_css_set(src_cset);
	list_add_tail(&src_cset->mg_src_preload_node, &mgctx->preloaded_src_csets);
2753 | } |
2754 | |
2755 | /** |
2756 | * cgroup_migrate_prepare_dst - prepare destination css_sets for migration |
2757 | * @mgctx: migration context |
2758 | * |
2759 | * Tasks are about to be moved and all the source css_sets have been |
2760 | * preloaded to @mgctx->preloaded_src_csets. This function looks up and |
 * pins all destination css_sets, links each to its source, and appends them
2762 | * to @mgctx->preloaded_dst_csets. |
2763 | * |
2764 | * This function must be called after cgroup_migrate_add_src() has been |
2765 | * called on each migration source css_set. After migration is performed |
2766 | * using cgroup_migrate(), cgroup_migrate_finish() must be called on |
2767 | * @mgctx. |
2768 | */ |
2769 | int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx) |
2770 | { |
2771 | struct css_set *src_cset, *tmp_cset; |
2772 | |
2773 | lockdep_assert_held(&cgroup_mutex); |
2774 | |
2775 | /* look up the dst cset for each src cset and link it to src */ |
2776 | list_for_each_entry_safe(src_cset, tmp_cset, &mgctx->preloaded_src_csets, |
2777 | mg_src_preload_node) { |
2778 | struct css_set *dst_cset; |
2779 | struct cgroup_subsys *ss; |
2780 | int ssid; |
2781 | |
		dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp);
2783 | if (!dst_cset) |
2784 | return -ENOMEM; |
2785 | |
2786 | WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset); |
2787 | |
2788 | /* |
		 * If src cset equals dst, it's a noop. Drop the src.
2790 | * cgroup_migrate() will skip the cset too. Note that we |
2791 | * can't handle src == dst as some nodes are used by both. |
2792 | */ |
2793 | if (src_cset == dst_cset) { |
2794 | src_cset->mg_src_cgrp = NULL; |
2795 | src_cset->mg_dst_cgrp = NULL; |
			list_del_init(&src_cset->mg_src_preload_node);
			put_css_set(src_cset);
			put_css_set(dst_cset);
2799 | continue; |
2800 | } |
2801 | |
2802 | src_cset->mg_dst_cset = dst_cset; |
2803 | |
		if (list_empty(&dst_cset->mg_dst_preload_node))
			list_add_tail(&dst_cset->mg_dst_preload_node,
				      &mgctx->preloaded_dst_csets);
		else
			put_css_set(dst_cset);
2809 | |
2810 | for_each_subsys(ss, ssid) |
2811 | if (src_cset->subsys[ssid] != dst_cset->subsys[ssid]) |
2812 | mgctx->ss_mask |= 1 << ssid; |
2813 | } |
2814 | |
2815 | return 0; |
2816 | } |
2817 | |
2818 | /** |
2819 | * cgroup_migrate - migrate a process or task to a cgroup |
2820 | * @leader: the leader of the process or the task to migrate |
2821 | * @threadgroup: whether @leader points to the whole process or a single task |
2822 | * @mgctx: migration context |
2823 | * |
2824 | * Migrate a process or task denoted by @leader. If migrating a process, |
2825 | * the caller must be holding cgroup_threadgroup_rwsem. The caller is also |
2826 | * responsible for invoking cgroup_migrate_add_src() and |
2827 | * cgroup_migrate_prepare_dst() on the targets before invoking this |
2828 | * function and following up with cgroup_migrate_finish(). |
2829 | * |
2830 | * As long as a controller's ->can_attach() doesn't fail, this function is |
2831 | * guaranteed to succeed. This means that, excluding ->can_attach() |
2832 | * failure, when migrating multiple targets, the success or failure can be |
 * decided for all targets by invoking cgroup_migrate_prepare_dst() before
2834 | * actually starting migrating. |
2835 | */ |
2836 | int cgroup_migrate(struct task_struct *leader, bool threadgroup, |
2837 | struct cgroup_mgctx *mgctx) |
2838 | { |
2839 | struct task_struct *task; |
2840 | |
2841 | /* |
2842 | * The following thread iteration should be inside an RCU critical |
2843 | * section to prevent tasks from being freed while taking the snapshot. |
2844 | * spin_lock_irq() implies RCU critical section here. |
2845 | */ |
	spin_lock_irq(&css_set_lock);
2847 | task = leader; |
2848 | do { |
2849 | cgroup_migrate_add_task(task, mgctx); |
2850 | if (!threadgroup) |
2851 | break; |
2852 | } while_each_thread(leader, task); |
	spin_unlock_irq(&css_set_lock);
2854 | |
2855 | return cgroup_migrate_execute(mgctx); |
2856 | } |
2857 | |
2858 | /** |
2859 | * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup |
2860 | * @dst_cgrp: the cgroup to attach to |
2861 | * @leader: the task or the leader of the threadgroup to be attached |
2862 | * @threadgroup: attach the whole threadgroup? |
2863 | * |
2864 | * Call holding cgroup_mutex and cgroup_threadgroup_rwsem. |
2865 | */ |
2866 | int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader, |
2867 | bool threadgroup) |
2868 | { |
2869 | DEFINE_CGROUP_MGCTX(mgctx); |
2870 | struct task_struct *task; |
2871 | int ret = 0; |
2872 | |
2873 | /* look up all src csets */ |
	spin_lock_irq(&css_set_lock);
	rcu_read_lock();
	task = leader;
	do {
		cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &mgctx);
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	spin_unlock_irq(&css_set_lock);

	/* prepare dst csets and commit */
	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (!ret)
		ret = cgroup_migrate(leader, threadgroup, &mgctx);

	cgroup_migrate_finish(&mgctx);
2891 | |
2892 | if (!ret) |
2893 | TRACE_CGROUP_PATH(attach_task, dst_cgrp, leader, threadgroup); |
2894 | |
2895 | return ret; |
2896 | } |
2897 | |
2898 | struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup, |
2899 | bool *threadgroup_locked) |
2900 | { |
2901 | struct task_struct *tsk; |
2902 | pid_t pid; |
2903 | |
	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return ERR_PTR(-EINVAL);
2906 | |
2907 | /* |
2908 | * If we migrate a single thread, we don't care about threadgroup |
2909 | * stability. If the thread is `current`, it won't exit(2) under our |
2910 | * hands or change PID through exec(2). We exclude |
2911 | * cgroup_update_dfl_csses and other cgroup_{proc,thread}s_write |
2912 | * callers by cgroup_mutex. |
2913 | * Therefore, we can skip the global lock. |
2914 | */ |
2915 | lockdep_assert_held(&cgroup_mutex); |
2916 | *threadgroup_locked = pid || threadgroup; |
	cgroup_attach_lock(*threadgroup_locked);
2918 | |
2919 | rcu_read_lock(); |
2920 | if (pid) { |
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			tsk = ERR_PTR(-ESRCH);
2924 | goto out_unlock_threadgroup; |
2925 | } |
2926 | } else { |
2927 | tsk = current; |
2928 | } |
2929 | |
2930 | if (threadgroup) |
2931 | tsk = tsk->group_leader; |
2932 | |
2933 | /* |
2934 | * kthreads may acquire PF_NO_SETAFFINITY during initialization. |
2935 | * If userland migrates such a kthread to a non-root cgroup, it can |
	 * become trapped in a cpuset, or an RT kthread may be born in a
2937 | * cgroup with no rt_runtime allocated. Just say no. |
2938 | */ |
2939 | if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) { |
		tsk = ERR_PTR(-EINVAL);
2941 | goto out_unlock_threadgroup; |
2942 | } |
2943 | |
	get_task_struct(tsk);
2945 | goto out_unlock_rcu; |
2946 | |
2947 | out_unlock_threadgroup: |
	cgroup_attach_unlock(*threadgroup_locked);
2949 | *threadgroup_locked = false; |
2950 | out_unlock_rcu: |
2951 | rcu_read_unlock(); |
2952 | return tsk; |
2953 | } |
2954 | |
2955 | void cgroup_procs_write_finish(struct task_struct *task, bool threadgroup_locked) |
2956 | { |
2957 | struct cgroup_subsys *ss; |
2958 | int ssid; |
2959 | |
2960 | /* release reference from cgroup_procs_write_start() */ |
	put_task_struct(task);

	cgroup_attach_unlock(threadgroup_locked);
2964 | |
2965 | for_each_subsys(ss, ssid) |
2966 | if (ss->post_attach) |
2967 | ss->post_attach(); |
2968 | } |
2969 | |
2970 | static void cgroup_print_ss_mask(struct seq_file *seq, u16 ss_mask) |
2971 | { |
2972 | struct cgroup_subsys *ss; |
2973 | bool printed = false; |
2974 | int ssid; |
2975 | |
2976 | do_each_subsys_mask(ss, ssid, ss_mask) { |
		if (printed)
			seq_putc(seq, ' ');
		seq_puts(seq, ss->name);
		printed = true;
	} while_each_subsys_mask();
	if (printed)
		seq_putc(seq, '\n');
2984 | } |
2985 | |
2986 | /* show controllers which are enabled from the parent */ |
2987 | static int cgroup_controllers_show(struct seq_file *seq, void *v) |
2988 | { |
2989 | struct cgroup *cgrp = seq_css(seq)->cgroup; |
2990 | |
	cgroup_print_ss_mask(seq, cgroup_control(cgrp));
2992 | return 0; |
2993 | } |
2994 | |
2995 | /* show controllers which are enabled for a given cgroup's children */ |
2996 | static int cgroup_subtree_control_show(struct seq_file *seq, void *v) |
2997 | { |
2998 | struct cgroup *cgrp = seq_css(seq)->cgroup; |
2999 | |
	cgroup_print_ss_mask(seq, cgrp->subtree_control);
3001 | return 0; |
3002 | } |
3003 | |
3004 | /** |
3005 | * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy |
3006 | * @cgrp: root of the subtree to update csses for |
3007 | * |
3008 | * @cgrp's control masks have changed and its subtree's css associations |
3009 | * need to be updated accordingly. This function looks up all css_sets |
3010 | * which are attached to the subtree, creates the matching updated css_sets |
3011 | * and migrates the tasks to the new ones. |
3012 | */ |
3013 | static int cgroup_update_dfl_csses(struct cgroup *cgrp) |
3014 | { |
3015 | DEFINE_CGROUP_MGCTX(mgctx); |
3016 | struct cgroup_subsys_state *d_css; |
3017 | struct cgroup *dsct; |
3018 | struct css_set *src_cset; |
3019 | bool has_tasks; |
3020 | int ret; |
3021 | |
3022 | lockdep_assert_held(&cgroup_mutex); |
3023 | |
3024 | /* look up all csses currently attached to @cgrp's subtree */ |
	spin_lock_irq(&css_set_lock);
	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
		struct cgrp_cset_link *link;

		/*
		 * Since cgroup_update_dfl_csses() is only called by
		 * cgroup_apply_control(), the csses associated with the
		 * given cgrp will not be affected by changes made to
		 * its subtree_control file. We can skip them.
		 */
		if (dsct == cgrp)
			continue;

		list_for_each_entry(link, &dsct->cset_links, cset_link)
			cgroup_migrate_add_src(link->cset, dsct, &mgctx);
	}
	spin_unlock_irq(&css_set_lock);
3042 | |
3043 | /* |
3044 | * We need to write-lock threadgroup_rwsem while migrating tasks. |
3045 | * However, if there are no source csets for @cgrp, changing its |
3046 | * controllers isn't gonna produce any task migrations and the |
3047 | * write-locking can be skipped safely. |
3048 | */ |
	has_tasks = !list_empty(&mgctx.preloaded_src_csets);
	cgroup_attach_lock(has_tasks);
3051 | |
3052 | /* NULL dst indicates self on default hierarchy */ |
	ret = cgroup_migrate_prepare_dst(&mgctx);
3054 | if (ret) |
3055 | goto out_finish; |
3056 | |
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(src_cset, &mgctx.preloaded_src_csets,
			    mg_src_preload_node) {
		struct task_struct *task, *ntask;

		/* all tasks in src_csets need to be migrated */
		list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
			cgroup_migrate_add_task(task, &mgctx);
	}
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_execute(&mgctx);
out_finish:
	cgroup_migrate_finish(&mgctx);
	cgroup_attach_unlock(has_tasks);
3072 | return ret; |
3073 | } |
3074 | |
3075 | /** |
3076 | * cgroup_lock_and_drain_offline - lock cgroup_mutex and drain offlined csses |
3077 | * @cgrp: root of the target subtree |
3078 | * |
3079 | * Because css offlining is asynchronous, userland may try to re-enable a |
3080 | * controller while the previous css is still around. This function grabs |
3081 | * cgroup_mutex and drains the previous css instances of @cgrp's subtree. |
3082 | */ |
3083 | void cgroup_lock_and_drain_offline(struct cgroup *cgrp) |
3084 | __acquires(&cgroup_mutex) |
3085 | { |
3086 | struct cgroup *dsct; |
3087 | struct cgroup_subsys_state *d_css; |
3088 | struct cgroup_subsys *ss; |
3089 | int ssid; |
3090 | |
3091 | restart: |
3092 | cgroup_lock(); |
3093 | |
3094 | cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) { |
3095 | for_each_subsys(ss, ssid) { |
			struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
			DEFINE_WAIT(wait);

			if (!css || !percpu_ref_is_dying(&css->refcnt))
				continue;

			cgroup_get_live(dsct);
			prepare_to_wait(&dsct->offline_waitq, &wait,
					TASK_UNINTERRUPTIBLE);

			cgroup_unlock();
			schedule();
			finish_wait(&dsct->offline_waitq, &wait);

			cgroup_put(dsct);
3111 | goto restart; |
3112 | } |
3113 | } |
3114 | } |
3115 | |
3116 | /** |
3117 | * cgroup_save_control - save control masks and dom_cgrp of a subtree |
3118 | * @cgrp: root of the target subtree |
3119 | * |
3120 | * Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the |
3121 | * respective old_ prefixed fields for @cgrp's subtree including @cgrp |
3122 | * itself. |
3123 | */ |
3124 | static void cgroup_save_control(struct cgroup *cgrp) |
3125 | { |
3126 | struct cgroup *dsct; |
3127 | struct cgroup_subsys_state *d_css; |
3128 | |
3129 | cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { |
3130 | dsct->old_subtree_control = dsct->subtree_control; |
3131 | dsct->old_subtree_ss_mask = dsct->subtree_ss_mask; |
3132 | dsct->old_dom_cgrp = dsct->dom_cgrp; |
3133 | } |
3134 | } |
3135 | |
3136 | /** |
3137 | * cgroup_propagate_control - refresh control masks of a subtree |
3138 | * @cgrp: root of the target subtree |
3139 | * |
3140 | * For @cgrp and its subtree, ensure ->subtree_ss_mask matches |
3141 | * ->subtree_control and propagate controller availability through the |
3142 | * subtree so that descendants don't have unavailable controllers enabled. |
3143 | */ |
3144 | static void cgroup_propagate_control(struct cgroup *cgrp) |
3145 | { |
3146 | struct cgroup *dsct; |
3147 | struct cgroup_subsys_state *d_css; |
3148 | |
3149 | cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { |
		dsct->subtree_control &= cgroup_control(dsct);
		dsct->subtree_ss_mask =
			cgroup_calc_subtree_ss_mask(dsct->subtree_control,
						    cgroup_ss_mask(dsct));
3154 | } |
3155 | } |
3156 | |
3157 | /** |
3158 | * cgroup_restore_control - restore control masks and dom_cgrp of a subtree |
3159 | * @cgrp: root of the target subtree |
3160 | * |
3161 | * Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the |
3162 | * respective old_ prefixed fields for @cgrp's subtree including @cgrp |
3163 | * itself. |
3164 | */ |
3165 | static void cgroup_restore_control(struct cgroup *cgrp) |
3166 | { |
3167 | struct cgroup *dsct; |
3168 | struct cgroup_subsys_state *d_css; |
3169 | |
3170 | cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) { |
3171 | dsct->subtree_control = dsct->old_subtree_control; |
3172 | dsct->subtree_ss_mask = dsct->old_subtree_ss_mask; |
3173 | dsct->dom_cgrp = dsct->old_dom_cgrp; |
3174 | } |
3175 | } |
3176 | |
3177 | static bool css_visible(struct cgroup_subsys_state *css) |
3178 | { |
3179 | struct cgroup_subsys *ss = css->ss; |
3180 | struct cgroup *cgrp = css->cgroup; |
3181 | |
3182 | if (cgroup_control(cgrp) & (1 << ss->id)) |
3183 | return true; |
3184 | if (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) |
3185 | return false; |
3186 | return cgroup_on_dfl(cgrp) && ss->implicit_on_dfl; |
3187 | } |
3188 | |
3189 | /** |
3190 | * cgroup_apply_control_enable - enable or show csses according to control |
3191 | * @cgrp: root of the target subtree |
3192 | * |
3193 | * Walk @cgrp's subtree and create new csses or make the existing ones |
3194 | * visible. A css is created invisible if it's being implicitly enabled |
3195 | * through dependency. An invisible css is made visible when the userland |
3196 | * explicitly enables it. |
3197 | * |
3198 | * Returns 0 on success, -errno on failure. On failure, csses which have |
3199 | * been processed already aren't cleaned up. The caller is responsible for |
3200 | * cleaning up with cgroup_apply_control_disable(). |
3201 | */ |
3202 | static int cgroup_apply_control_enable(struct cgroup *cgrp) |
3203 | { |
3204 | struct cgroup *dsct; |
3205 | struct cgroup_subsys_state *d_css; |
3206 | struct cgroup_subsys *ss; |
3207 | int ssid, ret; |
3208 | |
3209 | cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { |
3210 | for_each_subsys(ss, ssid) { |
			struct cgroup_subsys_state *css = cgroup_css(dsct, ss);

			if (!(cgroup_ss_mask(dsct) & (1 << ss->id)))
				continue;

			if (!css) {
				css = css_create(dsct, ss);
				if (IS_ERR(css))
					return PTR_ERR(css);
			}
3220 | } |
3221 | |
3222 | WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt)); |
3223 | |
3224 | if (css_visible(css)) { |
3225 | ret = css_populate_dir(css); |
3226 | if (ret) |
3227 | return ret; |
3228 | } |
3229 | } |
3230 | } |
3231 | |
3232 | return 0; |
3233 | } |
3234 | |
3235 | /** |
3236 | * cgroup_apply_control_disable - kill or hide csses according to control |
3237 | * @cgrp: root of the target subtree |
3238 | * |
3239 | * Walk @cgrp's subtree and kill and hide csses so that they match |
3240 | * cgroup_ss_mask() and cgroup_visible_mask(). |
3241 | * |
3242 | * A css is hidden when the userland requests it to be disabled while other |
 * subsystems are still depending on it. The css must not actively
 * control resources and must be in the vanilla state if it's made
 * visible again later.
3245 | * Controllers which may be depended upon should provide ->css_reset() for |
3246 | * this purpose. |
3247 | */ |
3248 | static void cgroup_apply_control_disable(struct cgroup *cgrp) |
3249 | { |
3250 | struct cgroup *dsct; |
3251 | struct cgroup_subsys_state *d_css; |
3252 | struct cgroup_subsys *ss; |
3253 | int ssid; |
3254 | |
3255 | cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) { |
3256 | for_each_subsys(ss, ssid) { |
			struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
3258 | |
3259 | if (!css) |
3260 | continue; |
3261 | |
3262 | WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt)); |
3263 | |
			if (css->parent &&
			    !(cgroup_ss_mask(dsct) & (1 << ss->id))) {
3266 | kill_css(css); |
3267 | } else if (!css_visible(css)) { |
3268 | css_clear_dir(css); |
3269 | if (ss->css_reset) |
3270 | ss->css_reset(css); |
3271 | } |
3272 | } |
3273 | } |
3274 | } |
3275 | |
3276 | /** |
3277 | * cgroup_apply_control - apply control mask updates to the subtree |
3278 | * @cgrp: root of the target subtree |
3279 | * |
 * Subsystems can be enabled and disabled in a subtree using the
 * following steps.
3282 | * |
3283 | * 1. Call cgroup_save_control() to stash the current state. |
3284 | * 2. Update ->subtree_control masks in the subtree as desired. |
3285 | * 3. Call cgroup_apply_control() to apply the changes. |
3286 | * 4. Optionally perform other related operations. |
3287 | * 5. Call cgroup_finalize_control() to finish up. |
3288 | * |
 * This function implements step 3 and propagates the mask changes
 * throughout @cgrp's subtree, updates csses accordingly and performs
 * process migrations.
3292 | */ |
3293 | static int cgroup_apply_control(struct cgroup *cgrp) |
3294 | { |
3295 | int ret; |
3296 | |
3297 | cgroup_propagate_control(cgrp); |
3298 | |
3299 | ret = cgroup_apply_control_enable(cgrp); |
3300 | if (ret) |
3301 | return ret; |
3302 | |
3303 | /* |
3304 | * At this point, cgroup_e_css_by_mask() results reflect the new csses |
3305 | * making the following cgroup_update_dfl_csses() properly update |
3306 | * css associations of all tasks in the subtree. |
3307 | */ |
3308 | return cgroup_update_dfl_csses(cgrp); |
3309 | } |
3310 | |
3311 | /** |
3312 | * cgroup_finalize_control - finalize control mask update |
3313 | * @cgrp: root of the target subtree |
3314 | * @ret: the result of the update |
3315 | * |
3316 | * Finalize control mask update. See cgroup_apply_control() for more info. |
3317 | */ |
3318 | static void cgroup_finalize_control(struct cgroup *cgrp, int ret) |
3319 | { |
3320 | if (ret) { |
3321 | cgroup_restore_control(cgrp); |
3322 | cgroup_propagate_control(cgrp); |
3323 | } |
3324 | |
3325 | cgroup_apply_control_disable(cgrp); |
3326 | } |
3327 | |
3328 | static int cgroup_vet_subtree_control_enable(struct cgroup *cgrp, u16 enable) |
3329 | { |
3330 | u16 domain_enable = enable & ~cgrp_dfl_threaded_ss_mask; |
3331 | |
3332 | /* if nothing is getting enabled, nothing to worry about */ |
3333 | if (!enable) |
3334 | return 0; |
3335 | |
3336 | /* can @cgrp host any resources? */ |
	if (!cgroup_is_valid_domain(cgrp->dom_cgrp))
3338 | return -EOPNOTSUPP; |
3339 | |
3340 | /* mixables don't care */ |
3341 | if (cgroup_is_mixable(cgrp)) |
3342 | return 0; |
3343 | |
3344 | if (domain_enable) { |
3345 | /* can't enable domain controllers inside a thread subtree */ |
3346 | if (cgroup_is_thread_root(cgrp) || cgroup_is_threaded(cgrp)) |
3347 | return -EOPNOTSUPP; |
3348 | } else { |
3349 | /* |
3350 | * Threaded controllers can handle internal competitions |
3351 | * and are always allowed inside a (prospective) thread |
3352 | * subtree. |
3353 | */ |
3354 | if (cgroup_can_be_thread_root(cgrp) || cgroup_is_threaded(cgrp)) |
3355 | return 0; |
3356 | } |
3357 | |
3358 | /* |
3359 | * Controllers can't be enabled for a cgroup with tasks to avoid |
3360 | * child cgroups competing against tasks. |
3361 | */ |
3362 | if (cgroup_has_tasks(cgrp)) |
3363 | return -EBUSY; |
3364 | |
3365 | return 0; |
3366 | } |
3367 | |
3368 | /* change the enabled child controllers for a cgroup in the default hierarchy */ |
3369 | static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of, |
3370 | char *buf, size_t nbytes, |
3371 | loff_t off) |
3372 | { |
3373 | u16 enable = 0, disable = 0; |
3374 | struct cgroup *cgrp, *child; |
3375 | struct cgroup_subsys *ss; |
3376 | char *tok; |
3377 | int ssid, ret; |
3378 | |
3379 | /* |
3380 | * Parse input - space separated list of subsystem names prefixed |
3381 | * with either + or -. |
3382 | */ |
	buf = strstrip(buf);
	while ((tok = strsep(&buf, " "))) {
3385 | if (tok[0] == '\0') |
3386 | continue; |
3387 | do_each_subsys_mask(ss, ssid, ~cgrp_dfl_inhibit_ss_mask) { |
3388 | if (!cgroup_ssid_enabled(ssid) || |
3389 | strcmp(tok + 1, ss->name)) |
3390 | continue; |
3391 | |
3392 | if (*tok == '+') { |
3393 | enable |= 1 << ssid; |
3394 | disable &= ~(1 << ssid); |
3395 | } else if (*tok == '-') { |
3396 | disable |= 1 << ssid; |
3397 | enable &= ~(1 << ssid); |
3398 | } else { |
3399 | return -EINVAL; |
3400 | } |
3401 | break; |
3402 | } while_each_subsys_mask(); |
3403 | if (ssid == CGROUP_SUBSYS_COUNT) |
3404 | return -EINVAL; |
3405 | } |
3406 | |
	cgrp = cgroup_kn_lock_live(of->kn, true);
3408 | if (!cgrp) |
3409 | return -ENODEV; |
3410 | |
3411 | for_each_subsys(ss, ssid) { |
3412 | if (enable & (1 << ssid)) { |
3413 | if (cgrp->subtree_control & (1 << ssid)) { |
3414 | enable &= ~(1 << ssid); |
3415 | continue; |
3416 | } |
3417 | |
3418 | if (!(cgroup_control(cgrp) & (1 << ssid))) { |
3419 | ret = -ENOENT; |
3420 | goto out_unlock; |
3421 | } |
3422 | } else if (disable & (1 << ssid)) { |
3423 | if (!(cgrp->subtree_control & (1 << ssid))) { |
3424 | disable &= ~(1 << ssid); |
3425 | continue; |
3426 | } |
3427 | |
3428 | /* a child has it enabled? */ |
3429 | cgroup_for_each_live_child(child, cgrp) { |
3430 | if (child->subtree_control & (1 << ssid)) { |
3431 | ret = -EBUSY; |
3432 | goto out_unlock; |
3433 | } |
3434 | } |
3435 | } |
3436 | } |
3437 | |
3438 | if (!enable && !disable) { |
3439 | ret = 0; |
3440 | goto out_unlock; |
3441 | } |
3442 | |
3443 | ret = cgroup_vet_subtree_control_enable(cgrp, enable); |
3444 | if (ret) |
3445 | goto out_unlock; |
3446 | |
3447 | /* save and update control masks and prepare csses */ |
3448 | cgroup_save_control(cgrp); |
3449 | |
3450 | cgrp->subtree_control |= enable; |
3451 | cgrp->subtree_control &= ~disable; |
3452 | |
3453 | ret = cgroup_apply_control(cgrp); |
3454 | cgroup_finalize_control(cgrp, ret); |
3455 | if (ret) |
3456 | goto out_unlock; |
3457 | |
	kernfs_activate(cgrp->kn);
out_unlock:
	cgroup_kn_unlock(of->kn);
3461 | return ret ?: nbytes; |
3462 | } |
3463 | |
3464 | /** |
3465 | * cgroup_enable_threaded - make @cgrp threaded |
3466 | * @cgrp: the target cgroup |
3467 | * |
3468 | * Called when "threaded" is written to the cgroup.type interface file and |
3469 | * tries to make @cgrp threaded and join the parent's resource domain. |
3470 | * This function is never called on the root cgroup as cgroup.type doesn't |
3471 | * exist on it. |
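 *
 * From userland this corresponds to (sketch; the path and mount point
 * are illustrative, assuming a mounted cgroup2 hierarchy):
 *
 *   # echo threaded > /sys/fs/cgroup/<path>/cgroup.type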
3472 | */ |
3473 | static int cgroup_enable_threaded(struct cgroup *cgrp) |
3474 | { |
3475 | struct cgroup *parent = cgroup_parent(cgrp); |
3476 | struct cgroup *dom_cgrp = parent->dom_cgrp; |
3477 | struct cgroup *dsct; |
3478 | struct cgroup_subsys_state *d_css; |
3479 | int ret; |
3480 | |
3481 | lockdep_assert_held(&cgroup_mutex); |
3482 | |
3483 | /* noop if already threaded */ |
3484 | if (cgroup_is_threaded(cgrp)) |
3485 | return 0; |
3486 | |
3487 | /* |
	 * If @cgrp is populated or has domain controllers enabled, it
3489 | * can't be switched. While the below cgroup_can_be_thread_root() |
3490 | * test can catch the same conditions, that's only when @parent is |
3491 | * not mixable, so let's check it explicitly. |
3492 | */ |
3493 | if (cgroup_is_populated(cgrp) || |
3494 | cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask) |
3495 | return -EOPNOTSUPP; |
3496 | |
3497 | /* we're joining the parent's domain, ensure its validity */ |
	if (!cgroup_is_valid_domain(dom_cgrp) ||
	    !cgroup_can_be_thread_root(dom_cgrp))
3500 | return -EOPNOTSUPP; |
3501 | |
3502 | /* |
3503 | * The following shouldn't cause actual migrations and should |
3504 | * always succeed. |
3505 | */ |
3506 | cgroup_save_control(cgrp); |
3507 | |
3508 | cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) |
		if (dsct == cgrp || cgroup_is_threaded(dsct))
3510 | dsct->dom_cgrp = dom_cgrp; |
3511 | |
3512 | ret = cgroup_apply_control(cgrp); |
3513 | if (!ret) |
3514 | parent->nr_threaded_children++; |
3515 | |
3516 | cgroup_finalize_control(cgrp, ret); |
3517 | return ret; |
3518 | } |
3519 | |
3520 | static int cgroup_type_show(struct seq_file *seq, void *v) |
3521 | { |
3522 | struct cgroup *cgrp = seq_css(seq)->cgroup; |
3523 | |
3524 | if (cgroup_is_threaded(cgrp)) |
		seq_puts(seq, "threaded\n");
	else if (!cgroup_is_valid_domain(cgrp))
		seq_puts(seq, "domain invalid\n");
	else if (cgroup_is_thread_root(cgrp))
		seq_puts(seq, "domain threaded\n");
	else
		seq_puts(seq, "domain\n");
3532 | |
3533 | return 0; |
3534 | } |
3535 | |
3536 | static ssize_t cgroup_type_write(struct kernfs_open_file *of, char *buf, |
3537 | size_t nbytes, loff_t off) |
3538 | { |
3539 | struct cgroup *cgrp; |
3540 | int ret; |
3541 | |
3542 | /* only switching to threaded mode is supported */ |
	if (strcmp(strstrip(buf), "threaded"))
3544 | return -EINVAL; |
3545 | |
3546 | /* drain dying csses before we re-apply (threaded) subtree control */ |
	cgrp = cgroup_kn_lock_live(of->kn, true);
3548 | if (!cgrp) |
3549 | return -ENOENT; |
3550 | |
3551 | /* threaded can only be enabled */ |
3552 | ret = cgroup_enable_threaded(cgrp); |
3553 | |
	cgroup_kn_unlock(of->kn);
3555 | return ret ?: nbytes; |
3556 | } |
3557 | |
3558 | static int cgroup_max_descendants_show(struct seq_file *seq, void *v) |
3559 | { |
3560 | struct cgroup *cgrp = seq_css(seq)->cgroup; |
3561 | int descendants = READ_ONCE(cgrp->max_descendants); |
3562 | |
3563 | if (descendants == INT_MAX) |
		seq_puts(seq, "max\n");
	else
		seq_printf(seq, "%d\n", descendants);
3567 | |
3568 | return 0; |
3569 | } |
3570 | |
3571 | static ssize_t cgroup_max_descendants_write(struct kernfs_open_file *of, |
3572 | char *buf, size_t nbytes, loff_t off) |
3573 | { |
3574 | struct cgroup *cgrp; |
3575 | int descendants; |
3576 | ssize_t ret; |
3577 | |
	buf = strstrip(buf);
	if (!strcmp(buf, "max")) {
3580 | descendants = INT_MAX; |
3581 | } else { |
		ret = kstrtoint(buf, 0, &descendants);
3583 | if (ret) |
3584 | return ret; |
3585 | } |
3586 | |
3587 | if (descendants < 0) |
3588 | return -ERANGE; |
3589 | |
	cgrp = cgroup_kn_lock_live(of->kn, false);
3591 | if (!cgrp) |
3592 | return -ENOENT; |
3593 | |
3594 | cgrp->max_descendants = descendants; |
3595 | |
	cgroup_kn_unlock(of->kn);
3597 | |
3598 | return nbytes; |
3599 | } |
3600 | |
3601 | static int cgroup_max_depth_show(struct seq_file *seq, void *v) |
3602 | { |
3603 | struct cgroup *cgrp = seq_css(seq)->cgroup; |
3604 | int depth = READ_ONCE(cgrp->max_depth); |
3605 | |
3606 | if (depth == INT_MAX) |
		seq_puts(seq, "max\n");
	else
		seq_printf(seq, "%d\n", depth);
3610 | |
3611 | return 0; |
3612 | } |
3613 | |
3614 | static ssize_t cgroup_max_depth_write(struct kernfs_open_file *of, |
3615 | char *buf, size_t nbytes, loff_t off) |
3616 | { |
3617 | struct cgroup *cgrp; |
3618 | ssize_t ret; |
3619 | int depth; |
3620 | |
	buf = strstrip(buf);
	if (!strcmp(buf, "max")) {
3623 | depth = INT_MAX; |
3624 | } else { |
		ret = kstrtoint(buf, 0, &depth);
3626 | if (ret) |
3627 | return ret; |
3628 | } |
3629 | |
3630 | if (depth < 0) |
3631 | return -ERANGE; |
3632 | |
	cgrp = cgroup_kn_lock_live(of->kn, false);
3634 | if (!cgrp) |
3635 | return -ENOENT; |
3636 | |
3637 | cgrp->max_depth = depth; |
3638 | |
	cgroup_kn_unlock(of->kn);
3640 | |
3641 | return nbytes; |
3642 | } |
3643 | |
3644 | static int cgroup_events_show(struct seq_file *seq, void *v) |
3645 | { |
3646 | struct cgroup *cgrp = seq_css(seq)->cgroup; |
3647 | |
	seq_printf(seq, "populated %d\n", cgroup_is_populated(cgrp));
	seq_printf(seq, "frozen %d\n", test_bit(CGRP_FROZEN, &cgrp->flags));
3650 | |
3651 | return 0; |
3652 | } |
3653 | |
3654 | static int cgroup_stat_show(struct seq_file *seq, void *v) |
3655 | { |
3656 | struct cgroup *cgroup = seq_css(seq)->cgroup; |
3657 | |
	seq_printf(seq, "nr_descendants %d\n",
		   cgroup->nr_descendants);
	seq_printf(seq, "nr_dying_descendants %d\n",
		   cgroup->nr_dying_descendants);
3662 | |
3663 | return 0; |
3664 | } |
3665 | |
3666 | #ifdef CONFIG_CGROUP_SCHED |
3667 | /** |
3668 | * cgroup_tryget_css - try to get a cgroup's css for the specified subsystem |
3669 | * @cgrp: the cgroup of interest |
3670 | * @ss: the subsystem of interest |
3671 | * |
3672 | * Find and get @cgrp's css associated with @ss. If the css doesn't exist |
3673 | * or is offline, %NULL is returned. |
3674 | */ |
3675 | static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp, |
3676 | struct cgroup_subsys *ss) |
3677 | { |
3678 | struct cgroup_subsys_state *css; |
3679 | |
3680 | rcu_read_lock(); |
3681 | css = cgroup_css(cgrp, ss); |
3682 | if (css && !css_tryget_online(css)) |
3683 | css = NULL; |
3684 | rcu_read_unlock(); |
3685 | |
3686 | return css; |
3687 | } |
3688 | |
static int cgroup_extra_stat_show(struct seq_file *seq, int ssid)
3690 | { |
3691 | struct cgroup *cgrp = seq_css(seq)->cgroup; |
3692 | struct cgroup_subsys *ss = cgroup_subsys[ssid]; |
3693 | struct cgroup_subsys_state *css; |
3694 | int ret; |
3695 | |
3696 | if (!ss->css_extra_stat_show) |
3697 | return 0; |
3698 | |
3699 | css = cgroup_tryget_css(cgrp, ss); |
3700 | if (!css) |
3701 | return 0; |
3702 | |
3703 | ret = ss->css_extra_stat_show(seq, css); |
3704 | css_put(css); |
3705 | return ret; |
3706 | } |
3707 | |
3708 | static int cgroup_local_stat_show(struct seq_file *seq, |
3709 | struct cgroup *cgrp, int ssid) |
3710 | { |
3711 | struct cgroup_subsys *ss = cgroup_subsys[ssid]; |
3712 | struct cgroup_subsys_state *css; |
3713 | int ret; |
3714 | |
3715 | if (!ss->css_local_stat_show) |
3716 | return 0; |
3717 | |
3718 | css = cgroup_tryget_css(cgrp, ss); |
3719 | if (!css) |
3720 | return 0; |
3721 | |
3722 | ret = ss->css_local_stat_show(seq, css); |
3723 | css_put(css); |
3724 | return ret; |
3725 | } |
3726 | #endif |
3727 | |
3728 | static int cpu_stat_show(struct seq_file *seq, void *v) |
3729 | { |
3730 | int ret = 0; |
3731 | |
3732 | cgroup_base_stat_cputime_show(seq); |
3733 | #ifdef CONFIG_CGROUP_SCHED |
	ret = cgroup_extra_stat_show(seq, cpu_cgrp_id);
3735 | #endif |
3736 | return ret; |
3737 | } |
3738 | |
3739 | static int cpu_local_stat_show(struct seq_file *seq, void *v) |
3740 | { |
3741 | struct cgroup __maybe_unused *cgrp = seq_css(seq)->cgroup; |
3742 | int ret = 0; |
3743 | |
3744 | #ifdef CONFIG_CGROUP_SCHED |
	ret = cgroup_local_stat_show(seq, cgrp, cpu_cgrp_id);
3746 | #endif |
3747 | return ret; |
3748 | } |
3749 | |
3750 | #ifdef CONFIG_PSI |
3751 | static int cgroup_io_pressure_show(struct seq_file *seq, void *v) |
3752 | { |
3753 | struct cgroup *cgrp = seq_css(seq)->cgroup; |
3754 | struct psi_group *psi = cgroup_psi(cgrp); |
3755 | |
	return psi_show(seq, psi, PSI_IO);
3757 | } |
3758 | static int cgroup_memory_pressure_show(struct seq_file *seq, void *v) |
3759 | { |
3760 | struct cgroup *cgrp = seq_css(seq)->cgroup; |
3761 | struct psi_group *psi = cgroup_psi(cgrp); |
3762 | |
	return psi_show(seq, psi, PSI_MEM);
3764 | } |
3765 | static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v) |
3766 | { |
3767 | struct cgroup *cgrp = seq_css(seq)->cgroup; |
3768 | struct psi_group *psi = cgroup_psi(cgrp); |
3769 | |
	return psi_show(seq, psi, PSI_CPU);
3771 | } |
3772 | |
3773 | static ssize_t pressure_write(struct kernfs_open_file *of, char *buf, |
3774 | size_t nbytes, enum psi_res res) |
3775 | { |
3776 | struct cgroup_file_ctx *ctx = of->priv; |
3777 | struct psi_trigger *new; |
3778 | struct cgroup *cgrp; |
3779 | struct psi_group *psi; |
3780 | |
	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	cgroup_get(cgrp);
	cgroup_kn_unlock(of->kn);
3787 | |
3788 | /* Allow only one trigger per file descriptor */ |
3789 | if (ctx->psi.trigger) { |
3790 | cgroup_put(cgrp); |
3791 | return -EBUSY; |
3792 | } |
3793 | |
3794 | psi = cgroup_psi(cgrp); |
	new = psi_trigger_create(psi, buf, res, of->file, of);
	if (IS_ERR(new)) {
		cgroup_put(cgrp);
		return PTR_ERR(new);
3799 | } |
3800 | |
3801 | smp_store_release(&ctx->psi.trigger, new); |
3802 | cgroup_put(cgrp); |
3803 | |
3804 | return nbytes; |
3805 | } |
3806 | |
3807 | static ssize_t cgroup_io_pressure_write(struct kernfs_open_file *of, |
3808 | char *buf, size_t nbytes, |
3809 | loff_t off) |
3810 | { |
	return pressure_write(of, buf, nbytes, PSI_IO);
3812 | } |
3813 | |
3814 | static ssize_t cgroup_memory_pressure_write(struct kernfs_open_file *of, |
3815 | char *buf, size_t nbytes, |
3816 | loff_t off) |
3817 | { |
	return pressure_write(of, buf, nbytes, PSI_MEM);
3819 | } |
3820 | |
3821 | static ssize_t cgroup_cpu_pressure_write(struct kernfs_open_file *of, |
3822 | char *buf, size_t nbytes, |
3823 | loff_t off) |
3824 | { |
	return pressure_write(of, buf, nbytes, PSI_CPU);
3826 | } |
3827 | |
3828 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
3829 | static int cgroup_irq_pressure_show(struct seq_file *seq, void *v) |
3830 | { |
3831 | struct cgroup *cgrp = seq_css(seq)->cgroup; |
3832 | struct psi_group *psi = cgroup_psi(cgrp); |
3833 | |
	return psi_show(seq, psi, PSI_IRQ);
3835 | } |
3836 | |
3837 | static ssize_t cgroup_irq_pressure_write(struct kernfs_open_file *of, |
3838 | char *buf, size_t nbytes, |
3839 | loff_t off) |
3840 | { |
	return pressure_write(of, buf, nbytes, PSI_IRQ);
3842 | } |
3843 | #endif |
3844 | |
3845 | static int cgroup_pressure_show(struct seq_file *seq, void *v) |
3846 | { |
3847 | struct cgroup *cgrp = seq_css(seq)->cgroup; |
3848 | struct psi_group *psi = cgroup_psi(cgrp); |
3849 | |
	seq_printf(seq, "%d\n", psi->enabled);
3851 | |
3852 | return 0; |
3853 | } |
3854 | |
3855 | static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, |
3856 | char *buf, size_t nbytes, |
3857 | loff_t off) |
3858 | { |
3859 | ssize_t ret; |
3860 | int enable; |
3861 | struct cgroup *cgrp; |
3862 | struct psi_group *psi; |
3863 | |
	ret = kstrtoint(strstrip(buf), 0, &enable);
3865 | if (ret) |
3866 | return ret; |
3867 | |
3868 | if (enable < 0 || enable > 1) |
3869 | return -ERANGE; |
3870 | |
	cgrp = cgroup_kn_lock_live(of->kn, false);
3872 | if (!cgrp) |
3873 | return -ENOENT; |
3874 | |
3875 | psi = cgroup_psi(cgrp); |
3876 | if (psi->enabled != enable) { |
3877 | int i; |
3878 | |
3879 | /* show or hide {cpu,memory,io,irq}.pressure files */ |
3880 | for (i = 0; i < NR_PSI_RESOURCES; i++) |
			cgroup_file_show(&cgrp->psi_files[i], enable);

		psi->enabled = enable;
		if (enable)
			psi_cgroup_restart(psi);
	}

	cgroup_kn_unlock(of->kn);
3889 | |
3890 | return nbytes; |
3891 | } |
3892 | |
3893 | static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of, |
3894 | poll_table *pt) |
3895 | { |
3896 | struct cgroup_file_ctx *ctx = of->priv; |
3897 | |
	return psi_trigger_poll(&ctx->psi.trigger, of->file, pt);
3899 | } |
3900 | |
3901 | static void cgroup_pressure_release(struct kernfs_open_file *of) |
3902 | { |
3903 | struct cgroup_file_ctx *ctx = of->priv; |
3904 | |
	psi_trigger_destroy(ctx->psi.trigger);
3906 | } |
3907 | |
3908 | bool cgroup_psi_enabled(void) |
3909 | { |
3910 | if (static_branch_likely(&psi_disabled)) |
3911 | return false; |
3912 | |
3913 | return (cgroup_feature_disable_mask & (1 << OPT_FEATURE_PRESSURE)) == 0; |
3914 | } |
3915 | |
3916 | #else /* CONFIG_PSI */ |
3917 | bool cgroup_psi_enabled(void) |
3918 | { |
3919 | return false; |
3920 | } |
3921 | |
3922 | #endif /* CONFIG_PSI */ |
3923 | |
3924 | static int cgroup_freeze_show(struct seq_file *seq, void *v) |
3925 | { |
3926 | struct cgroup *cgrp = seq_css(seq)->cgroup; |
3927 | |
	seq_printf(seq, "%d\n", cgrp->freezer.freeze);
3929 | |
3930 | return 0; |
3931 | } |
3932 | |
3933 | static ssize_t cgroup_freeze_write(struct kernfs_open_file *of, |
3934 | char *buf, size_t nbytes, loff_t off) |
3935 | { |
3936 | struct cgroup *cgrp; |
3937 | ssize_t ret; |
3938 | int freeze; |
3939 | |
	ret = kstrtoint(strstrip(buf), 0, &freeze);
3941 | if (ret) |
3942 | return ret; |
3943 | |
3944 | if (freeze < 0 || freeze > 1) |
3945 | return -ERANGE; |
3946 | |
	cgrp = cgroup_kn_lock_live(of->kn, false);
3948 | if (!cgrp) |
3949 | return -ENOENT; |
3950 | |
3951 | cgroup_freeze(cgrp, freeze); |
3952 | |
	cgroup_kn_unlock(of->kn);
3954 | |
3955 | return nbytes; |
3956 | } |
3957 | |
3958 | static void __cgroup_kill(struct cgroup *cgrp) |
3959 | { |
3960 | struct css_task_iter it; |
3961 | struct task_struct *task; |
3962 | |
3963 | lockdep_assert_held(&cgroup_mutex); |
3964 | |
	spin_lock_irq(&css_set_lock);
	set_bit(CGRP_KILL, &cgrp->flags);
	spin_unlock_irq(&css_set_lock);
3968 | |
	css_task_iter_start(&cgrp->self, CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED, &it);
	while ((task = css_task_iter_next(&it))) {
3971 | /* Ignore kernel threads here. */ |
3972 | if (task->flags & PF_KTHREAD) |
3973 | continue; |
3974 | |
3975 | /* Skip tasks that are already dying. */ |
		if (__fatal_signal_pending(task))
3977 | continue; |
3978 | |
3979 | send_sig(SIGKILL, task, 0); |
3980 | } |
	css_task_iter_end(&it);
3982 | |
	spin_lock_irq(&css_set_lock);
	clear_bit(CGRP_KILL, &cgrp->flags);
	spin_unlock_irq(&css_set_lock);
3986 | } |
3987 | |
3988 | static void cgroup_kill(struct cgroup *cgrp) |
3989 | { |
3990 | struct cgroup_subsys_state *css; |
3991 | struct cgroup *dsct; |
3992 | |
3993 | lockdep_assert_held(&cgroup_mutex); |
3994 | |
3995 | cgroup_for_each_live_descendant_pre(dsct, css, cgrp) |
		__cgroup_kill(dsct);
3997 | } |
3998 | |
3999 | static ssize_t cgroup_kill_write(struct kernfs_open_file *of, char *buf, |
4000 | size_t nbytes, loff_t off) |
4001 | { |
4002 | ssize_t ret = 0; |
4003 | int kill; |
4004 | struct cgroup *cgrp; |
4005 | |
	ret = kstrtoint(strstrip(buf), 0, &kill);
4007 | if (ret) |
4008 | return ret; |
4009 | |
4010 | if (kill != 1) |
4011 | return -ERANGE; |
4012 | |
	cgrp = cgroup_kn_lock_live(of->kn, false);
4014 | if (!cgrp) |
4015 | return -ENOENT; |
4016 | |
4017 | /* |
4018 | * Killing is a process directed operation, i.e. the whole thread-group |
4019 | * is taken down so act like we do for cgroup.procs and only make this |
4020 | * writable in non-threaded cgroups. |
4021 | */ |
4022 | if (cgroup_is_threaded(cgrp)) |
4023 | ret = -EOPNOTSUPP; |
4024 | else |
4025 | cgroup_kill(cgrp); |
4026 | |
	cgroup_kn_unlock(of->kn);
4028 | |
4029 | return ret ?: nbytes; |
4030 | } |
4031 | |
4032 | static int cgroup_file_open(struct kernfs_open_file *of) |
4033 | { |
4034 | struct cftype *cft = of_cft(of); |
4035 | struct cgroup_file_ctx *ctx; |
4036 | int ret; |
4037 | |
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
4039 | if (!ctx) |
4040 | return -ENOMEM; |
4041 | |
4042 | ctx->ns = current->nsproxy->cgroup_ns; |
	get_cgroup_ns(ctx->ns);
4044 | of->priv = ctx; |
4045 | |
4046 | if (!cft->open) |
4047 | return 0; |
4048 | |
4049 | ret = cft->open(of); |
4050 | if (ret) { |
		put_cgroup_ns(ctx->ns);
		kfree(ctx);
4053 | } |
4054 | return ret; |
4055 | } |
4056 | |
4057 | static void cgroup_file_release(struct kernfs_open_file *of) |
4058 | { |
4059 | struct cftype *cft = of_cft(of); |
4060 | struct cgroup_file_ctx *ctx = of->priv; |
4061 | |
4062 | if (cft->release) |
4063 | cft->release(of); |
	put_cgroup_ns(ctx->ns);
	kfree(ctx);
4066 | } |
4067 | |
4068 | static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf, |
4069 | size_t nbytes, loff_t off) |
4070 | { |
4071 | struct cgroup_file_ctx *ctx = of->priv; |
4072 | struct cgroup *cgrp = of->kn->parent->priv; |
4073 | struct cftype *cft = of_cft(of); |
4074 | struct cgroup_subsys_state *css; |
4075 | int ret; |
4076 | |
4077 | if (!nbytes) |
4078 | return 0; |
4079 | |
4080 | /* |
4081 | * If namespaces are delegation boundaries, disallow writes to |
	 * files in a non-init namespace root from inside the namespace
4083 | * except for the files explicitly marked delegatable - |
4084 | * cgroup.procs and cgroup.subtree_control. |
4085 | */ |
4086 | if ((cgrp->root->flags & CGRP_ROOT_NS_DELEGATE) && |
4087 | !(cft->flags & CFTYPE_NS_DELEGATABLE) && |
4088 | ctx->ns != &init_cgroup_ns && ctx->ns->root_cset->dfl_cgrp == cgrp) |
4089 | return -EPERM; |
4090 | |
4091 | if (cft->write) |
4092 | return cft->write(of, buf, nbytes, off); |
4093 | |
4094 | /* |
4095 | * kernfs guarantees that a file isn't deleted with operations in |
4096 | * flight, which means that the matching css is and stays alive and |
4097 | * doesn't need to be pinned. The RCU locking is not necessary |
4098 | * either. It's just for the convenience of using cgroup_css(). |
4099 | */ |
4100 | rcu_read_lock(); |
	css = cgroup_css(cgrp, cft->ss);
4102 | rcu_read_unlock(); |
4103 | |
4104 | if (cft->write_u64) { |
4105 | unsigned long long v; |
		ret = kstrtoull(buf, 0, &v);
4107 | if (!ret) |
4108 | ret = cft->write_u64(css, cft, v); |
4109 | } else if (cft->write_s64) { |
4110 | long long v; |
		ret = kstrtoll(buf, 0, &v);
4112 | if (!ret) |
4113 | ret = cft->write_s64(css, cft, v); |
4114 | } else { |
4115 | ret = -EINVAL; |
4116 | } |
4117 | |
4118 | return ret ?: nbytes; |
4119 | } |
4120 | |
4121 | static __poll_t cgroup_file_poll(struct kernfs_open_file *of, poll_table *pt) |
4122 | { |
4123 | struct cftype *cft = of_cft(of); |
4124 | |
4125 | if (cft->poll) |
4126 | return cft->poll(of, pt); |
4127 | |
4128 | return kernfs_generic_poll(of, pt); |
4129 | } |
4130 | |
4131 | static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos) |
4132 | { |
4133 | return seq_cft(seq)->seq_start(seq, ppos); |
4134 | } |
4135 | |
4136 | static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos) |
4137 | { |
4138 | return seq_cft(seq)->seq_next(seq, v, ppos); |
4139 | } |
4140 | |
4141 | static void cgroup_seqfile_stop(struct seq_file *seq, void *v) |
4142 | { |
4143 | if (seq_cft(seq)->seq_stop) |
4144 | seq_cft(seq)->seq_stop(seq, v); |
4145 | } |
4146 | |
4147 | static int cgroup_seqfile_show(struct seq_file *m, void *arg) |
4148 | { |
	struct cftype *cft = seq_cft(m);
	struct cgroup_subsys_state *css = seq_css(m);
4151 | |
4152 | if (cft->seq_show) |
4153 | return cft->seq_show(m, arg); |
4154 | |
4155 | if (cft->read_u64) |
		seq_printf(m, "%llu\n", cft->read_u64(css, cft));
	else if (cft->read_s64)
		seq_printf(m, "%lld\n", cft->read_s64(css, cft));
4159 | else |
4160 | return -EINVAL; |
4161 | return 0; |
4162 | } |
4163 | |
4164 | static struct kernfs_ops cgroup_kf_single_ops = { |
4165 | .atomic_write_len = PAGE_SIZE, |
4166 | .open = cgroup_file_open, |
4167 | .release = cgroup_file_release, |
4168 | .write = cgroup_file_write, |
4169 | .poll = cgroup_file_poll, |
4170 | .seq_show = cgroup_seqfile_show, |
4171 | }; |
4172 | |
4173 | static struct kernfs_ops cgroup_kf_ops = { |
4174 | .atomic_write_len = PAGE_SIZE, |
4175 | .open = cgroup_file_open, |
4176 | .release = cgroup_file_release, |
4177 | .write = cgroup_file_write, |
4178 | .poll = cgroup_file_poll, |
4179 | .seq_start = cgroup_seqfile_start, |
4180 | .seq_next = cgroup_seqfile_next, |
4181 | .seq_stop = cgroup_seqfile_stop, |
4182 | .seq_show = cgroup_seqfile_show, |
4183 | }; |
4184 | |
4185 | static void cgroup_file_notify_timer(struct timer_list *timer) |
4186 | { |
4187 | cgroup_file_notify(container_of(timer, struct cgroup_file, |
4188 | notify_timer)); |
4189 | } |
4190 | |
4191 | static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp, |
4192 | struct cftype *cft) |
4193 | { |
4194 | char name[CGROUP_FILE_NAME_MAX]; |
4195 | struct kernfs_node *kn; |
4196 | struct lock_class_key *key = NULL; |
4197 | |
4198 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
4199 | key = &cft->lockdep_key; |
4200 | #endif |
	kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
				  cgroup_file_mode(cft),
				  current_fsuid(), current_fsgid(),
				  0, cft->kf_ops, cft,
				  NULL, key);
	if (IS_ERR(kn))
		return PTR_ERR(kn);
4208 | |
4209 | if (cft->file_offset) { |
4210 | struct cgroup_file *cfile = (void *)css + cft->file_offset; |
4211 | |
4212 | timer_setup(&cfile->notify_timer, cgroup_file_notify_timer, 0); |
4213 | |
		spin_lock_irq(&cgroup_file_kn_lock);
		cfile->kn = kn;
		spin_unlock_irq(&cgroup_file_kn_lock);
4217 | } |
4218 | |
4219 | return 0; |
4220 | } |
4221 | |
4222 | /** |
4223 | * cgroup_addrm_files - add or remove files to a cgroup directory |
4224 | * @css: the target css |
4225 | * @cgrp: the target cgroup (usually css->cgroup) |
4226 | * @cfts: array of cftypes to be added |
4227 | * @is_add: whether to add or remove |
4228 | * |
4229 | * Depending on @is_add, add or remove files defined by @cfts on @cgrp. |
4230 | * For removals, this function never fails. |
4231 | */ |
4232 | static int cgroup_addrm_files(struct cgroup_subsys_state *css, |
4233 | struct cgroup *cgrp, struct cftype cfts[], |
4234 | bool is_add) |
4235 | { |
4236 | struct cftype *cft, *cft_end = NULL; |
4237 | int ret = 0; |
4238 | |
4239 | lockdep_assert_held(&cgroup_mutex); |
4240 | |
4241 | restart: |
4242 | for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) { |
4243 | /* does cft->flags tell us to skip this file on @cgrp? */ |
4244 | if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp)) |
4245 | continue; |
4246 | if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp)) |
4247 | continue; |
4248 | if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp)) |
4249 | continue; |
4250 | if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp)) |
4251 | continue; |
4252 | if ((cft->flags & CFTYPE_DEBUG) && !cgroup_debug) |
4253 | continue; |
4254 | if (is_add) { |
4255 | ret = cgroup_add_file(css, cgrp, cft); |
4256 | if (ret) { |
4257 | pr_warn("%s: failed to add %s, err=%d\n" , |
4258 | __func__, cft->name, ret); |
4259 | cft_end = cft; |
4260 | is_add = false; |
4261 | goto restart; |
4262 | } |
4263 | } else { |
4264 | cgroup_rm_file(cgrp, cft); |
4265 | } |
4266 | } |
4267 | return ret; |
4268 | } |
4269 | |
4270 | static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add) |
4271 | { |
4272 | struct cgroup_subsys *ss = cfts[0].ss; |
4273 | struct cgroup *root = &ss->root->cgrp; |
4274 | struct cgroup_subsys_state *css; |
4275 | int ret = 0; |
4276 | |
4277 | lockdep_assert_held(&cgroup_mutex); |
4278 | |
4279 | /* add/rm files for all cgroups created before */ |
4280 | css_for_each_descendant_pre(css, cgroup_css(root, ss)) { |
4281 | struct cgroup *cgrp = css->cgroup; |
4282 | |
4283 | if (!(css->flags & CSS_VISIBLE)) |
4284 | continue; |
4285 | |
4286 | ret = cgroup_addrm_files(css, cgrp, cfts, is_add); |
4287 | if (ret) |
4288 | break; |
4289 | } |
4290 | |
4291 | if (is_add && !ret) |
		kernfs_activate(root->kn);
4293 | return ret; |
4294 | } |
4295 | |
4296 | static void cgroup_exit_cftypes(struct cftype *cfts) |
4297 | { |
4298 | struct cftype *cft; |
4299 | |
4300 | for (cft = cfts; cft->name[0] != '\0'; cft++) { |
4301 | /* free copy for custom atomic_write_len, see init_cftypes() */ |
4302 | if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) |
			kfree(cft->kf_ops);
4304 | cft->kf_ops = NULL; |
4305 | cft->ss = NULL; |
4306 | |
4307 | /* revert flags set by cgroup core while adding @cfts */ |
4308 | cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL | |
4309 | __CFTYPE_ADDED); |
4310 | } |
4311 | } |
4312 | |
4313 | static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) |
4314 | { |
4315 | struct cftype *cft; |
4316 | int ret = 0; |
4317 | |
4318 | for (cft = cfts; cft->name[0] != '\0'; cft++) { |
4319 | struct kernfs_ops *kf_ops; |
4320 | |
4321 | WARN_ON(cft->ss || cft->kf_ops); |
4322 | |
4323 | if (cft->flags & __CFTYPE_ADDED) { |
4324 | ret = -EBUSY; |
4325 | break; |
4326 | } |
4327 | |
4328 | if (cft->seq_start) |
4329 | kf_ops = &cgroup_kf_ops; |
4330 | else |
4331 | kf_ops = &cgroup_kf_single_ops; |
4332 | |
4333 | /* |
4334 | * Ugh... if @cft wants a custom max_write_len, we need to |
4335 | * make a copy of kf_ops to set its atomic_write_len. |
4336 | */ |
4337 | if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) { |
			kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
4339 | if (!kf_ops) { |
4340 | ret = -ENOMEM; |
4341 | break; |
4342 | } |
4343 | kf_ops->atomic_write_len = cft->max_write_len; |
4344 | } |
4345 | |
4346 | cft->kf_ops = kf_ops; |
4347 | cft->ss = ss; |
4348 | cft->flags |= __CFTYPE_ADDED; |
4349 | } |
4350 | |
4351 | if (ret) |
4352 | cgroup_exit_cftypes(cfts); |
4353 | return ret; |
4354 | } |
4355 | |
4356 | static void cgroup_rm_cftypes_locked(struct cftype *cfts) |
4357 | { |
4358 | lockdep_assert_held(&cgroup_mutex); |
4359 | |
	list_del(&cfts->node);
	cgroup_apply_cftypes(cfts, false);
4362 | cgroup_exit_cftypes(cfts); |
4363 | } |
4364 | |
4365 | /** |
4366 | * cgroup_rm_cftypes - remove an array of cftypes from a subsystem |
4367 | * @cfts: zero-length name terminated array of cftypes |
4368 | * |
4369 | * Unregister @cfts. Files described by @cfts are removed from all |
4370 | * existing cgroups and all future cgroups won't have them either. This |
4371 | * function can be called anytime whether @cfts' subsys is attached or not. |
4372 | * |
4373 | * Returns 0 on successful unregistration, -ENOENT if @cfts is not |
4374 | * registered. |
4375 | */ |
4376 | int cgroup_rm_cftypes(struct cftype *cfts) |
4377 | { |
4378 | if (!cfts || cfts[0].name[0] == '\0') |
4379 | return 0; |
4380 | |
4381 | if (!(cfts[0].flags & __CFTYPE_ADDED)) |
4382 | return -ENOENT; |
4383 | |
4384 | cgroup_lock(); |
4385 | cgroup_rm_cftypes_locked(cfts); |
4386 | cgroup_unlock(); |
4387 | return 0; |
4388 | } |
4389 | |
4390 | /** |
4391 | * cgroup_add_cftypes - add an array of cftypes to a subsystem |
4392 | * @ss: target cgroup subsystem |
4393 | * @cfts: zero-length name terminated array of cftypes |
4394 | * |
4395 | * Register @cfts to @ss. Files described by @cfts are created for all |
4396 | * existing cgroups to which @ss is attached and all future cgroups will |
4397 | * have them too. This function can be called anytime whether @ss is |
4398 | * attached or not. |
4399 | * |
4400 | * Returns 0 on successful registration, -errno on failure. Note that this |
4401 | * function currently returns 0 as long as @cfts registration is successful |
4402 | * even if some file creation attempts on existing cgroups fail. |
4403 | */ |
4404 | static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) |
4405 | { |
4406 | int ret; |
4407 | |
	if (!cgroup_ssid_enabled(ss->id))
4409 | return 0; |
4410 | |
4411 | if (!cfts || cfts[0].name[0] == '\0') |
4412 | return 0; |
4413 | |
4414 | ret = cgroup_init_cftypes(ss, cfts); |
4415 | if (ret) |
4416 | return ret; |
4417 | |
4418 | cgroup_lock(); |
4419 | |
	list_add_tail(&cfts->node, &ss->cfts);
	ret = cgroup_apply_cftypes(cfts, true);
4422 | if (ret) |
4423 | cgroup_rm_cftypes_locked(cfts); |
4424 | |
4425 | cgroup_unlock(); |
4426 | return ret; |
4427 | } |
4428 | |
4429 | /** |
4430 | * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy |
4431 | * @ss: target cgroup subsystem |
4432 | * @cfts: zero-length name terminated array of cftypes |
4433 | * |
4434 | * Similar to cgroup_add_cftypes() but the added files are only used for |
4435 | * the default hierarchy. |
4436 | */ |
4437 | int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) |
4438 | { |
4439 | struct cftype *cft; |
4440 | |
4441 | for (cft = cfts; cft && cft->name[0] != '\0'; cft++) |
4442 | cft->flags |= __CFTYPE_ONLY_ON_DFL; |
4443 | return cgroup_add_cftypes(ss, cfts); |
4444 | } |
4445 | |
4446 | /** |
4447 | * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies |
4448 | * @ss: target cgroup subsystem |
4449 | * @cfts: zero-length name terminated array of cftypes |
4450 | * |
4451 | * Similar to cgroup_add_cftypes() but the added files are only used for |
4452 | * the legacy hierarchies. |
4453 | */ |
4454 | int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) |
4455 | { |
4456 | struct cftype *cft; |
4457 | |
4458 | for (cft = cfts; cft && cft->name[0] != '\0'; cft++) |
4459 | cft->flags |= __CFTYPE_NOT_ON_DFL; |
4460 | return cgroup_add_cftypes(ss, cfts); |
4461 | } |
4462 | |
4463 | /** |
4464 | * cgroup_file_notify - generate a file modified event for a cgroup_file |
4465 | * @cfile: target cgroup_file |
4466 | * |
4467 | * @cfile must have been obtained by setting cftype->file_offset. |
4468 | */ |
4469 | void cgroup_file_notify(struct cgroup_file *cfile) |
4470 | { |
4471 | unsigned long flags; |
4472 | |
4473 | spin_lock_irqsave(&cgroup_file_kn_lock, flags); |
4474 | if (cfile->kn) { |
4475 | unsigned long last = cfile->notified_at; |
4476 | unsigned long next = last + CGROUP_FILE_NOTIFY_MIN_INTV; |
4477 | |
4478 | if (time_in_range(jiffies, last, next)) { |
			timer_reduce(&cfile->notify_timer, next);
		} else {
			kernfs_notify(cfile->kn);
4482 | cfile->notified_at = jiffies; |
4483 | } |
4484 | } |
	spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
4486 | } |
4487 | |
4488 | /** |
4489 | * cgroup_file_show - show or hide a hidden cgroup file |
4490 | * @cfile: target cgroup_file obtained by setting cftype->file_offset |
4491 | * @show: whether to show or hide |
4492 | */ |
4493 | void cgroup_file_show(struct cgroup_file *cfile, bool show) |
4494 | { |
4495 | struct kernfs_node *kn; |
4496 | |
	spin_lock_irq(&cgroup_file_kn_lock);
4498 | kn = cfile->kn; |
4499 | kernfs_get(kn); |
	spin_unlock_irq(&cgroup_file_kn_lock);
4501 | |
4502 | if (kn) |
4503 | kernfs_show(kn, show); |
4504 | |
4505 | kernfs_put(kn); |
4506 | } |
4507 | |
4508 | /** |
4509 | * css_next_child - find the next child of a given css |
4510 | * @pos: the current position (%NULL to initiate traversal) |
4511 | * @parent: css whose children to walk |
4512 | * |
4513 | * This function returns the next child of @parent and should be called |
4514 | * under either cgroup_mutex or RCU read lock. The only requirement is |
4515 | * that @parent and @pos are accessible. The next sibling is guaranteed to |
4516 | * be returned regardless of their states. |
4517 | * |
4518 | * If a subsystem synchronizes ->css_online() and the start of iteration, a |
4519 | * css which finished ->css_online() is guaranteed to be visible in the |
4520 | * future iterations and will stay visible until the last reference is put. |
4521 | * A css which hasn't finished ->css_online() or already finished |
4522 | * ->css_offline() may show up during traversal. It's each subsystem's |
4523 | * responsibility to synchronize against on/offlining. |
4524 | */ |
4525 | struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos, |
4526 | struct cgroup_subsys_state *parent) |
4527 | { |
4528 | struct cgroup_subsys_state *next; |
4529 | |
4530 | cgroup_assert_mutex_or_rcu_locked(); |
4531 | |
4532 | /* |
4533 | * @pos could already have been unlinked from the sibling list. |
4534 | * Once a cgroup is removed, its ->sibling.next is no longer |
4535 | * updated when its next sibling changes. CSS_RELEASED is set when |
4536 | * @pos is taken off list, at which time its next pointer is valid, |
4537 | * and, as releases are serialized, the one pointed to by the next |
4538 | * pointer is guaranteed to not have started release yet. This |
4539 | * implies that if we observe !CSS_RELEASED on @pos in this RCU |
4540 | * critical section, the one pointed to by its next pointer is |
4541 | * guaranteed to not have finished its RCU grace period even if we |
4542 | * have dropped rcu_read_lock() in-between iterations. |
4543 | * |
4544 | * If @pos has CSS_RELEASED set, its next pointer can't be |
4545 | * dereferenced; however, as each css is given a monotonically |
4546 | * increasing unique serial number and always appended to the |
4547 | * sibling list, the next one can be found by walking the parent's |
4548 | * children until the first css with higher serial number than |
4549 | * @pos's. While this path can be slower, it happens iff iteration |
4550 | * races against release and the race window is very small. |
4551 | */ |
4552 | if (!pos) { |
4553 | next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling); |
4554 | } else if (likely(!(pos->flags & CSS_RELEASED))) { |
4555 | next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling); |
4556 | } else { |
4557 | list_for_each_entry_rcu(next, &parent->children, sibling, |
4558 | lockdep_is_held(&cgroup_mutex)) |
4559 | if (next->serial_nr > pos->serial_nr) |
4560 | break; |
4561 | } |
4562 | |
4563 | /* |
4564 | * @next, if not pointing to the head, can be dereferenced and is |
4565 | * the next sibling. |
4566 | */ |
4567 | if (&next->sibling != &parent->children) |
4568 | return next; |
4569 | return NULL; |
4570 | } |
4571 | |
4572 | /** |
4573 | * css_next_descendant_pre - find the next descendant for pre-order walk |
4574 | * @pos: the current position (%NULL to initiate traversal) |
4575 | * @root: css whose descendants to walk |
4576 | * |
4577 | * To be used by css_for_each_descendant_pre(). Find the next descendant |
4578 | * to visit for pre-order traversal of @root's descendants. @root is |
4579 | * included in the iteration and the first node to be visited. |
4580 | * |
4581 | * While this function requires cgroup_mutex or RCU read locking, it |
4582 | * doesn't require the whole traversal to be contained in a single critical |
4583 | * section. This function will return the correct next descendant as long |
4584 | * as both @pos and @root are accessible and @pos is a descendant of @root. |
4585 | * |
4586 | * If a subsystem synchronizes ->css_online() and the start of iteration, a |
4587 | * css which finished ->css_online() is guaranteed to be visible in the |
4588 | * future iterations and will stay visible until the last reference is put. |
4589 | * A css which hasn't finished ->css_online() or already finished |
4590 | * ->css_offline() may show up during traversal. It's each subsystem's |
4591 | * responsibility to synchronize against on/offlining. |
4592 | */ |
4593 | struct cgroup_subsys_state * |
4594 | css_next_descendant_pre(struct cgroup_subsys_state *pos, |
4595 | struct cgroup_subsys_state *root) |
4596 | { |
4597 | struct cgroup_subsys_state *next; |
4598 | |
4599 | cgroup_assert_mutex_or_rcu_locked(); |
4600 | |
4601 | /* if first iteration, visit @root */ |
4602 | if (!pos) |
4603 | return root; |
4604 | |
4605 | /* visit the first child if exists */ |
	next = css_next_child(NULL, pos);
4607 | if (next) |
4608 | return next; |
4609 | |
4610 | /* no child, visit my or the closest ancestor's next sibling */ |
4611 | while (pos != root) { |
		next = css_next_child(pos, pos->parent);
4613 | if (next) |
4614 | return next; |
4615 | pos = pos->parent; |
4616 | } |
4617 | |
4618 | return NULL; |
4619 | } |
4620 | EXPORT_SYMBOL_GPL(css_next_descendant_pre); |
4621 | |
4622 | /** |
4623 | * css_rightmost_descendant - return the rightmost descendant of a css |
4624 | * @pos: css of interest |
4625 | * |
4626 | * Return the rightmost descendant of @pos. If there's no descendant, @pos |
4627 | * is returned. This can be used during pre-order traversal to skip |
4628 | * subtree of @pos. |
4629 | * |
4630 | * While this function requires cgroup_mutex or RCU read locking, it |
4631 | * doesn't require the whole traversal to be contained in a single critical |
4632 | * section. This function will return the correct rightmost descendant as |
4633 | * long as @pos is accessible. |
4634 | */ |
4635 | struct cgroup_subsys_state * |
4636 | css_rightmost_descendant(struct cgroup_subsys_state *pos) |
4637 | { |
4638 | struct cgroup_subsys_state *last, *tmp; |
4639 | |
4640 | cgroup_assert_mutex_or_rcu_locked(); |
4641 | |
4642 | do { |
4643 | last = pos; |
4644 | /* ->prev isn't RCU safe, walk ->next till the end */ |
4645 | pos = NULL; |
4646 | css_for_each_child(tmp, last) |
4647 | pos = tmp; |
4648 | } while (pos); |
4649 | |
4650 | return last; |
4651 | } |
4652 | |
4653 | static struct cgroup_subsys_state * |
4654 | css_leftmost_descendant(struct cgroup_subsys_state *pos) |
4655 | { |
4656 | struct cgroup_subsys_state *last; |
4657 | |
4658 | do { |
4659 | last = pos; |
		pos = css_next_child(NULL, pos);
4661 | } while (pos); |
4662 | |
4663 | return last; |
4664 | } |
4665 | |
4666 | /** |
4667 | * css_next_descendant_post - find the next descendant for post-order walk |
4668 | * @pos: the current position (%NULL to initiate traversal) |
4669 | * @root: css whose descendants to walk |
4670 | * |
4671 | * To be used by css_for_each_descendant_post(). Find the next descendant |
4672 | * to visit for post-order traversal of @root's descendants. @root is |
4673 | * included in the iteration and the last node to be visited. |
4674 | * |
4675 | * While this function requires cgroup_mutex or RCU read locking, it |
4676 | * doesn't require the whole traversal to be contained in a single critical |
4677 | * section. This function will return the correct next descendant as long |
 * as both @pos and @root are accessible and @pos is a descendant of
 * @root.
4680 | * |
4681 | * If a subsystem synchronizes ->css_online() and the start of iteration, a |
4682 | * css which finished ->css_online() is guaranteed to be visible in the |
4683 | * future iterations and will stay visible until the last reference is put. |
4684 | * A css which hasn't finished ->css_online() or already finished |
4685 | * ->css_offline() may show up during traversal. It's each subsystem's |
4686 | * responsibility to synchronize against on/offlining. |
4687 | */ |
4688 | struct cgroup_subsys_state * |
4689 | css_next_descendant_post(struct cgroup_subsys_state *pos, |
4690 | struct cgroup_subsys_state *root) |
4691 | { |
4692 | struct cgroup_subsys_state *next; |
4693 | |
4694 | cgroup_assert_mutex_or_rcu_locked(); |
4695 | |
4696 | /* if first iteration, visit leftmost descendant which may be @root */ |
4697 | if (!pos) |
		return css_leftmost_descendant(root);
4699 | |
4700 | /* if we visited @root, we're done */ |
4701 | if (pos == root) |
4702 | return NULL; |
4703 | |
4704 | /* if there's an unvisited sibling, visit its leftmost descendant */ |
	next = css_next_child(pos, pos->parent);
	if (next)
		return css_leftmost_descendant(next);
4708 | |
4709 | /* no sibling left, visit parent */ |
4710 | return pos->parent; |
4711 | } |
4712 | |
4713 | /** |
4714 | * css_has_online_children - does a css have online children |
4715 | * @css: the target css |
4716 | * |
4717 | * Returns %true if @css has any online children; otherwise, %false. This |
4718 | * function can be called from any context but the caller is responsible |
4719 | * for synchronizing against on/offlining as necessary. |
4720 | */ |
4721 | bool css_has_online_children(struct cgroup_subsys_state *css) |
4722 | { |
4723 | struct cgroup_subsys_state *child; |
4724 | bool ret = false; |
4725 | |
4726 | rcu_read_lock(); |
4727 | css_for_each_child(child, css) { |
4728 | if (child->flags & CSS_ONLINE) { |
4729 | ret = true; |
4730 | break; |
4731 | } |
4732 | } |
4733 | rcu_read_unlock(); |
4734 | return ret; |
4735 | } |
4736 | |
4737 | static struct css_set *css_task_iter_next_css_set(struct css_task_iter *it) |
4738 | { |
4739 | struct list_head *l; |
4740 | struct cgrp_cset_link *link; |
4741 | struct css_set *cset; |
4742 | |
4743 | lockdep_assert_held(&css_set_lock); |
4744 | |
4745 | /* find the next threaded cset */ |
4746 | if (it->tcset_pos) { |
4747 | l = it->tcset_pos->next; |
4748 | |
4749 | if (l != it->tcset_head) { |
4750 | it->tcset_pos = l; |
4751 | return container_of(l, struct css_set, |
4752 | threaded_csets_node); |
4753 | } |
4754 | |
4755 | it->tcset_pos = NULL; |
4756 | } |
4757 | |
4758 | /* find the next cset */ |
4759 | l = it->cset_pos; |
4760 | l = l->next; |
4761 | if (l == it->cset_head) { |
4762 | it->cset_pos = NULL; |
4763 | return NULL; |
4764 | } |
4765 | |
4766 | if (it->ss) { |
4767 | cset = container_of(l, struct css_set, e_cset_node[it->ss->id]); |
4768 | } else { |
4769 | link = list_entry(l, struct cgrp_cset_link, cset_link); |
4770 | cset = link->cset; |
4771 | } |
4772 | |
4773 | it->cset_pos = l; |
4774 | |
4775 | /* initialize threaded css_set walking */ |
4776 | if (it->flags & CSS_TASK_ITER_THREADED) { |
4777 | if (it->cur_dcset) |
			put_css_set_locked(it->cur_dcset);
4779 | it->cur_dcset = cset; |
4780 | get_css_set(cset); |
4781 | |
4782 | it->tcset_head = &cset->threaded_csets; |
4783 | it->tcset_pos = &cset->threaded_csets; |
4784 | } |
4785 | |
4786 | return cset; |
4787 | } |
4788 | |
4789 | /** |
4790 | * css_task_iter_advance_css_set - advance a task iterator to the next css_set |
4791 | * @it: the iterator to advance |
4792 | * |
4793 | * Advance @it to the next css_set to walk. |
4794 | */ |
4795 | static void css_task_iter_advance_css_set(struct css_task_iter *it) |
4796 | { |
4797 | struct css_set *cset; |
4798 | |
4799 | lockdep_assert_held(&css_set_lock); |
4800 | |
	/* Advance to the next non-empty css_set and find first non-empty tasks list */
	while ((cset = css_task_iter_next_css_set(it))) {
		if (!list_empty(&cset->tasks)) {
			it->cur_tasks_head = &cset->tasks;
			break;
		} else if (!list_empty(&cset->mg_tasks)) {
			it->cur_tasks_head = &cset->mg_tasks;
			break;
		} else if (!list_empty(&cset->dying_tasks)) {
4810 | it->cur_tasks_head = &cset->dying_tasks; |
4811 | break; |
4812 | } |
4813 | } |
4814 | if (!cset) { |
4815 | it->task_pos = NULL; |
4816 | return; |
4817 | } |
4818 | it->task_pos = it->cur_tasks_head->next; |
4819 | |
4820 | /* |
4821 | * We don't keep css_sets locked across iteration steps and thus |
4822 | * need to take steps to ensure that iteration can be resumed after |
4823 | * the lock is re-acquired. Iteration is performed at two levels - |
4824 | * css_sets and tasks in them. |
4825 | * |
4826 | * Once created, a css_set never leaves its cgroup lists, so a |
4827 | * pinned css_set is guaranteed to stay put and we can resume |
4828 | * iteration afterwards. |
4829 | * |
4830 | * Tasks may leave @cset across iteration steps. This is resolved |
4831 | * by registering each iterator with the css_set currently being |
4832 | * walked and making css_set_move_task() advance iterators whose |
4833 | * next task is leaving. |
4834 | */ |
4835 | if (it->cur_cset) { |
		list_del(&it->iters_node);
		put_css_set_locked(it->cur_cset);
4838 | } |
4839 | get_css_set(cset); |
4840 | it->cur_cset = cset; |
	list_add(&it->iters_node, &cset->task_iters);
4842 | } |
4843 | |
4844 | static void css_task_iter_skip(struct css_task_iter *it, |
4845 | struct task_struct *task) |
4846 | { |
4847 | lockdep_assert_held(&css_set_lock); |
4848 | |
4849 | if (it->task_pos == &task->cg_list) { |
4850 | it->task_pos = it->task_pos->next; |
4851 | it->flags |= CSS_TASK_ITER_SKIPPED; |
4852 | } |
4853 | } |
4854 | |
4855 | static void css_task_iter_advance(struct css_task_iter *it) |
4856 | { |
4857 | struct task_struct *task; |
4858 | |
4859 | lockdep_assert_held(&css_set_lock); |
4860 | repeat: |
4861 | if (it->task_pos) { |
4862 | /* |
4863 | * Advance iterator to find next entry. We go through cset |
4864 | * tasks, mg_tasks and dying_tasks, when consumed we move onto |
4865 | * the next cset. |
4866 | */ |
4867 | if (it->flags & CSS_TASK_ITER_SKIPPED) |
4868 | it->flags &= ~CSS_TASK_ITER_SKIPPED; |
4869 | else |
4870 | it->task_pos = it->task_pos->next; |
4871 | |
4872 | if (it->task_pos == &it->cur_cset->tasks) { |
4873 | it->cur_tasks_head = &it->cur_cset->mg_tasks; |
4874 | it->task_pos = it->cur_tasks_head->next; |
4875 | } |
4876 | if (it->task_pos == &it->cur_cset->mg_tasks) { |
4877 | it->cur_tasks_head = &it->cur_cset->dying_tasks; |
4878 | it->task_pos = it->cur_tasks_head->next; |
4879 | } |
4880 | if (it->task_pos == &it->cur_cset->dying_tasks) |
4881 | css_task_iter_advance_css_set(it); |
4882 | } else { |
4883 | /* called from start, proceed to the first cset */ |
4884 | css_task_iter_advance_css_set(it); |
4885 | } |
4886 | |
4887 | if (!it->task_pos) |
4888 | return; |
4889 | |
4890 | task = list_entry(it->task_pos, struct task_struct, cg_list); |
4891 | |
4892 | if (it->flags & CSS_TASK_ITER_PROCS) { |
4893 | /* if PROCS, skip over tasks which aren't group leaders */ |
		if (!thread_group_leader(task))
4895 | goto repeat; |
4896 | |
4897 | /* and dying leaders w/o live member threads */ |
		if (it->cur_tasks_head == &it->cur_cset->dying_tasks &&
		    !atomic_read(&task->signal->live))
4900 | goto repeat; |
4901 | } else { |
4902 | /* skip all dying ones */ |
4903 | if (it->cur_tasks_head == &it->cur_cset->dying_tasks) |
4904 | goto repeat; |
4905 | } |
4906 | } |
4907 | |
4908 | /** |
4909 | * css_task_iter_start - initiate task iteration |
4910 | * @css: the css to walk tasks of |
4911 | * @flags: CSS_TASK_ITER_* flags |
4912 | * @it: the task iterator to use |
4913 | * |
4914 | * Initiate iteration through the tasks of @css. The caller can call |
4915 | * css_task_iter_next() to walk through the tasks until the function |
4916 | * returns NULL. On completion of iteration, css_task_iter_end() must be |
4917 | * called. |
4918 | */ |
4919 | void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags, |
4920 | struct css_task_iter *it) |
4921 | { |
4922 | unsigned long irqflags; |
4923 | |
4924 | memset(it, 0, sizeof(*it)); |
4925 | |
4926 | spin_lock_irqsave(&css_set_lock, irqflags); |
4927 | |
4928 | it->ss = css->ss; |
4929 | it->flags = flags; |
4930 | |
4931 | if (CGROUP_HAS_SUBSYS_CONFIG && it->ss) |
4932 | it->cset_pos = &css->cgroup->e_csets[css->ss->id]; |
4933 | else |
4934 | it->cset_pos = &css->cgroup->cset_links; |
4935 | |
4936 | it->cset_head = it->cset_pos; |
4937 | |
4938 | css_task_iter_advance(it); |
4939 | |
	spin_unlock_irqrestore(&css_set_lock, irqflags);
4941 | } |
4942 | |
4943 | /** |
4944 | * css_task_iter_next - return the next task for the iterator |
4945 | * @it: the task iterator being iterated |
4946 | * |
4947 | * The "next" function for task iteration. @it should have been |
4948 | * initialized via css_task_iter_start(). Returns NULL when the iteration |
4949 | * reaches the end. |
4950 | */ |
4951 | struct task_struct *css_task_iter_next(struct css_task_iter *it) |
4952 | { |
4953 | unsigned long irqflags; |
4954 | |
4955 | if (it->cur_task) { |
		put_task_struct(it->cur_task);
4957 | it->cur_task = NULL; |
4958 | } |
4959 | |
4960 | spin_lock_irqsave(&css_set_lock, irqflags); |
4961 | |
4962 | /* @it may be half-advanced by skips, finish advancing */ |
4963 | if (it->flags & CSS_TASK_ITER_SKIPPED) |
4964 | css_task_iter_advance(it); |
4965 | |
4966 | if (it->task_pos) { |
4967 | it->cur_task = list_entry(it->task_pos, struct task_struct, |
4968 | cg_list); |
		get_task_struct(it->cur_task);
4970 | css_task_iter_advance(it); |
4971 | } |
4972 | |
	spin_unlock_irqrestore(&css_set_lock, irqflags);
4974 | |
4975 | return it->cur_task; |
4976 | } |
4977 | |
4978 | /** |
4979 | * css_task_iter_end - finish task iteration |
4980 | * @it: the task iterator to finish |
4981 | * |
4982 | * Finish task iteration started by css_task_iter_start(). |
4983 | */ |
4984 | void css_task_iter_end(struct css_task_iter *it) |
4985 | { |
4986 | unsigned long irqflags; |
4987 | |
4988 | if (it->cur_cset) { |
4989 | spin_lock_irqsave(&css_set_lock, irqflags); |
		list_del(&it->iters_node);
		put_css_set_locked(it->cur_cset);
		spin_unlock_irqrestore(&css_set_lock, irqflags);
4993 | } |
4994 | |
4995 | if (it->cur_dcset) |
		put_css_set(it->cur_dcset);
4997 | |
4998 | if (it->cur_task) |
		put_task_struct(it->cur_task);
5000 | } |
5001 | |
5002 | static void cgroup_procs_release(struct kernfs_open_file *of) |
5003 | { |
5004 | struct cgroup_file_ctx *ctx = of->priv; |
5005 | |
5006 | if (ctx->procs.started) |
		css_task_iter_end(&ctx->procs.iter);
5008 | } |
5009 | |
5010 | static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos) |
5011 | { |
5012 | struct kernfs_open_file *of = s->private; |
5013 | struct cgroup_file_ctx *ctx = of->priv; |
5014 | |
5015 | if (pos) |
5016 | (*pos)++; |
5017 | |
	return css_task_iter_next(&ctx->procs.iter);
5019 | } |
5020 | |
5021 | static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos, |
5022 | unsigned int iter_flags) |
5023 | { |
5024 | struct kernfs_open_file *of = s->private; |
	struct cgroup *cgrp = seq_css(s)->cgroup;
5026 | struct cgroup_file_ctx *ctx = of->priv; |
5027 | struct css_task_iter *it = &ctx->procs.iter; |
5028 | |
5029 | /* |
5030 | * When a seq_file is seeked, it's always traversed sequentially |
5031 | * from position 0, so we can simply keep iterating on !0 *pos. |
5032 | */ |
5033 | if (!ctx->procs.started) { |
5034 | if (WARN_ON_ONCE((*pos))) |
			return ERR_PTR(-EINVAL);
		css_task_iter_start(&cgrp->self, iter_flags, it);
5037 | ctx->procs.started = true; |
5038 | } else if (!(*pos)) { |
5039 | css_task_iter_end(it); |
		css_task_iter_start(&cgrp->self, iter_flags, it);
5041 | } else |
5042 | return it->cur_task; |
5043 | |
5044 | return cgroup_procs_next(s, NULL, NULL); |
5045 | } |
5046 | |
5047 | static void *cgroup_procs_start(struct seq_file *s, loff_t *pos) |
5048 | { |
	struct cgroup *cgrp = seq_css(s)->cgroup;
5050 | |
5051 | /* |
5052 | * All processes of a threaded subtree belong to the domain cgroup |
5053 | * of the subtree. Only threads can be distributed across the |
5054 | * subtree. Reject reads on cgroup.procs in the subtree proper. |
5055 | * They're always empty anyway. |
5056 | */ |
5057 | if (cgroup_is_threaded(cgrp)) |
		return ERR_PTR(-EOPNOTSUPP);
5059 | |
	return __cgroup_procs_start(s, pos, CSS_TASK_ITER_PROCS |
					    CSS_TASK_ITER_THREADED);
5062 | } |
5063 | |
5064 | static int cgroup_procs_show(struct seq_file *s, void *v) |
5065 | { |
	seq_printf(s, "%d\n", task_pid_vnr(v));
5067 | return 0; |
5068 | } |
5069 | |
5070 | static int cgroup_may_write(const struct cgroup *cgrp, struct super_block *sb) |
5071 | { |
5072 | int ret; |
5073 | struct inode *inode; |
5074 | |
5075 | lockdep_assert_held(&cgroup_mutex); |
5076 | |
	inode = kernfs_get_inode(sb, cgrp->procs_file.kn);
5078 | if (!inode) |
5079 | return -ENOMEM; |
5080 | |
5081 | ret = inode_permission(&nop_mnt_idmap, inode, MAY_WRITE); |
5082 | iput(inode); |
5083 | return ret; |
5084 | } |
5085 | |
5086 | static int cgroup_procs_write_permission(struct cgroup *src_cgrp, |
5087 | struct cgroup *dst_cgrp, |
5088 | struct super_block *sb, |
5089 | struct cgroup_namespace *ns) |
5090 | { |
5091 | struct cgroup *com_cgrp = src_cgrp; |
5092 | int ret; |
5093 | |
5094 | lockdep_assert_held(&cgroup_mutex); |
5095 | |
5096 | /* find the common ancestor */ |
	while (!cgroup_is_descendant(dst_cgrp, com_cgrp))
		com_cgrp = cgroup_parent(com_cgrp);
5099 | |
5100 | /* %current should be authorized to migrate to the common ancestor */ |
	ret = cgroup_may_write(com_cgrp, sb);
5102 | if (ret) |
5103 | return ret; |
5104 | |
5105 | /* |
5106 | * If namespaces are delegation boundaries, %current must be able |
5107 | * to see both source and destination cgroups from its namespace. |
5108 | */ |
5109 | if ((cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE) && |
	    (!cgroup_is_descendant(src_cgrp, ns->root_cset->dfl_cgrp) ||
	     !cgroup_is_descendant(dst_cgrp, ns->root_cset->dfl_cgrp)))
5112 | return -ENOENT; |
5113 | |
5114 | return 0; |
5115 | } |
5116 | |
5117 | static int cgroup_attach_permissions(struct cgroup *src_cgrp, |
5118 | struct cgroup *dst_cgrp, |
5119 | struct super_block *sb, bool threadgroup, |
5120 | struct cgroup_namespace *ns) |
5121 | { |
5122 | int ret = 0; |
5123 | |
5124 | ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, sb, ns); |
5125 | if (ret) |
5126 | return ret; |
5127 | |
5128 | ret = cgroup_migrate_vet_dst(dst_cgrp); |
5129 | if (ret) |
5130 | return ret; |
5131 | |
5132 | if (!threadgroup && (src_cgrp->dom_cgrp != dst_cgrp->dom_cgrp)) |
5133 | ret = -EOPNOTSUPP; |
5134 | |
5135 | return ret; |
5136 | } |
5137 | |
5138 | static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, |
5139 | bool threadgroup) |
5140 | { |
5141 | struct cgroup_file_ctx *ctx = of->priv; |
5142 | struct cgroup *src_cgrp, *dst_cgrp; |
5143 | struct task_struct *task; |
5144 | const struct cred *saved_cred; |
5145 | ssize_t ret; |
5146 | bool threadgroup_locked; |
5147 | |
	dst_cgrp = cgroup_kn_lock_live(of->kn, false);
5149 | if (!dst_cgrp) |
5150 | return -ENODEV; |
5151 | |
	task = cgroup_procs_write_start(buf, threadgroup, &threadgroup_locked);
	ret = PTR_ERR_OR_ZERO(task);
5154 | if (ret) |
5155 | goto out_unlock; |
5156 | |
5157 | /* find the source cgroup */ |
	spin_lock_irq(&css_set_lock);
	src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
	spin_unlock_irq(&css_set_lock);
5161 | |
5162 | /* |
	 * Process and thread migrations follow the same delegation rule. Check
5164 | * permissions using the credentials from file open to protect against |
5165 | * inherited fd attacks. |
5166 | */ |
5167 | saved_cred = override_creds(of->file->f_cred); |
	ret = cgroup_attach_permissions(src_cgrp, dst_cgrp,
					of->file->f_path.dentry->d_sb,
					threadgroup, ctx->ns);
5171 | revert_creds(saved_cred); |
5172 | if (ret) |
5173 | goto out_finish; |
5174 | |
	ret = cgroup_attach_task(dst_cgrp, task, threadgroup);
5176 | |
5177 | out_finish: |
5178 | cgroup_procs_write_finish(task, threadgroup_locked); |
5179 | out_unlock: |
	cgroup_kn_unlock(of->kn);
5181 | |
5182 | return ret; |
5183 | } |
5184 | |
5185 | static ssize_t cgroup_procs_write(struct kernfs_open_file *of, |
5186 | char *buf, size_t nbytes, loff_t off) |
5187 | { |
	return __cgroup_procs_write(of, buf, true) ?: nbytes;
5189 | } |
5190 | |
5191 | static void *cgroup_threads_start(struct seq_file *s, loff_t *pos) |
5192 | { |
	return __cgroup_procs_start(s, pos, 0);
5194 | } |
5195 | |
5196 | static ssize_t cgroup_threads_write(struct kernfs_open_file *of, |
5197 | char *buf, size_t nbytes, loff_t off) |
5198 | { |
	return __cgroup_procs_write(of, buf, false) ?: nbytes;
5200 | } |
5201 | |
5202 | /* cgroup core interface files for the default hierarchy */ |
5203 | static struct cftype cgroup_base_files[] = { |
5204 | { |
5205 | .name = "cgroup.type" , |
5206 | .flags = CFTYPE_NOT_ON_ROOT, |
5207 | .seq_show = cgroup_type_show, |
5208 | .write = cgroup_type_write, |
5209 | }, |
5210 | { |
5211 | .name = "cgroup.procs" , |
5212 | .flags = CFTYPE_NS_DELEGATABLE, |
5213 | .file_offset = offsetof(struct cgroup, procs_file), |
5214 | .release = cgroup_procs_release, |
5215 | .seq_start = cgroup_procs_start, |
5216 | .seq_next = cgroup_procs_next, |
5217 | .seq_show = cgroup_procs_show, |
5218 | .write = cgroup_procs_write, |
5219 | }, |
5220 | { |
5221 | .name = "cgroup.threads" , |
5222 | .flags = CFTYPE_NS_DELEGATABLE, |
5223 | .release = cgroup_procs_release, |
5224 | .seq_start = cgroup_threads_start, |
5225 | .seq_next = cgroup_procs_next, |
5226 | .seq_show = cgroup_procs_show, |
5227 | .write = cgroup_threads_write, |
5228 | }, |
5229 | { |
5230 | .name = "cgroup.controllers" , |
5231 | .seq_show = cgroup_controllers_show, |
5232 | }, |
5233 | { |
5234 | .name = "cgroup.subtree_control" , |
5235 | .flags = CFTYPE_NS_DELEGATABLE, |
5236 | .seq_show = cgroup_subtree_control_show, |
5237 | .write = cgroup_subtree_control_write, |
5238 | }, |
5239 | { |
5240 | .name = "cgroup.events" , |
5241 | .flags = CFTYPE_NOT_ON_ROOT, |
5242 | .file_offset = offsetof(struct cgroup, events_file), |
5243 | .seq_show = cgroup_events_show, |
5244 | }, |
5245 | { |
5246 | .name = "cgroup.max.descendants" , |
5247 | .seq_show = cgroup_max_descendants_show, |
5248 | .write = cgroup_max_descendants_write, |
5249 | }, |
5250 | { |
5251 | .name = "cgroup.max.depth" , |
5252 | .seq_show = cgroup_max_depth_show, |
5253 | .write = cgroup_max_depth_write, |
5254 | }, |
5255 | { |
5256 | .name = "cgroup.stat" , |
5257 | .seq_show = cgroup_stat_show, |
5258 | }, |
5259 | { |
5260 | .name = "cgroup.freeze" , |
5261 | .flags = CFTYPE_NOT_ON_ROOT, |
5262 | .seq_show = cgroup_freeze_show, |
5263 | .write = cgroup_freeze_write, |
5264 | }, |
5265 | { |
5266 | .name = "cgroup.kill" , |
5267 | .flags = CFTYPE_NOT_ON_ROOT, |
5268 | .write = cgroup_kill_write, |
5269 | }, |
5270 | { |
5271 | .name = "cpu.stat" , |
5272 | .seq_show = cpu_stat_show, |
5273 | }, |
5274 | { |
5275 | .name = "cpu.stat.local" , |
5276 | .seq_show = cpu_local_stat_show, |
5277 | }, |
5278 | { } /* terminate */ |
5279 | }; |
5280 | |
5281 | static struct cftype cgroup_psi_files[] = { |
5282 | #ifdef CONFIG_PSI |
5283 | { |
5284 | .name = "io.pressure" , |
5285 | .file_offset = offsetof(struct cgroup, psi_files[PSI_IO]), |
5286 | .seq_show = cgroup_io_pressure_show, |
5287 | .write = cgroup_io_pressure_write, |
5288 | .poll = cgroup_pressure_poll, |
5289 | .release = cgroup_pressure_release, |
5290 | }, |
5291 | { |
5292 | .name = "memory.pressure" , |
5293 | .file_offset = offsetof(struct cgroup, psi_files[PSI_MEM]), |
5294 | .seq_show = cgroup_memory_pressure_show, |
5295 | .write = cgroup_memory_pressure_write, |
5296 | .poll = cgroup_pressure_poll, |
5297 | .release = cgroup_pressure_release, |
5298 | }, |
5299 | { |
5300 | .name = "cpu.pressure" , |
5301 | .file_offset = offsetof(struct cgroup, psi_files[PSI_CPU]), |
5302 | .seq_show = cgroup_cpu_pressure_show, |
5303 | .write = cgroup_cpu_pressure_write, |
5304 | .poll = cgroup_pressure_poll, |
5305 | .release = cgroup_pressure_release, |
5306 | }, |
5307 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
5308 | { |
5309 | .name = "irq.pressure" , |
5310 | .file_offset = offsetof(struct cgroup, psi_files[PSI_IRQ]), |
5311 | .seq_show = cgroup_irq_pressure_show, |
5312 | .write = cgroup_irq_pressure_write, |
5313 | .poll = cgroup_pressure_poll, |
5314 | .release = cgroup_pressure_release, |
5315 | }, |
5316 | #endif |
5317 | { |
5318 | .name = "cgroup.pressure" , |
5319 | .seq_show = cgroup_pressure_show, |
5320 | .write = cgroup_pressure_write, |
5321 | }, |
5322 | #endif /* CONFIG_PSI */ |
5323 | { } /* terminate */ |
5324 | }; |
5325 | |
5326 | /* |
5327 | * css destruction is four-stage process. |
5328 | * |
5329 | * 1. Destruction starts. Killing of the percpu_ref is initiated. |
5330 | * Implemented in kill_css(). |
5331 | * |
5332 | * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs |
5333 | * and thus css_tryget_online() is guaranteed to fail, the css can be |
5334 | * offlined by invoking offline_css(). After offlining, the base ref is |
5335 | * put. Implemented in css_killed_work_fn(). |
5336 | * |
5337 | * 3. When the percpu_ref reaches zero, the only possible remaining |
5338 | * accessors are inside RCU read sections. css_release() schedules the |
5339 | * RCU callback. |
5340 | * |
5341 | * 4. After the grace period, the css can be freed. Implemented in |
5342 | * css_free_rwork_fn(). |
5343 | * |
 * It is actually hairier because both steps 2 and 4 require process
 * context and thus involve punting to css->destroy_work, adding two
 * additional steps to the already complex sequence.
5347 | */ |
5348 | static void css_free_rwork_fn(struct work_struct *work) |
5349 | { |
5350 | struct cgroup_subsys_state *css = container_of(to_rcu_work(work), |
5351 | struct cgroup_subsys_state, destroy_rwork); |
5352 | struct cgroup_subsys *ss = css->ss; |
5353 | struct cgroup *cgrp = css->cgroup; |
5354 | |
	percpu_ref_exit(&css->refcnt);
5356 | |
5357 | if (ss) { |
5358 | /* css free path */ |
5359 | struct cgroup_subsys_state *parent = css->parent; |
5360 | int id = css->id; |
5361 | |
5362 | ss->css_free(css); |
		cgroup_idr_remove(&ss->css_idr, id);
5364 | cgroup_put(cgrp); |
5365 | |
5366 | if (parent) |
5367 | css_put(parent); |
5368 | } else { |
5369 | /* cgroup free path */ |
		atomic_dec(&cgrp->root->nr_cgrps);
		cgroup1_pidlist_destroy_all(cgrp);
		cancel_work_sync(&cgrp->release_agent_work);
		bpf_cgrp_storage_free(cgrp);
5374 | |
5375 | if (cgroup_parent(cgrp)) { |
5376 | /* |
5377 | * We get a ref to the parent, and put the ref when |
5378 | * this cgroup is being freed, so it's guaranteed |
5379 | * that the parent won't be destroyed before its |
5380 | * children. |
5381 | */ |
			cgroup_put(cgroup_parent(cgrp));
			kernfs_put(cgrp->kn);
			psi_cgroup_free(cgrp);
			cgroup_rstat_exit(cgrp);
			kfree(cgrp);
5387 | } else { |
5388 | /* |
5389 | * This is root cgroup's refcnt reaching zero, |
5390 | * which indicates that the root should be |
5391 | * released. |
5392 | */ |
			cgroup_destroy_root(cgrp->root);
5394 | } |
5395 | } |
5396 | } |
5397 | |
5398 | static void css_release_work_fn(struct work_struct *work) |
5399 | { |
5400 | struct cgroup_subsys_state *css = |
5401 | container_of(work, struct cgroup_subsys_state, destroy_work); |
5402 | struct cgroup_subsys *ss = css->ss; |
5403 | struct cgroup *cgrp = css->cgroup; |
5404 | |
5405 | cgroup_lock(); |
5406 | |
5407 | css->flags |= CSS_RELEASED; |
	list_del_rcu(&css->sibling);
5409 | |
5410 | if (ss) { |
5411 | /* css release path */ |
		if (!list_empty(&css->rstat_css_node)) {
			cgroup_rstat_flush(cgrp);
			list_del_rcu(&css->rstat_css_node);
5415 | } |
5416 | |
		cgroup_idr_replace(&ss->css_idr, NULL, css->id);
5418 | if (ss->css_released) |
5419 | ss->css_released(css); |
5420 | } else { |
5421 | struct cgroup *tcgrp; |
5422 | |
5423 | /* cgroup release path */ |
5424 | TRACE_CGROUP_PATH(release, cgrp); |
5425 | |
5426 | cgroup_rstat_flush(cgrp); |
5427 | |
		spin_lock_irq(&css_set_lock);
		for (tcgrp = cgroup_parent(cgrp); tcgrp;
		     tcgrp = cgroup_parent(tcgrp))
			tcgrp->nr_dying_descendants--;
		spin_unlock_irq(&css_set_lock);
5433 | |
5434 | /* |
5435 | * There are two control paths which try to determine |
5436 | * cgroup from dentry without going through kernfs - |
5437 | * cgroupstats_build() and css_tryget_online_from_dir(). |
5438 | * Those are supported by RCU protecting clearing of |
5439 | * cgrp->kn->priv backpointer. |
5440 | */ |
5441 | if (cgrp->kn) |
5442 | RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, |
5443 | NULL); |
5444 | } |
5445 | |
5446 | cgroup_unlock(); |
5447 | |
5448 | INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn); |
	queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
5450 | } |
5451 | |
5452 | static void css_release(struct percpu_ref *ref) |
5453 | { |
5454 | struct cgroup_subsys_state *css = |
5455 | container_of(ref, struct cgroup_subsys_state, refcnt); |
5456 | |
5457 | INIT_WORK(&css->destroy_work, css_release_work_fn); |
	queue_work(cgroup_destroy_wq, &css->destroy_work);
5459 | } |
5460 | |
5461 | static void init_and_link_css(struct cgroup_subsys_state *css, |
5462 | struct cgroup_subsys *ss, struct cgroup *cgrp) |
5463 | { |
5464 | lockdep_assert_held(&cgroup_mutex); |
5465 | |
5466 | cgroup_get_live(cgrp); |
5467 | |
5468 | memset(css, 0, sizeof(*css)); |
5469 | css->cgroup = cgrp; |
5470 | css->ss = ss; |
5471 | css->id = -1; |
	INIT_LIST_HEAD(&css->sibling);
	INIT_LIST_HEAD(&css->children);
	INIT_LIST_HEAD(&css->rstat_css_node);
	css->serial_nr = css_serial_nr_next++;
	atomic_set(&css->online_cnt, 0);
5477 | |
5478 | if (cgroup_parent(cgrp)) { |
		css->parent = cgroup_css(cgroup_parent(cgrp), ss);
5480 | css_get(css->parent); |
5481 | } |
5482 | |
5483 | if (ss->css_rstat_flush) |
		list_add_rcu(&css->rstat_css_node, &cgrp->rstat_css_list);
5485 | |
5486 | BUG_ON(cgroup_css(cgrp, ss)); |
5487 | } |
5488 | |
5489 | /* invoke ->css_online() on a new CSS and mark it online if successful */ |
5490 | static int online_css(struct cgroup_subsys_state *css) |
5491 | { |
5492 | struct cgroup_subsys *ss = css->ss; |
5493 | int ret = 0; |
5494 | |
5495 | lockdep_assert_held(&cgroup_mutex); |
5496 | |
5497 | if (ss->css_online) |
5498 | ret = ss->css_online(css); |
5499 | if (!ret) { |
5500 | css->flags |= CSS_ONLINE; |
5501 | rcu_assign_pointer(css->cgroup->subsys[ss->id], css); |
5502 | |
		atomic_inc(&css->online_cnt);
		if (css->parent)
			atomic_inc(&css->parent->online_cnt);
5506 | } |
5507 | return ret; |
5508 | } |
5509 | |
5510 | /* if the CSS is online, invoke ->css_offline() on it and mark it offline */ |
5511 | static void offline_css(struct cgroup_subsys_state *css) |
5512 | { |
5513 | struct cgroup_subsys *ss = css->ss; |
5514 | |
5515 | lockdep_assert_held(&cgroup_mutex); |
5516 | |
5517 | if (!(css->flags & CSS_ONLINE)) |
5518 | return; |
5519 | |
5520 | if (ss->css_offline) |
5521 | ss->css_offline(css); |
5522 | |
5523 | css->flags &= ~CSS_ONLINE; |
5524 | RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL); |
5525 | |
5526 | wake_up_all(&css->cgroup->offline_waitq); |
5527 | } |
5528 | |
5529 | /** |
5530 | * css_create - create a cgroup_subsys_state |
5531 | * @cgrp: the cgroup new css will be associated with |
5532 | * @ss: the subsys of new css |
5533 | * |
5534 | * Create a new css associated with @cgrp - @ss pair. On success, the new |
5535 | * css is online and installed in @cgrp. This function doesn't create the |
5536 | * interface files. Returns 0 on success, -errno on failure. |
5537 | */ |
5538 | static struct cgroup_subsys_state *css_create(struct cgroup *cgrp, |
5539 | struct cgroup_subsys *ss) |
5540 | { |
5541 | struct cgroup *parent = cgroup_parent(cgrp); |
	struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
5543 | struct cgroup_subsys_state *css; |
5544 | int err; |
5545 | |
5546 | lockdep_assert_held(&cgroup_mutex); |
5547 | |
5548 | css = ss->css_alloc(parent_css); |
5549 | if (!css) |
		css = ERR_PTR(-ENOMEM);
	if (IS_ERR(css))
5552 | return css; |
5553 | |
5554 | init_and_link_css(css, ss, cgrp); |
5555 | |
	err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
5557 | if (err) |
5558 | goto err_free_css; |
5559 | |
	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
5561 | if (err < 0) |
5562 | goto err_free_css; |
5563 | css->id = err; |
5564 | |
5565 | /* @css is ready to be brought online now, make it visible */ |
	list_add_tail_rcu(&css->sibling, &parent_css->children);
	cgroup_idr_replace(&ss->css_idr, css, css->id);
5568 | |
5569 | err = online_css(css); |
5570 | if (err) |
5571 | goto err_list_del; |
5572 | |
5573 | return css; |
5574 | |
5575 | err_list_del: |
	list_del_rcu(&css->sibling);
err_free_css:
	list_del_rcu(&css->rstat_css_node);
	INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
	queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
	return ERR_PTR(err);
5582 | } |
5583 | |
5584 | /* |
5585 | * The returned cgroup is fully initialized including its control mask, but |
5586 | * it doesn't have the control mask applied. |
5587 | */ |
5588 | static struct cgroup *cgroup_create(struct cgroup *parent, const char *name, |
5589 | umode_t mode) |
5590 | { |
5591 | struct cgroup_root *root = parent->root; |
5592 | struct cgroup *cgrp, *tcgrp; |
5593 | struct kernfs_node *kn; |
5594 | int level = parent->level + 1; |
5595 | int ret; |
5596 | |
5597 | /* allocate the cgroup and its ID, 0 is reserved for the root */ |
5598 | cgrp = kzalloc(struct_size(cgrp, ancestors, (level + 1)), GFP_KERNEL); |
5599 | if (!cgrp) |
		return ERR_PTR(-ENOMEM);
5601 | |
	ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
5603 | if (ret) |
5604 | goto out_free_cgrp; |
5605 | |
5606 | ret = cgroup_rstat_init(cgrp); |
5607 | if (ret) |
5608 | goto out_cancel_ref; |
5609 | |
5610 | /* create the directory */ |
	kn = kernfs_create_dir_ns(parent->kn, name, mode,
				  current_fsuid(), current_fsgid(),
				  cgrp, NULL);
	if (IS_ERR(kn)) {
		ret = PTR_ERR(kn);
5616 | goto out_stat_exit; |
5617 | } |
5618 | cgrp->kn = kn; |
5619 | |
5620 | init_cgroup_housekeeping(cgrp); |
5621 | |
5622 | cgrp->self.parent = &parent->self; |
5623 | cgrp->root = root; |
5624 | cgrp->level = level; |
5625 | |
5626 | ret = psi_cgroup_alloc(cgrp); |
5627 | if (ret) |
5628 | goto out_kernfs_remove; |
5629 | |
5630 | ret = cgroup_bpf_inherit(cgrp); |
5631 | if (ret) |
5632 | goto out_psi_free; |
5633 | |
5634 | /* |
5635 | * New cgroup inherits effective freeze counter, and |
5636 | * if the parent has to be frozen, the child has too. |
5637 | */ |
5638 | cgrp->freezer.e_freeze = parent->freezer.e_freeze; |
5639 | if (cgrp->freezer.e_freeze) { |
5640 | /* |
5641 | * Set the CGRP_FREEZE flag, so when a process will be |
5642 | * attached to the child cgroup, it will become frozen. |
5643 | * At this point the new cgroup is unpopulated, so we can |
5644 | * consider it frozen immediately. |
5645 | */ |
		set_bit(CGRP_FREEZE, &cgrp->flags);
		set_bit(CGRP_FROZEN, &cgrp->flags);
5648 | } |
5649 | |
	spin_lock_irq(&css_set_lock);
	for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
5652 | cgrp->ancestors[tcgrp->level] = tcgrp; |
5653 | |
5654 | if (tcgrp != cgrp) { |
5655 | tcgrp->nr_descendants++; |
5656 | |
5657 | /* |
5658 | * If the new cgroup is frozen, all ancestor cgroups |
5659 | * get a new frozen descendant, but their state can't |
5660 | * change because of this. |
5661 | */ |
5662 | if (cgrp->freezer.e_freeze) |
5663 | tcgrp->freezer.nr_frozen_descendants++; |
5664 | } |
5665 | } |
	spin_unlock_irq(&css_set_lock);
5667 | |
	if (notify_on_release(parent))
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
5670 | |
5671 | if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags)) |
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
5673 | |
5674 | cgrp->self.serial_nr = css_serial_nr_next++; |
5675 | |
5676 | /* allocation complete, commit to creation */ |
	list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
	atomic_inc(&root->nr_cgrps);
	cgroup_get_live(parent);
5680 | |
5681 | /* |
5682 | * On the default hierarchy, a child doesn't automatically inherit |
5683 | * subtree_control from the parent. Each is configured manually. |
5684 | */ |
5685 | if (!cgroup_on_dfl(cgrp)) |
5686 | cgrp->subtree_control = cgroup_control(cgrp); |
5687 | |
5688 | cgroup_propagate_control(cgrp); |
5689 | |
5690 | return cgrp; |
5691 | |
5692 | out_psi_free: |
5693 | psi_cgroup_free(cgrp); |
5694 | out_kernfs_remove: |
	kernfs_remove(cgrp->kn);
5696 | out_stat_exit: |
5697 | cgroup_rstat_exit(cgrp); |
5698 | out_cancel_ref: |
	percpu_ref_exit(&cgrp->self.refcnt);
5700 | out_free_cgrp: |
	kfree(cgrp);
	return ERR_PTR(ret);
5703 | } |
5704 | |
5705 | static bool cgroup_check_hierarchy_limits(struct cgroup *parent) |
5706 | { |
5707 | struct cgroup *cgroup; |
5708 | int ret = false; |
5709 | int level = 1; |
5710 | |
5711 | lockdep_assert_held(&cgroup_mutex); |
5712 | |
	for (cgroup = parent; cgroup; cgroup = cgroup_parent(cgroup)) {
5714 | if (cgroup->nr_descendants >= cgroup->max_descendants) |
5715 | goto fail; |
5716 | |
5717 | if (level > cgroup->max_depth) |
5718 | goto fail; |
5719 | |
5720 | level++; |
5721 | } |
5722 | |
5723 | ret = true; |
5724 | fail: |
5725 | return ret; |
5726 | } |
5727 | |
5728 | int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode) |
5729 | { |
5730 | struct cgroup *parent, *cgrp; |
5731 | int ret; |
5732 | |
5733 | /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */ |
5734 | if (strchr(name, '\n')) |
5735 | return -EINVAL; |
5736 | |
	parent = cgroup_kn_lock_live(parent_kn, false);
5738 | if (!parent) |
5739 | return -ENODEV; |
5740 | |
5741 | if (!cgroup_check_hierarchy_limits(parent)) { |
5742 | ret = -EAGAIN; |
5743 | goto out_unlock; |
5744 | } |
5745 | |
5746 | cgrp = cgroup_create(parent, name, mode); |
	if (IS_ERR(cgrp)) {
		ret = PTR_ERR(cgrp);
5749 | goto out_unlock; |
5750 | } |
5751 | |
5752 | /* |
5753 | * This extra ref will be put in cgroup_free_fn() and guarantees |
5754 | * that @cgrp->kn is always accessible. |
5755 | */ |
	kernfs_get(cgrp->kn);
5757 | |
	ret = css_populate_dir(&cgrp->self);
5759 | if (ret) |
5760 | goto out_destroy; |
5761 | |
5762 | ret = cgroup_apply_control_enable(cgrp); |
5763 | if (ret) |
5764 | goto out_destroy; |
5765 | |
5766 | TRACE_CGROUP_PATH(mkdir, cgrp); |
5767 | |
5768 | /* let's create and online css's */ |
	kernfs_activate(cgrp->kn);
5770 | |
5771 | ret = 0; |
5772 | goto out_unlock; |
5773 | |
5774 | out_destroy: |
5775 | cgroup_destroy_locked(cgrp); |
5776 | out_unlock: |
	cgroup_kn_unlock(parent_kn);
5778 | return ret; |
5779 | } |
5780 | |
5781 | /* |
5782 | * This is called when the refcnt of a css is confirmed to be killed. |
5783 | * css_tryget_online() is now guaranteed to fail. Tell the subsystem to |
5784 | * initiate destruction and put the css ref from kill_css(). |
5785 | */ |
5786 | static void css_killed_work_fn(struct work_struct *work) |
5787 | { |
5788 | struct cgroup_subsys_state *css = |
5789 | container_of(work, struct cgroup_subsys_state, destroy_work); |
5790 | |
5791 | cgroup_lock(); |
5792 | |
5793 | do { |
5794 | offline_css(css); |
5795 | css_put(css); |
5796 | /* @css can't go away while we're holding cgroup_mutex */ |
5797 | css = css->parent; |
	} while (css && atomic_dec_and_test(&css->online_cnt));
5799 | |
5800 | cgroup_unlock(); |
5801 | } |
5802 | |
5803 | /* css kill confirmation processing requires process context, bounce */ |
5804 | static void css_killed_ref_fn(struct percpu_ref *ref) |
5805 | { |
5806 | struct cgroup_subsys_state *css = |
5807 | container_of(ref, struct cgroup_subsys_state, refcnt); |
5808 | |
	if (atomic_dec_and_test(&css->online_cnt)) {
5810 | INIT_WORK(&css->destroy_work, css_killed_work_fn); |
		queue_work(cgroup_destroy_wq, &css->destroy_work);
5812 | } |
5813 | } |
5814 | |
5815 | /** |
5816 | * kill_css - destroy a css |
5817 | * @css: css to destroy |
5818 | * |
5819 | * This function initiates destruction of @css by removing cgroup interface |
5820 | * files and putting its base reference. ->css_offline() will be invoked |
5821 | * asynchronously once css_tryget_online() is guaranteed to fail and when |
5822 | * the reference count reaches zero, @css will be released. |
5823 | */ |
5824 | static void kill_css(struct cgroup_subsys_state *css) |
5825 | { |
5826 | lockdep_assert_held(&cgroup_mutex); |
5827 | |
5828 | if (css->flags & CSS_DYING) |
5829 | return; |
5830 | |
5831 | css->flags |= CSS_DYING; |
5832 | |
5833 | /* |
5834 | * This must happen before css is disassociated with its cgroup. |
5835 | * See seq_css() for details. |
5836 | */ |
5837 | css_clear_dir(css); |
5838 | |
5839 | /* |
5840 | * Killing would put the base ref, but we need to keep it alive |
5841 | * until after ->css_offline(). |
5842 | */ |
5843 | css_get(css); |
5844 | |
5845 | /* |
5846 | * cgroup core guarantees that, by the time ->css_offline() is |
5847 | * invoked, no new css reference will be given out via |
5848 | * css_tryget_online(). We can't simply call percpu_ref_kill() and |
5849 | * proceed to offlining css's because percpu_ref_kill() doesn't |
5850 | * guarantee that the ref is seen as killed on all CPUs on return. |
5851 | * |
5852 | * Use percpu_ref_kill_and_confirm() to get notifications as each |
5853 | * css is confirmed to be seen as killed on all CPUs. |
5854 | */ |
	percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
5856 | } |
5857 | |
5858 | /** |
5859 | * cgroup_destroy_locked - the first stage of cgroup destruction |
5860 | * @cgrp: cgroup to be destroyed |
5861 | * |
5862 | * css's make use of percpu refcnts whose killing latency shouldn't be |
5863 | * exposed to userland and are RCU protected. Also, cgroup core needs to |
5864 | * guarantee that css_tryget_online() won't succeed by the time |
5865 | * ->css_offline() is invoked. To satisfy all the requirements, |
5866 | * destruction is implemented in the following two steps. |
5867 | * |
5868 | * s1. Verify @cgrp can be destroyed and mark it dying. Remove all |
5869 | * userland visible parts and start killing the percpu refcnts of |
5870 | * css's. Set up so that the next stage will be kicked off once all |
5871 | * the percpu refcnts are confirmed to be killed. |
5872 | * |
5873 | * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the |
5874 | * rest of destruction. Once all cgroup references are gone, the |
5875 | * cgroup is RCU-freed. |
5876 | * |
5877 | * This function implements s1. After this step, @cgrp is gone as far as |
5878 | * the userland is concerned and a new cgroup with the same name may be |
5879 | * created. As cgroup doesn't care about the names internally, this |
5880 | * doesn't cause any problem. |
5881 | */ |
5882 | static int cgroup_destroy_locked(struct cgroup *cgrp) |
5883 | __releases(&cgroup_mutex) __acquires(&cgroup_mutex) |
5884 | { |
5885 | struct cgroup *tcgrp, *parent = cgroup_parent(cgrp); |
5886 | struct cgroup_subsys_state *css; |
5887 | struct cgrp_cset_link *link; |
5888 | int ssid; |
5889 | |
5890 | lockdep_assert_held(&cgroup_mutex); |
5891 | |
5892 | /* |
5893 | * Only migration can raise populated from zero and we're already |
5894 | * holding cgroup_mutex. |
5895 | */ |
5896 | if (cgroup_is_populated(cgrp)) |
5897 | return -EBUSY; |
5898 | |
5899 | /* |
	 * Make sure there are no live children. We can't test emptiness of
5901 | * ->self.children as dead children linger on it while being |
5902 | * drained; otherwise, "rmdir parent/child parent" may fail. |
5903 | */ |
	if (css_has_online_children(&cgrp->self))
5905 | return -EBUSY; |
5906 | |
5907 | /* |
5908 | * Mark @cgrp and the associated csets dead. The former prevents |
5909 | * further task migration and child creation by disabling |
5910 | * cgroup_kn_lock_live(). The latter makes the csets ignored by |
5911 | * the migration path. |
5912 | */ |
5913 | cgrp->self.flags &= ~CSS_ONLINE; |
5914 | |
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		link->cset->dead = true;
	spin_unlock_irq(&css_set_lock);
5919 | |
5920 | /* initiate massacre of all css's */ |
5921 | for_each_css(css, ssid, cgrp) |
5922 | kill_css(css); |
5923 | |
5924 | /* clear and remove @cgrp dir, @cgrp has an extra ref on its kn */ |
	css_clear_dir(&cgrp->self);
	kernfs_remove(cgrp->kn);
5927 | |
5928 | if (cgroup_is_threaded(cgrp)) |
5929 | parent->nr_threaded_children--; |
5930 | |
	spin_lock_irq(&css_set_lock);
	for (tcgrp = parent; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
5933 | tcgrp->nr_descendants--; |
5934 | tcgrp->nr_dying_descendants++; |
5935 | /* |
5936 | * If the dying cgroup is frozen, decrease frozen descendants |
5937 | * counters of ancestor cgroups. |
5938 | */ |
5939 | if (test_bit(CGRP_FROZEN, &cgrp->flags)) |
5940 | tcgrp->freezer.nr_frozen_descendants--; |
5941 | } |
	spin_unlock_irq(&css_set_lock);
5943 | |
	cgroup1_check_for_release(parent);
5945 | |
5946 | cgroup_bpf_offline(cgrp); |
5947 | |
5948 | /* put the base reference */ |
	percpu_ref_kill(&cgrp->self.refcnt);
5950 | |
5951 | return 0; |
5952 | }; |
5953 | |
5954 | int cgroup_rmdir(struct kernfs_node *kn) |
5955 | { |
5956 | struct cgroup *cgrp; |
5957 | int ret = 0; |
5958 | |
	cgrp = cgroup_kn_lock_live(kn, false);
5960 | if (!cgrp) |
5961 | return 0; |
5962 | |
5963 | ret = cgroup_destroy_locked(cgrp); |
5964 | if (!ret) |
5965 | TRACE_CGROUP_PATH(rmdir, cgrp); |
5966 | |
5967 | cgroup_kn_unlock(kn); |
5968 | return ret; |
5969 | } |
5970 | |
5971 | static struct kernfs_syscall_ops cgroup_kf_syscall_ops = { |
5972 | .show_options = cgroup_show_options, |
5973 | .mkdir = cgroup_mkdir, |
5974 | .rmdir = cgroup_rmdir, |
5975 | .show_path = cgroup_show_path, |
5976 | }; |
5977 | |
5978 | static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early) |
5979 | { |
5980 | struct cgroup_subsys_state *css; |
5981 | |
5982 | pr_debug("Initializing cgroup subsys %s\n" , ss->name); |
5983 | |
5984 | cgroup_lock(); |
5985 | |
	idr_init(&ss->css_idr);
	INIT_LIST_HEAD(&ss->cfts);
5988 | |
5989 | /* Create the root cgroup state for this subsystem */ |
5990 | ss->root = &cgrp_dfl_root; |
5991 | css = ss->css_alloc(NULL); |
5992 | /* We don't handle early failures gracefully */ |
5993 | BUG_ON(IS_ERR(css)); |
	init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);
5995 | |
5996 | /* |
5997 | * Root csses are never destroyed and we can't initialize |
5998 | * percpu_ref during early init. Disable refcnting. |
5999 | */ |
6000 | css->flags |= CSS_NO_REF; |
6001 | |
6002 | if (early) { |
6003 | /* allocation can't be done safely during early init */ |
6004 | css->id = 1; |
6005 | } else { |
		css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
6007 | BUG_ON(css->id < 0); |
6008 | } |
6009 | |
6010 | /* Update the init_css_set to contain a subsys |
6011 | * pointer to this state - since the subsystem is |
6012 | * newly registered, all tasks and hence the |
6013 | * init_css_set is in the subsystem's root cgroup. */ |
6014 | init_css_set.subsys[ss->id] = css; |
6015 | |
6016 | have_fork_callback |= (bool)ss->fork << ss->id; |
6017 | have_exit_callback |= (bool)ss->exit << ss->id; |
6018 | have_release_callback |= (bool)ss->release << ss->id; |
6019 | have_canfork_callback |= (bool)ss->can_fork << ss->id; |
6020 | |
6021 | /* At system boot, before all subsystems have been |
6022 | * registered, no tasks have been forked, so we don't |
6023 | * need to invoke fork callbacks here. */ |
6024 | BUG_ON(!list_empty(&init_task.tasks)); |
6025 | |
6026 | BUG_ON(online_css(css)); |
6027 | |
6028 | cgroup_unlock(); |
6029 | } |
6030 | |
6031 | /** |
6032 | * cgroup_init_early - cgroup initialization at system boot |
6033 | * |
6034 | * Initialize cgroups at system boot, and initialize any |
6035 | * subsystems that request early init. |
6036 | */ |
6037 | int __init cgroup_init_early(void) |
6038 | { |
6039 | static struct cgroup_fs_context __initdata ctx; |
6040 | struct cgroup_subsys *ss; |
6041 | int i; |
6042 | |
6043 | ctx.root = &cgrp_dfl_root; |
	init_cgroup_root(&ctx);
6045 | cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF; |
6046 | |
6047 | RCU_INIT_POINTER(init_task.cgroups, &init_css_set); |
6048 | |
6049 | for_each_subsys(ss, i) { |
6050 | WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id, |
6051 | "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p id:name=%d:%s\n" , |
6052 | i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free, |
6053 | ss->id, ss->name); |
6054 | WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN, |
6055 | "cgroup_subsys_name %s too long\n" , cgroup_subsys_name[i]); |
6056 | |
6057 | ss->id = i; |
6058 | ss->name = cgroup_subsys_name[i]; |
6059 | if (!ss->legacy_name) |
6060 | ss->legacy_name = cgroup_subsys_name[i]; |
6061 | |
6062 | if (ss->early_init) |
			cgroup_init_subsys(ss, true);
6064 | } |
6065 | return 0; |
6066 | } |
6067 | |
6068 | /** |
6069 | * cgroup_init - cgroup initialization |
6070 | * |
6071 | * Register cgroup filesystem and /proc file, and initialize |
6072 | * any subsystems that didn't request early init. |
6073 | */ |
6074 | int __init cgroup_init(void) |
6075 | { |
6076 | struct cgroup_subsys *ss; |
6077 | int ssid; |
6078 | |
6079 | BUILD_BUG_ON(CGROUP_SUBSYS_COUNT > 16); |
6080 | BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files)); |
6081 | BUG_ON(cgroup_init_cftypes(NULL, cgroup_psi_files)); |
6082 | BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files)); |
6083 | |
6084 | cgroup_rstat_boot(); |
6085 | |
	get_user_ns(init_cgroup_ns.user_ns);
6087 | |
6088 | cgroup_lock(); |
6089 | |
6090 | /* |
6091 | * Add init_css_set to the hash table so that dfl_root can link to |
6092 | * it during init. |
6093 | */ |
6094 | hash_add(css_set_table, &init_css_set.hlist, |
6095 | css_set_hash(init_css_set.subsys)); |
6096 | |
6097 | BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0)); |
6098 | |
6099 | cgroup_unlock(); |
6100 | |
6101 | for_each_subsys(ss, ssid) { |
6102 | if (ss->early_init) { |
6103 | struct cgroup_subsys_state *css = |
6104 | init_css_set.subsys[ss->id]; |
6105 | |
			css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
						   GFP_KERNEL);
6108 | BUG_ON(css->id < 0); |
6109 | } else { |
			cgroup_init_subsys(ss, false);
6111 | } |
6112 | |
		list_add_tail(&init_css_set.e_cset_node[ssid],
			      &cgrp_dfl_root.cgrp.e_csets[ssid]);
6115 | |
6116 | /* |
6117 | * Setting dfl_root subsys_mask needs to consider the |
6118 | * disabled flag and cftype registration needs kmalloc, |
6119 | * both of which aren't available during early_init. |
6120 | */ |
6121 | if (!cgroup_ssid_enabled(ssid)) |
6122 | continue; |
6123 | |
6124 | if (cgroup1_ssid_disabled(ssid)) |
6125 | pr_info("Disabling %s control group subsystem in v1 mounts\n" , |
6126 | ss->legacy_name); |
6127 | |
6128 | cgrp_dfl_root.subsys_mask |= 1 << ss->id; |
6129 | |
6130 | /* implicit controllers must be threaded too */ |
6131 | WARN_ON(ss->implicit_on_dfl && !ss->threaded); |
6132 | |
6133 | if (ss->implicit_on_dfl) |
6134 | cgrp_dfl_implicit_ss_mask |= 1 << ss->id; |
6135 | else if (!ss->dfl_cftypes) |
6136 | cgrp_dfl_inhibit_ss_mask |= 1 << ss->id; |
6137 | |
6138 | if (ss->threaded) |
6139 | cgrp_dfl_threaded_ss_mask |= 1 << ss->id; |
6140 | |
6141 | if (ss->dfl_cftypes == ss->legacy_cftypes) { |
6142 | WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes)); |
6143 | } else { |
6144 | WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes)); |
6145 | WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes)); |
6146 | } |
6147 | |
6148 | if (ss->bind) |
6149 | ss->bind(init_css_set.subsys[ssid]); |
6150 | |
6151 | cgroup_lock(); |
		css_populate_dir(init_css_set.subsys[ssid]);
6153 | cgroup_unlock(); |
6154 | } |
6155 | |
6156 | /* init_css_set.subsys[] has been updated, re-hash */ |
	hash_del(&init_css_set.hlist);
6158 | hash_add(css_set_table, &init_css_set.hlist, |
6159 | css_set_hash(init_css_set.subsys)); |
6160 | |
	WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup"));
6162 | WARN_ON(register_filesystem(&cgroup_fs_type)); |
6163 | WARN_ON(register_filesystem(&cgroup2_fs_type)); |
6164 | WARN_ON(!proc_create_single("cgroups" , 0, NULL, proc_cgroupstats_show)); |
6165 | #ifdef CONFIG_CPUSETS |
6166 | WARN_ON(register_filesystem(&cpuset_fs_type)); |
6167 | #endif |
6168 | |
6169 | return 0; |
6170 | } |
6171 | |
6172 | static int __init cgroup_wq_init(void) |
6173 | { |
6174 | /* |
	 * There isn't much point in executing the destruction path in
	 * parallel. A good chunk is serialized with cgroup_mutex anyway.
6177 | * Use 1 for @max_active. |
6178 | * |
6179 | * We would prefer to do this in cgroup_init() above, but that |
6180 | * is called before init_workqueues(): so leave this until after. |
6181 | */ |
	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
6183 | BUG_ON(!cgroup_destroy_wq); |
6184 | return 0; |
6185 | } |
6186 | core_initcall(cgroup_wq_init); |
6187 | |
6188 | void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen) |
6189 | { |
6190 | struct kernfs_node *kn; |
6191 | |
	kn = kernfs_find_and_get_node_by_id(cgrp_dfl_root.kf_root, id);
6193 | if (!kn) |
6194 | return; |
6195 | kernfs_path(kn, buf, buflen); |
6196 | kernfs_put(kn); |
6197 | } |
6198 | |
6199 | /* |
 * cgroup_get_from_id - get the cgroup associated with cgroup id
 * @id: cgroup id
 *
 * On success return the cgroup, or ERR_PTR on failure. Only cgroups
 * within the current task's cgroup namespace are valid.
6204 | */ |
6205 | struct cgroup *cgroup_get_from_id(u64 id) |
6206 | { |
6207 | struct kernfs_node *kn; |
6208 | struct cgroup *cgrp, *root_cgrp; |
6209 | |
	kn = kernfs_find_and_get_node_by_id(cgrp_dfl_root.kf_root, id);
6211 | if (!kn) |
		return ERR_PTR(-ENOENT);

	if (kernfs_type(kn) != KERNFS_DIR) {
		kernfs_put(kn);
		return ERR_PTR(-ENOENT);
6217 | } |
6218 | |
6219 | rcu_read_lock(); |
6220 | |
6221 | cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv); |
6222 | if (cgrp && !cgroup_tryget(cgrp)) |
6223 | cgrp = NULL; |
6224 | |
6225 | rcu_read_unlock(); |
6226 | kernfs_put(kn); |
6227 | |
6228 | if (!cgrp) |
		return ERR_PTR(-ENOENT);

	root_cgrp = current_cgns_cgroup_dfl();
	if (!cgroup_is_descendant(cgrp, root_cgrp)) {
		cgroup_put(cgrp);
		return ERR_PTR(-ENOENT);
6235 | } |
6236 | |
6237 | return cgrp; |
6238 | } |
6239 | EXPORT_SYMBOL_GPL(cgroup_get_from_id); |
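
/*
 * Minimal in-kernel usage sketch for cgroup_get_from_id(); the @id value
 * would typically have been recorded earlier via cgroup_id() (the variable
 * names here are illustrative). On success the returned cgroup holds a
 * reference which the caller must drop with cgroup_put().
 *
 *	struct cgroup *cgrp = cgroup_get_from_id(id);
 *
 *	if (!IS_ERR(cgrp)) {
 *		... use cgrp ...
 *		cgroup_put(cgrp);
 *	}
 */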
6240 | |
6241 | /* |
6242 | * proc_cgroup_show() |
6243 | * - Print task's cgroup paths into seq_file, one line for each hierarchy |
6244 | * - Used for /proc/<pid>/cgroup. |
6245 | */ |
6246 | int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, |
6247 | struct pid *pid, struct task_struct *tsk) |
6248 | { |
6249 | char *buf; |
6250 | int retval; |
6251 | struct cgroup_root *root; |
6252 | |
6253 | retval = -ENOMEM; |
6254 | buf = kmalloc(PATH_MAX, GFP_KERNEL); |
6255 | if (!buf) |
6256 | goto out; |
6257 | |
6258 | rcu_read_lock(); |
	spin_lock_irq(&css_set_lock);
6260 | |
6261 | for_each_root(root) { |
6262 | struct cgroup_subsys *ss; |
6263 | struct cgroup *cgrp; |
6264 | int ssid, count = 0; |
6265 | |
6266 | if (root == &cgrp_dfl_root && !READ_ONCE(cgrp_dfl_visible)) |
6267 | continue; |
6268 | |
		cgrp = task_cgroup_from_root(tsk, root);
6270 | /* The root has already been unmounted. */ |
6271 | if (!cgrp) |
6272 | continue; |
6273 | |
6274 | seq_printf(m, fmt: "%d:" , root->hierarchy_id); |
6275 | if (root != &cgrp_dfl_root) |
6276 | for_each_subsys(ss, ssid) |
6277 | if (root->subsys_mask & (1 << ssid)) |
6278 | seq_printf(m, fmt: "%s%s" , count++ ? "," : "" , |
6279 | ss->legacy_name); |
6280 | if (strlen(root->name)) |
6281 | seq_printf(m, fmt: "%sname=%s" , count ? "," : "" , |
6282 | root->name); |
		seq_putc(m, ':');
6284 | /* |
6285 | * On traditional hierarchies, all zombie tasks show up as |
6286 | * belonging to the root cgroup. On the default hierarchy, |
6287 | * while a zombie doesn't show up in "cgroup.procs" and |
6288 | * thus can't be migrated, its /proc/PID/cgroup keeps |
6289 | * reporting the cgroup it belonged to before exiting. If |
6290 | * the cgroup is removed before the zombie is reaped, |
6291 | * " (deleted)" is appended to the cgroup path. |
6292 | */ |
6293 | if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) { |
6294 | retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX, |
6295 | current->nsproxy->cgroup_ns); |
6296 | if (retval == -E2BIG) |
6297 | retval = -ENAMETOOLONG; |
6298 | if (retval < 0) |
6299 | goto out_unlock; |
6300 | |
			seq_puts(m, buf);
		} else {
			seq_puts(m, "/");
6304 | } |
6305 | |
6306 | if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp)) |
6307 | seq_puts(m, s: " (deleted)\n" ); |
6308 | else |
6309 | seq_putc(m, c: '\n'); |
6310 | } |
6311 | |
6312 | retval = 0; |
6313 | out_unlock: |
	spin_unlock_irq(&css_set_lock);
	rcu_read_unlock();
	kfree(buf);
6317 | out: |
6318 | return retval; |
6319 | } |
6320 | |
6321 | /** |
6322 | * cgroup_fork - initialize cgroup related fields during copy_process() |
 * @child: pointer to task_struct of the child process being forked.
6324 | * |
6325 | * A task is associated with the init_css_set until cgroup_post_fork() |
6326 | * attaches it to the target css_set. |
6327 | */ |
6328 | void cgroup_fork(struct task_struct *child) |
6329 | { |
6330 | RCU_INIT_POINTER(child->cgroups, &init_css_set); |
	INIT_LIST_HEAD(&child->cg_list);
6332 | } |
6333 | |
6334 | /** |
6335 | * cgroup_v1v2_get_from_file - get a cgroup pointer from a file pointer |
6336 | * @f: file corresponding to cgroup_dir |
6337 | * |
6338 | * Find the cgroup from a file pointer associated with a cgroup directory. |
6339 | * Returns a pointer to the cgroup on success. ERR_PTR is returned if the |
6340 | * cgroup cannot be found. |
6341 | */ |
6342 | static struct cgroup *cgroup_v1v2_get_from_file(struct file *f) |
6343 | { |
6344 | struct cgroup_subsys_state *css; |
6345 | |
	css = css_tryget_online_from_dir(f->f_path.dentry, NULL);
	if (IS_ERR(css))
		return ERR_CAST(css);
6349 | |
6350 | return css->cgroup; |
6351 | } |
6352 | |
6353 | /** |
6354 | * cgroup_get_from_file - same as cgroup_v1v2_get_from_file, but only supports |
6355 | * cgroup2. |
6356 | * @f: file corresponding to cgroup2_dir |
6357 | */ |
6358 | static struct cgroup *cgroup_get_from_file(struct file *f) |
6359 | { |
6360 | struct cgroup *cgrp = cgroup_v1v2_get_from_file(f); |
6361 | |
	if (IS_ERR(cgrp))
		return ERR_CAST(cgrp);
6364 | |
6365 | if (!cgroup_on_dfl(cgrp)) { |
6366 | cgroup_put(cgrp); |
		return ERR_PTR(-EBADF);
6368 | } |
6369 | |
6370 | return cgrp; |
6371 | } |
6372 | |
6373 | /** |
6374 | * cgroup_css_set_fork - find or create a css_set for a child process |
6375 | * @kargs: the arguments passed to create the child process |
6376 | * |
 * This function finds or creates a new css_set which the child
6378 | * process will be attached to in cgroup_post_fork(). By default, |
6379 | * the child process will be given the same css_set as its parent. |
6380 | * |
6381 | * If CLONE_INTO_CGROUP is specified this function will try to find an |
6382 | * existing css_set which includes the requested cgroup and if not create |
6383 | * a new css_set that the child will be attached to later. If this function |
6384 | * succeeds it will hold cgroup_threadgroup_rwsem on return. If |
6385 | * CLONE_INTO_CGROUP is requested this function will grab cgroup mutex |
6386 | * before grabbing cgroup_threadgroup_rwsem and will hold a reference |
6387 | * to the target cgroup. |
6388 | */ |
6389 | static int cgroup_css_set_fork(struct kernel_clone_args *kargs) |
6390 | __acquires(&cgroup_mutex) __acquires(&cgroup_threadgroup_rwsem) |
6391 | { |
6392 | int ret; |
6393 | struct cgroup *dst_cgrp = NULL; |
6394 | struct css_set *cset; |
6395 | struct super_block *sb; |
6396 | struct file *f; |
6397 | |
6398 | if (kargs->flags & CLONE_INTO_CGROUP) |
6399 | cgroup_lock(); |
6400 | |
6401 | cgroup_threadgroup_change_begin(current); |
6402 | |
	spin_lock_irq(&css_set_lock);
	cset = task_css_set(current);
	get_css_set(cset);
	spin_unlock_irq(&css_set_lock);
6407 | |
6408 | if (!(kargs->flags & CLONE_INTO_CGROUP)) { |
6409 | kargs->cset = cset; |
6410 | return 0; |
6411 | } |
6412 | |
	f = fget_raw(kargs->cgroup);
6414 | if (!f) { |
6415 | ret = -EBADF; |
6416 | goto err; |
6417 | } |
6418 | sb = f->f_path.dentry->d_sb; |
6419 | |
6420 | dst_cgrp = cgroup_get_from_file(f); |
	if (IS_ERR(dst_cgrp)) {
		ret = PTR_ERR(dst_cgrp);
		dst_cgrp = NULL;
6424 | goto err; |
6425 | } |
6426 | |
	if (cgroup_is_dead(dst_cgrp)) {
6428 | ret = -ENODEV; |
6429 | goto err; |
6430 | } |
6431 | |
6432 | /* |
	 * Verify that the target cgroup is writable for us. This is
	 * usually done by the vfs layer but since we're not going through
	 * the vfs layer here we need to do it "manually".
6436 | */ |
	ret = cgroup_may_write(dst_cgrp, sb);
6438 | if (ret) |
6439 | goto err; |
6440 | |
6441 | /* |
6442 | * Spawning a task directly into a cgroup works by passing a file |
6443 | * descriptor to the target cgroup directory. This can even be an O_PATH |
6444 | * file descriptor. But it can never be a cgroup.procs file descriptor. |
6445 | * This was done on purpose so spawning into a cgroup could be |
6446 | * conceptualized as an atomic |
6447 | * |
6448 | * fd = openat(dfd_cgroup, "cgroup.procs", ...); |
6449 | * write(fd, <child-pid>, ...); |
6450 | * |
6451 | * sequence, i.e. it's a shorthand for the caller opening and writing |
6452 | * cgroup.procs of the cgroup indicated by @dfd_cgroup. This allows us |
6453 | * to always use the caller's credentials. |
6454 | */ |
	ret = cgroup_attach_permissions(cset->dfl_cgrp, dst_cgrp, sb,
					!(kargs->flags & CLONE_THREAD),
					current->nsproxy->cgroup_ns);
6458 | if (ret) |
6459 | goto err; |
6460 | |
	kargs->cset = find_css_set(cset, dst_cgrp);
6462 | if (!kargs->cset) { |
6463 | ret = -ENOMEM; |
6464 | goto err; |
6465 | } |
6466 | |
6467 | put_css_set(cset); |
6468 | fput(f); |
6469 | kargs->cgrp = dst_cgrp; |
6470 | return ret; |
6471 | |
6472 | err: |
6473 | cgroup_threadgroup_change_end(current); |
6474 | cgroup_unlock(); |
6475 | if (f) |
6476 | fput(f); |
6477 | if (dst_cgrp) |
		cgroup_put(dst_cgrp);
6479 | put_css_set(cset); |
6480 | if (kargs->cset) |
		put_css_set(kargs->cset);
6482 | return ret; |
6483 | } |
6484 | |
6485 | /** |
6486 | * cgroup_css_set_put_fork - drop references we took during fork |
6487 | * @kargs: the arguments passed to create the child process |
6488 | * |
6489 | * Drop references to the prepared css_set and target cgroup if |
6490 | * CLONE_INTO_CGROUP was requested. |
6491 | */ |
6492 | static void cgroup_css_set_put_fork(struct kernel_clone_args *kargs) |
6493 | __releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex) |
6494 | { |
6495 | struct cgroup *cgrp = kargs->cgrp; |
6496 | struct css_set *cset = kargs->cset; |
6497 | |
6498 | cgroup_threadgroup_change_end(current); |
6499 | |
6500 | if (cset) { |
6501 | put_css_set(cset); |
6502 | kargs->cset = NULL; |
6503 | } |
6504 | |
6505 | if (kargs->flags & CLONE_INTO_CGROUP) { |
6506 | cgroup_unlock(); |
6507 | if (cgrp) { |
6508 | cgroup_put(cgrp); |
6509 | kargs->cgrp = NULL; |
6510 | } |
6511 | } |
6512 | } |
6513 | |
6514 | /** |
6515 | * cgroup_can_fork - called on a new task before the process is exposed |
6516 | * @child: the child process |
6517 | * @kargs: the arguments passed to create the child process |
6518 | * |
6519 | * This prepares a new css_set for the child process which the child will |
6520 | * be attached to in cgroup_post_fork(). |
 * This calls the subsystem can_fork() callbacks. If a can_fork() callback
 * returns an error, the fork aborts with that error code. This allows a
 * cgroup subsystem to conditionally allow or deny new forks.
6524 | */ |
6525 | int cgroup_can_fork(struct task_struct *child, struct kernel_clone_args *kargs) |
6526 | { |
6527 | struct cgroup_subsys *ss; |
6528 | int i, j, ret; |
6529 | |
6530 | ret = cgroup_css_set_fork(kargs); |
6531 | if (ret) |
6532 | return ret; |
6533 | |
6534 | do_each_subsys_mask(ss, i, have_canfork_callback) { |
6535 | ret = ss->can_fork(child, kargs->cset); |
6536 | if (ret) |
6537 | goto out_revert; |
6538 | } while_each_subsys_mask(); |
6539 | |
6540 | return 0; |
6541 | |
6542 | out_revert: |
6543 | for_each_subsys(ss, j) { |
6544 | if (j >= i) |
6545 | break; |
6546 | if (ss->cancel_fork) |
6547 | ss->cancel_fork(child, kargs->cset); |
6548 | } |
6549 | |
6550 | cgroup_css_set_put_fork(kargs); |
6551 | |
6552 | return ret; |
6553 | } |
6554 | |
6555 | /** |
6556 | * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork() |
6557 | * @child: the child process |
6558 | * @kargs: the arguments passed to create the child process |
6559 | * |
6560 | * This calls the cancel_fork() callbacks if a fork failed *after* |
6561 | * cgroup_can_fork() succeeded and cleans up references we took to |
6562 | * prepare a new css_set for the child process in cgroup_can_fork(). |
6563 | */ |
6564 | void cgroup_cancel_fork(struct task_struct *child, |
6565 | struct kernel_clone_args *kargs) |
6566 | { |
6567 | struct cgroup_subsys *ss; |
6568 | int i; |
6569 | |
6570 | for_each_subsys(ss, i) |
6571 | if (ss->cancel_fork) |
6572 | ss->cancel_fork(child, kargs->cset); |
6573 | |
6574 | cgroup_css_set_put_fork(kargs); |
6575 | } |
6576 | |
6577 | /** |
6578 | * cgroup_post_fork - finalize cgroup setup for the child process |
6579 | * @child: the child process |
6580 | * @kargs: the arguments passed to create the child process |
6581 | * |
6582 | * Attach the child process to its css_set calling the subsystem fork() |
6583 | * callbacks. |
6584 | */ |
6585 | void cgroup_post_fork(struct task_struct *child, |
6586 | struct kernel_clone_args *kargs) |
6587 | __releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex) |
6588 | { |
6589 | unsigned long cgrp_flags = 0; |
6590 | bool kill = false; |
6591 | struct cgroup_subsys *ss; |
6592 | struct css_set *cset; |
6593 | int i; |
6594 | |
6595 | cset = kargs->cset; |
6596 | kargs->cset = NULL; |
6597 | |
	spin_lock_irq(&css_set_lock);
6599 | |
6600 | /* init tasks are special, only link regular threads */ |
6601 | if (likely(child->pid)) { |
6602 | if (kargs->cgrp) |
6603 | cgrp_flags = kargs->cgrp->flags; |
6604 | else |
6605 | cgrp_flags = cset->dfl_cgrp->flags; |
6606 | |
6607 | WARN_ON_ONCE(!list_empty(&child->cg_list)); |
6608 | cset->nr_tasks++; |
		css_set_move_task(child, NULL, cset, false);
6610 | } else { |
6611 | put_css_set(cset); |
6612 | cset = NULL; |
6613 | } |
6614 | |
6615 | if (!(child->flags & PF_KTHREAD)) { |
6616 | if (unlikely(test_bit(CGRP_FREEZE, &cgrp_flags))) { |
6617 | /* |
			 * If the cgroup has to be frozen, the new task has to
			 * be frozen too. Let's set the JOBCTL_TRAP_FREEZE
			 * jobctl bit to get the task into the frozen state.
			 */
			spin_lock(&child->sighand->siglock);
			WARN_ON_ONCE(child->frozen);
			child->jobctl |= JOBCTL_TRAP_FREEZE;
			spin_unlock(&child->sighand->siglock);
6626 | |
6627 | /* |
6628 | * Calling cgroup_update_frozen() isn't required here, |
6629 | * because it will be called anyway a bit later from |
6630 | * do_freezer_trap(). So we avoid cgroup's transient |
6631 | * switch from the frozen state and back. |
6632 | */ |
6633 | } |
6634 | |
6635 | /* |
6636 | * If the cgroup is to be killed notice it now and take the |
6637 | * child down right after we finished preparing it for |
6638 | * userspace. |
6639 | */ |
6640 | kill = test_bit(CGRP_KILL, &cgrp_flags); |
6641 | } |
6642 | |
	spin_unlock_irq(&css_set_lock);
6644 | |
6645 | /* |
6646 | * Call ss->fork(). This must happen after @child is linked on |
6647 | * css_set; otherwise, @child might change state between ->fork() |
6648 | * and addition to css_set. |
6649 | */ |
6650 | do_each_subsys_mask(ss, i, have_fork_callback) { |
6651 | ss->fork(child); |
6652 | } while_each_subsys_mask(); |
6653 | |
6654 | /* Make the new cset the root_cset of the new cgroup namespace. */ |
6655 | if (kargs->flags & CLONE_NEWCGROUP) { |
6656 | struct css_set *rcset = child->nsproxy->cgroup_ns->root_cset; |
6657 | |
6658 | get_css_set(cset); |
6659 | child->nsproxy->cgroup_ns->root_cset = cset; |
		put_css_set(rcset);
6661 | } |
6662 | |
6663 | /* Cgroup has to be killed so take down child immediately. */ |
6664 | if (unlikely(kill)) |
		do_send_sig_info(SIGKILL, SEND_SIG_NOINFO, child, PIDTYPE_TGID);
6666 | |
6667 | cgroup_css_set_put_fork(kargs); |
6668 | } |
6669 | |
6670 | /** |
6671 | * cgroup_exit - detach cgroup from exiting task |
6672 | * @tsk: pointer to task_struct of exiting process |
6673 | * |
6674 | * Description: Detach cgroup from @tsk. |
6675 | * |
6676 | */ |
6677 | void cgroup_exit(struct task_struct *tsk) |
6678 | { |
6679 | struct cgroup_subsys *ss; |
6680 | struct css_set *cset; |
6681 | int i; |
6682 | |
	spin_lock_irq(&css_set_lock);

	WARN_ON_ONCE(list_empty(&tsk->cg_list));
	cset = task_css_set(tsk);
	css_set_move_task(tsk, cset, NULL, false);
	list_add_tail(&tsk->cg_list, &cset->dying_tasks);
	cset->nr_tasks--;

	if (dl_task(tsk))
		dec_dl_tasks_cs(tsk);

	WARN_ON_ONCE(cgroup_task_frozen(tsk));
	if (unlikely(!(tsk->flags & PF_KTHREAD) &&
		     test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags)))
		cgroup_update_frozen(task_dfl_cgroup(tsk));

	spin_unlock_irq(&css_set_lock);
6700 | |
6701 | /* see cgroup_post_fork() for details */ |
6702 | do_each_subsys_mask(ss, i, have_exit_callback) { |
6703 | ss->exit(tsk); |
6704 | } while_each_subsys_mask(); |
6705 | } |
6706 | |
6707 | void cgroup_release(struct task_struct *task) |
6708 | { |
6709 | struct cgroup_subsys *ss; |
6710 | int ssid; |
6711 | |
6712 | do_each_subsys_mask(ss, ssid, have_release_callback) { |
6713 | ss->release(task); |
6714 | } while_each_subsys_mask(); |
6715 | |
	spin_lock_irq(&css_set_lock);
	css_set_skip_task_iters(task_css_set(task), task);
	list_del_init(&task->cg_list);
	spin_unlock_irq(&css_set_lock);
6720 | } |
6721 | |
6722 | void cgroup_free(struct task_struct *task) |
6723 | { |
6724 | struct css_set *cset = task_css_set(task); |
6725 | put_css_set(cset); |
6726 | } |
6727 | |
6728 | static int __init cgroup_disable(char *str) |
6729 | { |
6730 | struct cgroup_subsys *ss; |
6731 | char *token; |
6732 | int i; |
6733 | |
	while ((token = strsep(&str, ",")) != NULL) {
6735 | if (!*token) |
6736 | continue; |
6737 | |
6738 | for_each_subsys(ss, i) { |
6739 | if (strcmp(token, ss->name) && |
6740 | strcmp(token, ss->legacy_name)) |
6741 | continue; |
6742 | |
6743 | static_branch_disable(cgroup_subsys_enabled_key[i]); |
6744 | pr_info("Disabling %s control group subsystem\n" , |
6745 | ss->name); |
6746 | } |
6747 | |
6748 | for (i = 0; i < OPT_FEATURE_COUNT; i++) { |
6749 | if (strcmp(token, cgroup_opt_feature_names[i])) |
6750 | continue; |
6751 | cgroup_feature_disable_mask |= 1 << i; |
6752 | pr_info("Disabling %s control group feature\n" , |
6753 | cgroup_opt_feature_names[i]); |
6754 | break; |
6755 | } |
6756 | } |
6757 | return 1; |
6758 | } |
6759 | __setup("cgroup_disable=" , cgroup_disable); |
6760 | |
6761 | void __init __weak enable_debug_cgroup(void) { } |
6762 | |
6763 | static int __init enable_cgroup_debug(char *str) |
6764 | { |
6765 | cgroup_debug = true; |
6766 | enable_debug_cgroup(); |
6767 | return 1; |
6768 | } |
6769 | __setup("cgroup_debug" , enable_cgroup_debug); |
6770 | |
6771 | static int __init cgroup_favordynmods_setup(char *str) |
6772 | { |
	return (kstrtobool(str, &have_favordynmods) == 0);
}
__setup("cgroup_favordynmods=", cgroup_favordynmods_setup);
6776 | |
6777 | /** |
6778 | * css_tryget_online_from_dir - get corresponding css from a cgroup dentry |
6779 | * @dentry: directory dentry of interest |
6780 | * @ss: subsystem of interest |
6781 | * |
6782 | * If @dentry is a directory for a cgroup which has @ss enabled on it, try |
6783 | * to get the corresponding css and return it. If such css doesn't exist |
6784 | * or can't be pinned, an ERR_PTR value is returned. |
6785 | */ |
6786 | struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, |
6787 | struct cgroup_subsys *ss) |
6788 | { |
6789 | struct kernfs_node *kn = kernfs_node_from_dentry(dentry); |
6790 | struct file_system_type *s_type = dentry->d_sb->s_type; |
6791 | struct cgroup_subsys_state *css = NULL; |
6792 | struct cgroup *cgrp; |
6793 | |
6794 | /* is @dentry a cgroup dir? */ |
6795 | if ((s_type != &cgroup_fs_type && s_type != &cgroup2_fs_type) || |
6796 | !kn || kernfs_type(kn) != KERNFS_DIR) |
		return ERR_PTR(-EBADF);
6798 | |
6799 | rcu_read_lock(); |
6800 | |
6801 | /* |
6802 | * This path doesn't originate from kernfs and @kn could already |
6803 | * have been or be removed at any point. @kn->priv is RCU |
6804 | * protected for this access. See css_release_work_fn() for details. |
6805 | */ |
6806 | cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv); |
6807 | if (cgrp) |
6808 | css = cgroup_css(cgrp, ss); |
6809 | |
6810 | if (!css || !css_tryget_online(css)) |
		css = ERR_PTR(-ENOENT);
6812 | |
6813 | rcu_read_unlock(); |
6814 | return css; |
6815 | } |
6816 | |
6817 | /** |
6818 | * css_from_id - lookup css by id |
6819 | * @id: the cgroup id |
6820 | * @ss: cgroup subsys to be looked into |
6821 | * |
6822 | * Returns the css if there's valid one with @id, otherwise returns NULL. |
6823 | * Should be called under rcu_read_lock(). |
6824 | */ |
6825 | struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss) |
6826 | { |
6827 | WARN_ON_ONCE(!rcu_read_lock_held()); |
6828 | return idr_find(&ss->css_idr, id); |
6829 | } |
6830 | |
6831 | /** |
6832 | * cgroup_get_from_path - lookup and get a cgroup from its default hierarchy path |
6833 | * @path: path on the default hierarchy |
6834 | * |
6835 | * Find the cgroup at @path on the default hierarchy, increment its |
6836 | * reference count and return it. Returns pointer to the found cgroup on |
 * success, ERR_PTR(-ENOENT) if @path doesn't exist or if the cgroup has
 * already been released, and ERR_PTR(-ENOTDIR) if @path points to a
 * non-directory.
6839 | */ |
6840 | struct cgroup *cgroup_get_from_path(const char *path) |
6841 | { |
6842 | struct kernfs_node *kn; |
	struct cgroup *cgrp = ERR_PTR(-ENOENT);
6844 | struct cgroup *root_cgrp; |
6845 | |
6846 | root_cgrp = current_cgns_cgroup_dfl(); |
	kn = kernfs_walk_and_get(root_cgrp->kn, path);
6848 | if (!kn) |
6849 | goto out; |
6850 | |
6851 | if (kernfs_type(kn) != KERNFS_DIR) { |
		cgrp = ERR_PTR(-ENOTDIR);
6853 | goto out_kernfs; |
6854 | } |
6855 | |
6856 | rcu_read_lock(); |
6857 | |
6858 | cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv); |
6859 | if (!cgrp || !cgroup_tryget(cgrp)) |
		cgrp = ERR_PTR(-ENOENT);
6861 | |
6862 | rcu_read_unlock(); |
6863 | |
6864 | out_kernfs: |
6865 | kernfs_put(kn); |
6866 | out: |
6867 | return cgrp; |
6868 | } |
6869 | EXPORT_SYMBOL_GPL(cgroup_get_from_path); |
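
/*
 * Usage sketch (illustrative; "/mygrp/child" is a hypothetical path):
 * look up a cgroup relative to the caller's cgroup namespace root and
 * release it when done:
 *
 *	struct cgroup *cgrp;
 *
 *	cgrp = cgroup_get_from_path("/mygrp/child");
 *	if (IS_ERR(cgrp))
 *		return PTR_ERR(cgrp);
 *	...
 *	cgroup_put(cgrp);
 */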
6870 | |
6871 | /** |
6872 | * cgroup_v1v2_get_from_fd - get a cgroup pointer from a fd |
6873 | * @fd: fd obtained by open(cgroup_dir) |
6874 | * |
6875 | * Find the cgroup from a fd which should be obtained |
6876 | * by opening a cgroup directory. Returns a pointer to the |
6877 | * cgroup on success. ERR_PTR is returned if the cgroup |
6878 | * cannot be found. |
6879 | */ |
6880 | struct cgroup *cgroup_v1v2_get_from_fd(int fd) |
6881 | { |
6882 | struct cgroup *cgrp; |
6883 | struct fd f = fdget_raw(fd); |
6884 | if (!f.file) |
		return ERR_PTR(-EBADF);

	cgrp = cgroup_v1v2_get_from_file(f.file);
	fdput(f);
6889 | return cgrp; |
6890 | } |
6891 | |
6892 | /** |
6893 | * cgroup_get_from_fd - same as cgroup_v1v2_get_from_fd, but only supports |
6894 | * cgroup2. |
6895 | * @fd: fd obtained by open(cgroup2_dir) |
6896 | */ |
6897 | struct cgroup *cgroup_get_from_fd(int fd) |
6898 | { |
6899 | struct cgroup *cgrp = cgroup_v1v2_get_from_fd(fd); |
6900 | |
	if (IS_ERR(cgrp))
		return ERR_CAST(cgrp);

	if (!cgroup_on_dfl(cgrp)) {
		cgroup_put(cgrp);
		return ERR_PTR(-EBADF);
6907 | } |
6908 | return cgrp; |
6909 | } |
6910 | EXPORT_SYMBOL_GPL(cgroup_get_from_fd); |
6911 | |
6912 | static u64 power_of_ten(int power) |
6913 | { |
6914 | u64 v = 1; |
6915 | while (power--) |
6916 | v *= 10; |
6917 | return v; |
6918 | } |
6919 | |
6920 | /** |
6921 | * cgroup_parse_float - parse a floating number |
6922 | * @input: input string |
6923 | * @dec_shift: number of decimal digits to shift |
6924 | * @v: output |
6925 | * |
6926 | * Parse a decimal floating point number in @input and store the result in |
6927 | * @v with decimal point right shifted @dec_shift times. For example, if |
6928 | * @input is "12.3456" and @dec_shift is 3, *@v will be set to 12345. |
6929 | * Returns 0 on success, -errno otherwise. |
6930 | * |
6931 | * There's nothing cgroup specific about this function except that it's |
6932 | * currently the only user. |
6933 | */ |
6934 | int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v) |
6935 | { |
6936 | s64 whole, frac = 0; |
6937 | int fstart = 0, fend = 0, flen; |
6938 | |
	if (!sscanf(input, "%lld.%n%lld%n", &whole, &fstart, &frac, &fend))
6940 | return -EINVAL; |
6941 | if (frac < 0) |
6942 | return -EINVAL; |
6943 | |
6944 | flen = fend > fstart ? fend - fstart : 0; |
6945 | if (flen < dec_shift) |
		frac *= power_of_ten(dec_shift - flen);
	else
		frac = DIV_ROUND_CLOSEST_ULL(frac, power_of_ten(flen - dec_shift));

	*v = whole * power_of_ten(dec_shift) + frac;
6951 | return 0; |
6952 | } |
6953 | |
6954 | /* |
6955 | * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data |
6956 | * definition in cgroup-defs.h. |
6957 | */ |
6958 | #ifdef CONFIG_SOCK_CGROUP_DATA |
6959 | |
6960 | void cgroup_sk_alloc(struct sock_cgroup_data *skcd) |
6961 | { |
6962 | struct cgroup *cgroup; |
6963 | |
6964 | rcu_read_lock(); |
6965 | /* Don't associate the sock with unrelated interrupted task's cgroup. */ |
6966 | if (in_interrupt()) { |
6967 | cgroup = &cgrp_dfl_root.cgrp; |
		cgroup_get(cgroup);
6969 | goto out; |
6970 | } |
6971 | |
6972 | while (true) { |
6973 | struct css_set *cset; |
6974 | |
6975 | cset = task_css_set(current); |
6976 | if (likely(cgroup_tryget(cset->dfl_cgrp))) { |
6977 | cgroup = cset->dfl_cgrp; |
6978 | break; |
6979 | } |
6980 | cpu_relax(); |
6981 | } |
6982 | out: |
6983 | skcd->cgroup = cgroup; |
	cgroup_bpf_get(cgroup);
6985 | rcu_read_unlock(); |
6986 | } |
6987 | |
6988 | void cgroup_sk_clone(struct sock_cgroup_data *skcd) |
6989 | { |
6990 | struct cgroup *cgrp = sock_cgroup_ptr(skcd); |
6991 | |
6992 | /* |
6993 | * We might be cloning a socket which is left in an empty |
6994 | * cgroup and the cgroup might have already been rmdir'd. |
6995 | * Don't use cgroup_get_live(). |
6996 | */ |
6997 | cgroup_get(cgrp); |
6998 | cgroup_bpf_get(cgrp); |
6999 | } |
7000 | |
7001 | void cgroup_sk_free(struct sock_cgroup_data *skcd) |
7002 | { |
7003 | struct cgroup *cgrp = sock_cgroup_ptr(skcd); |
7004 | |
7005 | cgroup_bpf_put(cgrp); |
7006 | cgroup_put(cgrp); |
7007 | } |
7008 | |
7009 | #endif /* CONFIG_SOCK_CGROUP_DATA */ |
7010 | |
7011 | #ifdef CONFIG_SYSFS |
7012 | static ssize_t show_delegatable_files(struct cftype *files, char *buf, |
7013 | ssize_t size, const char *prefix) |
7014 | { |
7015 | struct cftype *cft; |
7016 | ssize_t ret = 0; |
7017 | |
7018 | for (cft = files; cft && cft->name[0] != '\0'; cft++) { |
7019 | if (!(cft->flags & CFTYPE_NS_DELEGATABLE)) |
7020 | continue; |
7021 | |
7022 | if (prefix) |
			ret += snprintf(buf + ret, size - ret, "%s.", prefix);

		ret += snprintf(buf + ret, size - ret, "%s\n", cft->name);
7026 | |
7027 | if (WARN_ON(ret >= size)) |
7028 | break; |
7029 | } |
7030 | |
7031 | return ret; |
7032 | } |
7033 | |
7034 | static ssize_t delegate_show(struct kobject *kobj, struct kobj_attribute *attr, |
7035 | char *buf) |
7036 | { |
7037 | struct cgroup_subsys *ss; |
7038 | int ssid; |
7039 | ssize_t ret = 0; |
7040 | |
	ret = show_delegatable_files(cgroup_base_files, buf + ret,
				     PAGE_SIZE - ret, NULL);
	if (cgroup_psi_enabled())
		ret += show_delegatable_files(cgroup_psi_files, buf + ret,
					      PAGE_SIZE - ret, NULL);

	for_each_subsys(ss, ssid)
		ret += show_delegatable_files(ss->dfl_cftypes, buf + ret,
					      PAGE_SIZE - ret,
					      cgroup_subsys_name[ssid]);
7051 | |
7052 | return ret; |
7053 | } |
7054 | static struct kobj_attribute cgroup_delegate_attr = __ATTR_RO(delegate); |
7055 | |
7056 | static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr, |
7057 | char *buf) |
7058 | { |
7059 | return snprintf(buf, PAGE_SIZE, |
7060 | fmt: "nsdelegate\n" |
7061 | "favordynmods\n" |
7062 | "memory_localevents\n" |
7063 | "memory_recursiveprot\n" |
7064 | "memory_hugetlb_accounting\n" ); |
7065 | } |
7066 | static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features); |
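
/*
 * These attributes appear as /sys/kernel/cgroup/delegate and
 * /sys/kernel/cgroup/features. A sketch of how userspace might use them,
 * e.g. discovering supported cgroup2 mount options before mounting:
 *
 *	$ cat /sys/kernel/cgroup/features
 *	nsdelegate
 *	favordynmods
 *	...
 *	$ mount -t cgroup2 -o nsdelegate,memory_recursiveprot none /sys/fs/cgroup
 */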
7067 | |
7068 | static struct attribute *cgroup_sysfs_attrs[] = { |
7069 | &cgroup_delegate_attr.attr, |
7070 | &cgroup_features_attr.attr, |
7071 | NULL, |
7072 | }; |
7073 | |
7074 | static const struct attribute_group cgroup_sysfs_attr_group = { |
7075 | .attrs = cgroup_sysfs_attrs, |
7076 | .name = "cgroup" , |
7077 | }; |
7078 | |
7079 | static int __init cgroup_sysfs_init(void) |
7080 | { |
	return sysfs_create_group(kernel_kobj, &cgroup_sysfs_attr_group);
7082 | } |
7083 | subsys_initcall(cgroup_sysfs_init); |
7084 | |
7085 | #endif /* CONFIG_SYSFS */ |
7086 | |