#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 *  cgroup interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/rwsem.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS

extern int cgroup_init_early(void);
extern int cgroup_init(void);
extern void cgroup_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
extern void cgroup_exit(struct task_struct *p);
extern int cgroupstats_build(struct cgroupstats *stats,
				struct dentry *dentry);

extern int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released.  This function doesn't care whether @css is on or
 * offline.  The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function.  Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget(&css->refcnt);
	return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online.  The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function.  Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget_live(&css->refcnt);
	return true;
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}
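
/*
 * Illustrative sketch (not part of the interface): a common pattern is to
 * look a css up under RCU, pin it with css_tryget_online() and drop the
 * RCU lock before doing further work.  my_work_on() is a hypothetical
 * helper and memory_cgrp_id is used as an example subsystem ID.
 *
 *	rcu_read_lock();
 *	css = task_css(current, memory_cgrp_id);
 *	if (!css_tryget_online(css))
 *		css = NULL;
 *	rcu_read_unlock();
 *
 *	if (css) {
 *		my_work_on(css);
 *		css_put(css);
 *	}
 */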

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put_many(&css->refcnt, n);
}

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

/**
 * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
 * @cgrp: the cgroup of interest
 *
 * The default hierarchy is the v2 interface of cgroup and this function
 * can be used to test whether a cgroup is on the default hierarchy for
 * cases where a subsystem should behave differently depending on the
 * interface version.
 *
 * The set of behaviors which change on the default hierarchy are still
 * being determined and the mount option is prefixed with __DEVEL__.
 *
 * List of changed behaviors:
 *
 * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
 *   and "name" are disallowed.
 *
 * - When mounting an existing superblock, mount options should match.
 *
 * - Remount is disallowed.
 *
 * - rename(2) is disallowed.
 *
 * - "tasks" is removed.  Everything should be at process granularity.  Use
 *   "cgroup.procs" instead.
 *
 * - "cgroup.procs" is not sorted.  pids will be unique unless they got
 *   recycled in between reads.
 *
 * - "release_agent" and "notify_on_release" are removed.  Replacement
 *   notification mechanism will be implemented.
 *
 * - "cgroup.clone_children" is removed.
 *
 * - "cgroup.subtree_populated" is available.  Its value is 0 if the cgroup
 *   and its descendants contain no task; otherwise, 1.  The file also
 *   generates a kernfs notification which can be monitored through poll and
 *   [di]notify when the value of the file changes.
 *
 * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
 *   take masks of ancestors with non-empty cpus/mems, instead of being
 *   moved to an ancestor.
 *
 * - cpuset: a task can be moved into an empty cpuset, and again it takes
 *   masks of ancestors.
 *
 * - memcg: use_hierarchy is on by default and the cgroup file for the flag
 *   is not created.
 *
 * - blkcg: blk-throttle becomes properly hierarchical.
 *
 * - debug: disallowed on the default hierarchy.
 */
static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
{
	return cgrp->root == &cgrp_dfl_root;
}
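
/*
 * Illustrative sketch: a controller can branch on cgroup_on_dfl() to pick
 * between v2 (default hierarchy) and v1 behavior.  my_css and the two
 * helpers below are hypothetical.
 *
 *	if (cgroup_on_dfl(my_css->cgroup))
 *		my_apply_v2_semantics(my_css);
 *	else
 *		my_apply_legacy_semantics(my_css);
 */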

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_has_tasks(struct cgroup *cgrp)
{
	return !list_empty(&cgrp->cset_links);
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return cgrp->kn->ino;
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}
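
/*
 * Illustrative sketch: a cftype->seq_show() handler typically recovers its
 * css via seq_css() and prints state derived from it.  my_css_to_state()
 * and the ->count field are hypothetical.
 *
 *	static int my_stat_show(struct seq_file *seq, void *v)
 *	{
 *		struct cgroup_subsys_state *css = seq_css(seq);
 *
 *		seq_printf(seq, "%llu\n", my_css_to_state(css)->count);
 *		return 0;
 *	}
 */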

/*
 * Name / path handling functions.  All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
					      size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}
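
/*
 * Illustrative sketch: cgroup_path() fills @buf and, mirroring
 * kernfs_path(), returns @buf on success or NULL if the path doesn't fit
 * in @buflen.  The buffer size below is an arbitrary example.
 *
 *	char buf[256];
 *
 *	if (cgroup_path(cgrp, buf, sizeof(buf)))
 *		pr_info("cgroup: %s\n", buf);
 */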

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);

bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @tset: taskset to iterate
 */
#define cgroup_taskset_for_each(task, tset)				\
	for ((task) = cgroup_taskset_first((tset)); (task);		\
	     (task) = cgroup_taskset_next((tset)))
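
/*
 * Illustrative sketch: a ->can_attach() method walks the tasks being
 * migrated with cgroup_taskset_for_each().  my_check() is hypothetical
 * and the callback signature is the one used by this interface version.
 *
 *	static int my_can_attach(struct cgroup_subsys_state *css,
 *				 struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *
 *		cgroup_taskset_for_each(task, tset)
 *			if (!my_check(task))
 *				return -EINVAL;
 *		return 0;
 *	}
 */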

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS
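
/*
 * For example, when the CPU controller is enabled, SUBSYS(cpu) in
 * cgroup_subsys.h makes the block above emit
 * "extern struct cgroup_subsys cpu_cgrp_subsys;".
 */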

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive.  This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern struct rw_semaphore css_set_rwsem;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
		lockdep_is_held(&cgroup_mutex) ||			\
		lockdep_is_held(&css_set_rwsem) ||			\
		((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}
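
/*
 * Illustrative sketch: task_css() must be called from a proper critical
 * section; plain RCU protection is the common case.  memory_cgrp_id is
 * used as an example subsystem ID and my_state_of() is hypothetical.
 *
 *	rcu_read_lock();
 *	css = task_css(tsk, memory_cgrp_id);
 *	usage = my_state_of(css)->usage;
 *	rcu_read_unlock();
 */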

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);

struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
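
/*
 * Illustrative sketch: counting @parent's online children.  Only
 * rcu_read_lock() is required for the iteration itself; see the comment
 * above for the on/offlining caveats.
 *
 *	rcu_read_lock();
 *	css_for_each_child(pos, parent)
 *		if (pos->flags & CSS_ONLINE)
 *			nr++;
 *	rcu_read_unlock();
 */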

struct cgroup_subsys_state *
css_next_descendant_pre(struct cgroup_subsys_state *pos,
			struct cgroup_subsys_state *css);

struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos);

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Walk @css's descendants.  @css is included in the iteration and the
 * first node to be visited.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting.  The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state.  It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))

struct cgroup_subsys_state *
css_next_descendant_post(struct cgroup_subsys_state *pos,
			 struct cgroup_subsys_state *css);

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead.  @css is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described for the
 * pre-order walk doesn't apply in the same way to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))
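
/*
 * Illustrative sketch: post-order is handy when children must be visited
 * before their parent, e.g. draining hypothetical per-css caches
 * bottom-up.  my_drain_cache() and my_state_of() are hypothetical.
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_post(pos, css)
 *		my_drain_cache(my_state_of(pos));
 *	rcu_read_unlock();
 */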

bool css_has_online_children(struct cgroup_subsys_state *css);

/* A css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*task_pos;
	struct list_head		*tasks_head;
	struct list_head		*mg_tasks_head;
};

void css_task_iter_start(struct cgroup_subsys_state *css,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
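
/*
 * Illustrative sketch: walking every task in @css with the iterator;
 * css_task_iter_next() returns NULL when the iteration is exhausted and
 * the locking needed for the walk is handled by the iterator itself.
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		nr++;
 *	css_task_iter_end(&it);
 */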

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}

static inline int cgroupstats_build(struct cgroupstats *stats,
					struct dentry *dentry)
{
	return -EINVAL;
}

static inline void css_put(struct cgroup_subsys_state *css) {}

/* No cgroups - nothing to do */
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t)
{
	return 0;
}

#endif /* !CONFIG_CGROUPS */

#endif /* _LINUX_CGROUP_H */