root/kernel/cgroup/pids.c


DEFINITIONS

This source file includes the following definitions:
  1. css_pids
  2. parent_pids
  3. pids_css_alloc
  4. pids_css_free
  5. pids_cancel
  6. pids_uncharge
  7. pids_charge
  8. pids_try_charge
  9. pids_can_attach
  10. pids_cancel_attach
  11. pids_can_fork
  12. pids_cancel_fork
  13. pids_release
  14. pids_max_write
  15. pids_max_show
  16. pids_current_read
  17. pids_events_show

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Process number limiting controller for cgroups.
 *
 * Used to allow a cgroup hierarchy to stop any new processes from fork()ing
 * after a certain limit is reached.
 *
 * Since it is trivial to hit the task limit without hitting any kmemcg limits
 * that may be in place, PIDs are a fundamental resource. As such, PID
 * exhaustion must be preventable in the scope of a cgroup hierarchy by
 * allowing resource limiting of the number of tasks in a cgroup.
 *
 * In order to use the `pids` controller, set the maximum number of tasks in
 * pids.max (this is not available in the root cgroup for obvious reasons). The
 * number of processes currently in the cgroup is given by pids.current.
 * Organisational operations are not blocked by cgroup policies, so it is
 * possible to have pids.current > pids.max. However, it is not possible to
 * violate a cgroup policy through fork(): fork() will return -EAGAIN if forking
 * would cause a cgroup policy to be violated.
 *
 * To set a cgroup to have no limit, set pids.max to "max". This is the default
 * for all new cgroups (N.B. that PID limits are hierarchical, so the most
 * stringent limit in the hierarchy is followed).
 *
 * pids.current tracks all child cgroup hierarchies, so parent/pids.current is
 * a superset of parent/child/pids.current.
 *
 * Copyright (C) 2015 Aleksa Sarai <cyphar@cyphar.com>
 */
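
/*
 * Illustrative usage (the cgroup name and mount point are examples only,
 * assuming cgroup2 is mounted at /sys/fs/cgroup):
 *
 *   mkdir /sys/fs/cgroup/jail
 *   echo 32 > /sys/fs/cgroup/jail/pids.max
 *   echo $$ > /sys/fs/cgroup/jail/cgroup.procs
 *
 * Once 32 tasks are charged to the "jail" subtree, further fork()/clone()
 * calls from inside it fail with -EAGAIN and pids.events counts the failure.
 */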

#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/slab.h>

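/*
 * PIDS_MAX is one more than PID_MAX_LIMIT, the largest value that can be
 * written to pids.max and the largest count that can ever be charged, so it
 * doubles as the internal "no limit" sentinel; it is shown to userspace as
 * PIDS_MAX_STR.
 */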
#define PIDS_MAX (PID_MAX_LIMIT + 1ULL)
#define PIDS_MAX_STR "max"

struct pids_cgroup {
        struct cgroup_subsys_state      css;

        /*
         * Use 64-bit types so that we can safely represent "max" as
         * %PIDS_MAX = (%PID_MAX_LIMIT + 1).
         */
        atomic64_t                      counter;
        atomic64_t                      limit;

        /* Handle for "pids.events" */
        struct cgroup_file              events_file;

        /* Number of times fork failed because limit was hit. */
        atomic64_t                      events_limit;
};

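/*
 * Helpers to map a css back to its embedding pids_cgroup and to walk up to
 * the parent cgroup's pids state. parent_pids() of the root evaluates to NULL
 * (css is the struct's first member), which terminates the hierarchical loops
 * below.
 */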
static struct pids_cgroup *css_pids(struct cgroup_subsys_state *css)
{
        return container_of(css, struct pids_cgroup, css);
}

static struct pids_cgroup *parent_pids(struct pids_cgroup *pids)
{
        return css_pids(pids->css.parent);
}

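/*
 * Allocate the per-cgroup state for a new cgroup. The counter starts at zero
 * and the limit at PIDS_MAX, i.e. new cgroups are unlimited until pids.max is
 * written.
 */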
static struct cgroup_subsys_state *
pids_css_alloc(struct cgroup_subsys_state *parent)
{
        struct pids_cgroup *pids;

        pids = kzalloc(sizeof(struct pids_cgroup), GFP_KERNEL);
        if (!pids)
                return ERR_PTR(-ENOMEM);

        atomic64_set(&pids->counter, 0);
        atomic64_set(&pids->limit, PIDS_MAX);
        atomic64_set(&pids->events_limit, 0);
        return &pids->css;
}

static void pids_css_free(struct cgroup_subsys_state *css)
{
        kfree(css_pids(css));
}

/**
 * pids_cancel - uncharge the local pid count
 * @pids: the pid cgroup state
 * @num: the number of pids to cancel
 *
 * This function will WARN if the pid count goes under 0, because such a case is
 * a bug in the pids controller proper.
 */
static void pids_cancel(struct pids_cgroup *pids, int num)
{
        /*
         * A negative count (or overflow for that matter) is invalid,
         * and indicates a bug in the `pids` controller proper.
         */
        WARN_ON_ONCE(atomic64_add_negative(-num, &pids->counter));
}

/**
 * pids_uncharge - hierarchically uncharge the pid count
 * @pids: the pid cgroup state
 * @num: the number of pids to uncharge
 */
static void pids_uncharge(struct pids_cgroup *pids, int num)
{
        struct pids_cgroup *p;

        for (p = pids; parent_pids(p); p = parent_pids(p))
                pids_cancel(p, num);
}

/**
 * pids_charge - hierarchically charge the pid count
 * @pids: the pid cgroup state
 * @num: the number of pids to charge
 *
 * This function does *not* follow the pid limit set. It cannot fail and the new
 * pid count may exceed the limit. This is used when attaching tasks (which, as
 * an organisational operation, must not be blocked by the limit) and when
 * reverting failed attaches, where there is no other way out than violating
 * the limit.
 */
static void pids_charge(struct pids_cgroup *pids, int num)
{
        struct pids_cgroup *p;

        for (p = pids; parent_pids(p); p = parent_pids(p))
                atomic64_add(num, &p->counter);
}

/**
 * pids_try_charge - hierarchically try to charge the pid count
 * @pids: the pid cgroup state
 * @num: the number of pids to charge
 *
 * This function follows the set limit. It will fail if the charge would cause
 * the new value to exceed the hierarchical limit. Returns 0 if the charge
 * succeeded, otherwise -EAGAIN.
 */
static int pids_try_charge(struct pids_cgroup *pids, int num)
{
        struct pids_cgroup *p, *q;

        for (p = pids; parent_pids(p); p = parent_pids(p)) {
                int64_t new = atomic64_add_return(num, &p->counter);
                int64_t limit = atomic64_read(&p->limit);

                /*
                 * Since new is capped to the maximum number of pid_t, if
                 * p->limit is %PIDS_MAX then we know that this test will never
                 * fail.
                 */
                if (new > limit)
                        goto revert;
        }

        return 0;

revert:
        for (q = pids; q != p; q = parent_pids(q))
                pids_cancel(q, num);
        pids_cancel(p, num);

        return -EAGAIN;
}

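/*
 * Attaching charges the destination hierarchy with pids_charge() rather than
 * pids_try_charge(): organisational operations are never refused because of
 * the limit (see the comment at the top of this file), so can_attach never
 * fails on behalf of this controller.
 */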
static int pids_can_attach(struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct cgroup_subsys_state *dst_css;

        cgroup_taskset_for_each(task, dst_css, tset) {
                struct pids_cgroup *pids = css_pids(dst_css);
                struct cgroup_subsys_state *old_css;
                struct pids_cgroup *old_pids;

                /*
                 * No need to pin @old_css between here and cancel_attach()
                 * because cgroup core protects it from being freed before
                 * the migration completes or fails.
                 */
                old_css = task_css(task, pids_cgrp_id);
                old_pids = css_pids(old_css);

                pids_charge(pids, 1);
                pids_uncharge(old_pids, 1);
        }

        return 0;
}

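/*
 * Undo the charges made in pids_can_attach() when a migration is aborted:
 * move each task's charge back from the destination to its old cgroup.
 */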
static void pids_cancel_attach(struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct cgroup_subsys_state *dst_css;

        cgroup_taskset_for_each(task, dst_css, tset) {
                struct pids_cgroup *pids = css_pids(dst_css);
                struct cgroup_subsys_state *old_css;
                struct pids_cgroup *old_pids;

                old_css = task_css(task, pids_cgrp_id);
                old_pids = css_pids(old_css);

                pids_charge(old_pids, 1);
                pids_uncharge(pids, 1);
        }
}

/*
 * task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies
 * on cgroup_threadgroup_change_begin() held by copy_process().
 */
static int pids_can_fork(struct task_struct *task)
{
        struct cgroup_subsys_state *css;
        struct pids_cgroup *pids;
        int err;

        css = task_css_check(current, pids_cgrp_id, true);
        pids = css_pids(css);
        err = pids_try_charge(pids, 1);
        if (err) {
                /* Only log the first time events_limit is incremented. */
                if (atomic64_inc_return(&pids->events_limit) == 1) {
                        pr_info("cgroup: fork rejected by pids controller in ");
                        pr_cont_cgroup_path(css->cgroup);
                        pr_cont("\n");
                }
                cgroup_file_notify(&pids->events_file);
        }
        return err;
}

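/*
 * Called when a fork that already passed pids_can_fork() is aborted later in
 * copy_process(); drop the charge taken there.
 */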
static void pids_cancel_fork(struct task_struct *task)
{
        struct cgroup_subsys_state *css;
        struct pids_cgroup *pids;

        css = task_css_check(current, pids_cgrp_id, true);
        pids = css_pids(css);
        pids_uncharge(pids, 1);
}

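/*
 * Drop a task's charge when the task is released on exit.
 */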
static void pids_release(struct task_struct *task)
{
        struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));

        pids_uncharge(pids, 1);
}

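/*
 * pids.max handlers. Writes accept either PIDS_MAX_STR or a non-negative
 * integer below PIDS_MAX. Lowering the limit below pids.current does not
 * affect existing tasks; it only makes subsequent fork()s fail.
 */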
static ssize_t pids_max_write(struct kernfs_open_file *of, char *buf,
                              size_t nbytes, loff_t off)
{
        struct cgroup_subsys_state *css = of_css(of);
        struct pids_cgroup *pids = css_pids(css);
        int64_t limit;
        int err;

        buf = strstrip(buf);
        if (!strcmp(buf, PIDS_MAX_STR)) {
                limit = PIDS_MAX;
                goto set_limit;
        }

        err = kstrtoll(buf, 0, &limit);
        if (err)
                return err;

        if (limit < 0 || limit >= PIDS_MAX)
                return -EINVAL;

set_limit:
        /*
         * Limit updates don't need to be mutex'd, since it isn't
         * critical that any racing fork()s follow the new limit.
         */
        atomic64_set(&pids->limit, limit);
        return nbytes;
}

static int pids_max_show(struct seq_file *sf, void *v)
{
        struct cgroup_subsys_state *css = seq_css(sf);
        struct pids_cgroup *pids = css_pids(css);
        int64_t limit = atomic64_read(&pids->limit);

        if (limit >= PIDS_MAX)
                seq_printf(sf, "%s\n", PIDS_MAX_STR);
        else
                seq_printf(sf, "%lld\n", limit);

        return 0;
}

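/* pids.current: the number of tasks currently charged to this subtree. */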
static s64 pids_current_read(struct cgroup_subsys_state *css,
                             struct cftype *cft)
{
        struct pids_cgroup *pids = css_pids(css);

        return atomic64_read(&pids->counter);
}

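/*
 * pids.events: "max <n>", where <n> counts the forks rejected because the
 * limit was hit.
 */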
static int pids_events_show(struct seq_file *sf, void *v)
{
        struct pids_cgroup *pids = css_pids(seq_css(sf));

        seq_printf(sf, "max %lld\n", (s64)atomic64_read(&pids->events_limit));
        return 0;
}

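/*
 * Control files exposed by the controller. None of them appear in the root
 * cgroup (CFTYPE_NOT_ON_ROOT), which is never limited.
 */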
static struct cftype pids_files[] = {
        {
                .name = "max",
                .write = pids_max_write,
                .seq_show = pids_max_show,
                .flags = CFTYPE_NOT_ON_ROOT,
        },
        {
                .name = "current",
                .read_s64 = pids_current_read,
                .flags = CFTYPE_NOT_ON_ROOT,
        },
        {
                .name = "events",
                .seq_show = pids_events_show,
                .file_offset = offsetof(struct pids_cgroup, events_file),
                .flags = CFTYPE_NOT_ON_ROOT,
        },
        { }     /* terminate */
};

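/*
 * Hook the controller into the cgroup core. It is marked as threaded, so it
 * can also be enabled in threaded subtrees on the default hierarchy.
 */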
struct cgroup_subsys pids_cgrp_subsys = {
        .css_alloc      = pids_css_alloc,
        .css_free       = pids_css_free,
        .can_attach     = pids_can_attach,
        .cancel_attach  = pids_cancel_attach,
        .can_fork       = pids_can_fork,
        .cancel_fork    = pids_cancel_fork,
        .release        = pids_release,
        .legacy_cftypes = pids_files,
        .dfl_cftypes    = pids_files,
        .threaded       = true,
};
