1#ifndef _LINUX_SCHED_H
2#define _LINUX_SCHED_H
3
4#include <uapi/linux/sched.h>
5
6#include <linux/sched/prio.h>
7
8
9struct sched_param {
10	int sched_priority;
11};
12
13#include <asm/param.h>	/* for HZ */
14
15#include <linux/capability.h>
16#include <linux/threads.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/timex.h>
20#include <linux/jiffies.h>
21#include <linux/plist.h>
22#include <linux/rbtree.h>
23#include <linux/thread_info.h>
24#include <linux/cpumask.h>
25#include <linux/errno.h>
26#include <linux/nodemask.h>
27#include <linux/mm_types.h>
28#include <linux/preempt_mask.h>
29
30#include <asm/page.h>
31#include <asm/ptrace.h>
32#include <linux/cputime.h>
33
34#include <linux/smp.h>
35#include <linux/sem.h>
36#include <linux/shm.h>
37#include <linux/signal.h>
38#include <linux/compiler.h>
39#include <linux/completion.h>
40#include <linux/pid.h>
41#include <linux/percpu.h>
42#include <linux/topology.h>
43#include <linux/proportions.h>
44#include <linux/seccomp.h>
45#include <linux/rcupdate.h>
46#include <linux/rculist.h>
47#include <linux/rtmutex.h>
48
49#include <linux/time.h>
50#include <linux/param.h>
51#include <linux/resource.h>
52#include <linux/timer.h>
53#include <linux/hrtimer.h>
54#include <linux/task_io_accounting.h>
55#include <linux/latencytop.h>
56#include <linux/cred.h>
57#include <linux/llist.h>
58#include <linux/uidgid.h>
59#include <linux/gfp.h>
60#include <linux/magic.h>
61
62#include <asm/processor.h>
63
64#define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */
65
66/*
67 * Extended scheduling parameters data structure.
68 *
69 * This is needed because the original struct sched_param can not be
70 * altered without introducing ABI issues with legacy applications
71 * (e.g., in sched_getparam()).
72 *
73 * However, the possibility of specifying more than just a priority for
74 * the tasks may be useful for a wide variety of application fields, e.g.,
75 * multimedia, streaming, automation and control, and many others.
76 *
77 * This variant (sched_attr) is meant at describing a so-called
78 * sporadic time-constrained task. In such model a task is specified by:
79 *  - the activation period or minimum instance inter-arrival time;
80 *  - the maximum (or average, depending on the actual scheduling
81 *    discipline) computation time of all instances, a.k.a. runtime;
82 *  - the deadline (relative to the actual activation time) of each
83 *    instance.
84 * Very briefly, a periodic (sporadic) task asks for the execution of
85 * some specific computation --which is typically called an instance--
86 * (at most) every period. Moreover, each instance typically lasts no more
87 * than the runtime and must be completed by time instant t equal to
88 * the instance activation time + the deadline.
89 *
90 * This is reflected by the actual fields of the sched_attr structure:
91 *
92 *  @size		size of the structure, for fwd/bwd compat.
93 *
94 *  @sched_policy	task's scheduling policy
95 *  @sched_flags	for customizing the scheduler behaviour
96 *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
97 *  @sched_priority	task's static priority (SCHED_FIFO/RR)
98 *  @sched_deadline	representative of the task's deadline
99 *  @sched_runtime	representative of the task's runtime
100 *  @sched_period	representative of the task's period
101 *
102 * Given this task model, there are a multiplicity of scheduling algorithms
103 * and policies, that can be used to ensure all the tasks will make their
104 * timing constraints.
105 *
106 * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
107 * only user of this new interface. More information about the algorithm
108 * available in the scheduling class file or in Documentation/.
109 */
110struct sched_attr {
111	u32 size;
112
113	u32 sched_policy;
114	u64 sched_flags;
115
116	/* SCHED_NORMAL, SCHED_BATCH */
117	s32 sched_nice;
118
119	/* SCHED_FIFO, SCHED_RR */
120	u32 sched_priority;
121
122	/* SCHED_DEADLINE */
123	u64 sched_runtime;
124	u64 sched_deadline;
125	u64 sched_period;
126};
127
128struct futex_pi_state;
129struct robust_list_head;
130struct bio_list;
131struct fs_struct;
132struct perf_event_context;
133struct blk_plug;
134struct filename;
135
136#define VMACACHE_BITS 2
137#define VMACACHE_SIZE (1U << VMACACHE_BITS)
138#define VMACACHE_MASK (VMACACHE_SIZE - 1)
139
140/*
141 * These are the constant used to fake the fixed-point load-average
142 * counting. Some notes:
143 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
144 *    a load-average precision of 10 bits integer + 11 bits fractional
145 *  - if you want to count load-averages more often, you need more
146 *    precision, or rounding will get you. With 2-second counting freq,
147 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
148 *    11 bit fractions.
149 */
150extern unsigned long avenrun[];		/* Load averages */
151extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
152
153#define FSHIFT		11		/* nr of bits of precision */
154#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
155#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
156#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
157#define EXP_5		2014		/* 1/exp(5sec/5min) */
158#define EXP_15		2037		/* 1/exp(5sec/15min) */
159
160#define CALC_LOAD(load,exp,n) \
161	load *= exp; \
162	load += n*(FIXED_1-exp); \
163	load >>= FSHIFT;
164
165extern unsigned long total_forks;
166extern int nr_threads;
167DECLARE_PER_CPU(unsigned long, process_counts);
168extern int nr_processes(void);
169extern unsigned long nr_running(void);
170extern bool single_task_running(void);
171extern unsigned long nr_iowait(void);
172extern unsigned long nr_iowait_cpu(int cpu);
173extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
174
175extern void calc_global_load(unsigned long ticks);
176extern void update_cpu_load_nohz(void);
177
178extern unsigned long get_parent_ip(unsigned long addr);
179
180extern void dump_cpu_task(int cpu);
181
182struct seq_file;
183struct cfs_rq;
184struct task_group;
185#ifdef CONFIG_SCHED_DEBUG
186extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
187extern void proc_sched_set_task(struct task_struct *p);
188extern void
189print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
190#endif
191
192/*
193 * Task state bitmask. NOTE! These bits are also
194 * encoded in fs/proc/array.c: get_task_state().
195 *
196 * We have two separate sets of flags: task->state
197 * is about runnability, while task->exit_state are
198 * about the task exiting. Confusing, but this way
199 * modifying one set can't modify the other one by
200 * mistake.
201 */
202#define TASK_RUNNING		0
203#define TASK_INTERRUPTIBLE	1
204#define TASK_UNINTERRUPTIBLE	2
205#define __TASK_STOPPED		4
206#define __TASK_TRACED		8
207/* in tsk->exit_state */
208#define EXIT_DEAD		16
209#define EXIT_ZOMBIE		32
210#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
211/* in tsk->state again */
212#define TASK_DEAD		64
213#define TASK_WAKEKILL		128
214#define TASK_WAKING		256
215#define TASK_PARKED		512
216#define TASK_STATE_MAX		1024
217
218#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
219
220extern char ___assert_task_state[1 - 2*!!(
221		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
222
223/* Convenience macros for the sake of set_task_state */
224#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
225#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
226#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
227
228/* Convenience macros for the sake of wake_up */
229#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
230#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
231
232/* get_task_state() */
233#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
234				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
235				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
236
237#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
238#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
239#define task_is_stopped_or_traced(task)	\
240			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
241#define task_contributes_to_load(task)	\
242				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
243				 (task->flags & PF_FROZEN) == 0)
244
245#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
246
247#define __set_task_state(tsk, state_value)			\
248	do {							\
249		(tsk)->task_state_change = _THIS_IP_;		\
250		(tsk)->state = (state_value);			\
251	} while (0)
252#define set_task_state(tsk, state_value)			\
253	do {							\
254		(tsk)->task_state_change = _THIS_IP_;		\
255		set_mb((tsk)->state, (state_value));		\
256	} while (0)
257
258/*
259 * set_current_state() includes a barrier so that the write of current->state
260 * is correctly serialised wrt the caller's subsequent test of whether to
261 * actually sleep:
262 *
263 *	set_current_state(TASK_UNINTERRUPTIBLE);
264 *	if (do_i_need_to_sleep())
265 *		schedule();
266 *
267 * If the caller does not need such serialisation then use __set_current_state()
268 */
269#define __set_current_state(state_value)			\
270	do {							\
271		current->task_state_change = _THIS_IP_;		\
272		current->state = (state_value);			\
273	} while (0)
274#define set_current_state(state_value)				\
275	do {							\
276		current->task_state_change = _THIS_IP_;		\
277		set_mb(current->state, (state_value));		\
278	} while (0)
279
280#else
281
282#define __set_task_state(tsk, state_value)		\
283	do { (tsk)->state = (state_value); } while (0)
284#define set_task_state(tsk, state_value)		\
285	set_mb((tsk)->state, (state_value))
286
287/*
288 * set_current_state() includes a barrier so that the write of current->state
289 * is correctly serialised wrt the caller's subsequent test of whether to
290 * actually sleep:
291 *
292 *	set_current_state(TASK_UNINTERRUPTIBLE);
293 *	if (do_i_need_to_sleep())
294 *		schedule();
295 *
296 * If the caller does not need such serialisation then use __set_current_state()
297 */
298#define __set_current_state(state_value)		\
299	do { current->state = (state_value); } while (0)
300#define set_current_state(state_value)			\
301	set_mb(current->state, (state_value))
302
303#endif
304
305/* Task command name length */
306#define TASK_COMM_LEN 16
307
308#include <linux/spinlock.h>
309
310/*
311 * This serializes "schedule()" and also protects
312 * the run-queue from deletions/modifications (but
313 * _adding_ to the beginning of the run-queue has
314 * a separate lock).
315 */
316extern rwlock_t tasklist_lock;
317extern spinlock_t mmlist_lock;
318
319struct task_struct;
320
321#ifdef CONFIG_PROVE_RCU
322extern int lockdep_tasklist_lock_is_held(void);
323#endif /* #ifdef CONFIG_PROVE_RCU */
324
325extern void sched_init(void);
326extern void sched_init_smp(void);
327extern asmlinkage void schedule_tail(struct task_struct *prev);
328extern void init_idle(struct task_struct *idle, int cpu);
329extern void init_idle_bootup_task(struct task_struct *idle);
330
331extern cpumask_var_t cpu_isolated_map;
332
333extern int runqueue_is_locked(int cpu);
334
335#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
336extern void nohz_balance_enter_idle(int cpu);
337extern void set_cpu_sd_state_idle(void);
338extern int get_nohz_timer_target(int pinned);
339#else
340static inline void nohz_balance_enter_idle(int cpu) { }
341static inline void set_cpu_sd_state_idle(void) { }
342static inline int get_nohz_timer_target(int pinned)
343{
344	return smp_processor_id();
345}
346#endif
347
348/*
349 * Only dump TASK_* tasks. (0 for all tasks)
350 */
351extern void show_state_filter(unsigned long state_filter);
352
353static inline void show_state(void)
354{
355	show_state_filter(0);
356}
357
358extern void show_regs(struct pt_regs *);
359
360/*
361 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
362 * task), SP is the stack pointer of the first frame that should be shown in the back
363 * trace (or NULL if the entire call-chain of the task should be shown).
364 */
365extern void show_stack(struct task_struct *task, unsigned long *sp);
366
367extern void cpu_init (void);
368extern void trap_init(void);
369extern void update_process_times(int user);
370extern void scheduler_tick(void);
371
372extern void sched_show_task(struct task_struct *p);
373
374#ifdef CONFIG_LOCKUP_DETECTOR
375extern void touch_softlockup_watchdog(void);
376extern void touch_softlockup_watchdog_sync(void);
377extern void touch_all_softlockup_watchdogs(void);
378extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
379				  void __user *buffer,
380				  size_t *lenp, loff_t *ppos);
381extern unsigned int  softlockup_panic;
382void lockup_detector_init(void);
383#else
384static inline void touch_softlockup_watchdog(void)
385{
386}
387static inline void touch_softlockup_watchdog_sync(void)
388{
389}
390static inline void touch_all_softlockup_watchdogs(void)
391{
392}
393static inline void lockup_detector_init(void)
394{
395}
396#endif
397
398#ifdef CONFIG_DETECT_HUNG_TASK
399void reset_hung_task_detector(void);
400#else
401static inline void reset_hung_task_detector(void)
402{
403}
404#endif
405
406/* Attach to any functions which should be ignored in wchan output. */
407#define __sched		__attribute__((__section__(".sched.text")))
408
409/* Linker adds these: start and end of __sched functions */
410extern char __sched_text_start[], __sched_text_end[];
411
412/* Is this address in the __sched functions? */
413extern int in_sched_functions(unsigned long addr);
414
415#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
416extern signed long schedule_timeout(signed long timeout);
417extern signed long schedule_timeout_interruptible(signed long timeout);
418extern signed long schedule_timeout_killable(signed long timeout);
419extern signed long schedule_timeout_uninterruptible(signed long timeout);
420asmlinkage void schedule(void);
421extern void schedule_preempt_disabled(void);
422
423extern long io_schedule_timeout(long timeout);
424
425static inline void io_schedule(void)
426{
427	io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
428}
429
430struct nsproxy;
431struct user_namespace;
432
433#ifdef CONFIG_MMU
434extern void arch_pick_mmap_layout(struct mm_struct *mm);
435extern unsigned long
436arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
437		       unsigned long, unsigned long);
438extern unsigned long
439arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
440			  unsigned long len, unsigned long pgoff,
441			  unsigned long flags);
442#else
443static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
444#endif
445
446#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
447#define SUID_DUMP_USER		1	/* Dump as user of process */
448#define SUID_DUMP_ROOT		2	/* Dump as root */
449
450/* mm flags */
451
452/* for SUID_DUMP_* above */
453#define MMF_DUMPABLE_BITS 2
454#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
455
456extern void set_dumpable(struct mm_struct *mm, int value);
457/*
458 * This returns the actual value of the suid_dumpable flag. For things
459 * that are using this for checking for privilege transitions, it must
460 * test against SUID_DUMP_USER rather than treating it as a boolean
461 * value.
462 */
463static inline int __get_dumpable(unsigned long mm_flags)
464{
465	return mm_flags & MMF_DUMPABLE_MASK;
466}
467
468static inline int get_dumpable(struct mm_struct *mm)
469{
470	return __get_dumpable(mm->flags);
471}
472
473/* coredump filter bits */
474#define MMF_DUMP_ANON_PRIVATE	2
475#define MMF_DUMP_ANON_SHARED	3
476#define MMF_DUMP_MAPPED_PRIVATE	4
477#define MMF_DUMP_MAPPED_SHARED	5
478#define MMF_DUMP_ELF_HEADERS	6
479#define MMF_DUMP_HUGETLB_PRIVATE 7
480#define MMF_DUMP_HUGETLB_SHARED  8
481
482#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
483#define MMF_DUMP_FILTER_BITS	7
484#define MMF_DUMP_FILTER_MASK \
485	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
486#define MMF_DUMP_FILTER_DEFAULT \
487	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
488	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
489
490#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
491# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
492#else
493# define MMF_DUMP_MASK_DEFAULT_ELF	0
494#endif
495					/* leave room for more dump flags */
496#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
497#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
498#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */
499
500#define MMF_HAS_UPROBES		19	/* has uprobes */
501#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
502
503#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
504
505struct sighand_struct {
506	atomic_t		count;
507	struct k_sigaction	action[_NSIG];
508	spinlock_t		siglock;
509	wait_queue_head_t	signalfd_wqh;
510};
511
512struct pacct_struct {
513	int			ac_flag;
514	long			ac_exitcode;
515	unsigned long		ac_mem;
516	cputime_t		ac_utime, ac_stime;
517	unsigned long		ac_minflt, ac_majflt;
518};
519
520struct cpu_itimer {
521	cputime_t expires;
522	cputime_t incr;
523	u32 error;
524	u32 incr_error;
525};
526
527/**
528 * struct cputime - snaphsot of system and user cputime
529 * @utime: time spent in user mode
530 * @stime: time spent in system mode
531 *
532 * Gathers a generic snapshot of user and system time.
533 */
534struct cputime {
535	cputime_t utime;
536	cputime_t stime;
537};
538
539/**
540 * struct task_cputime - collected CPU time counts
541 * @utime:		time spent in user mode, in &cputime_t units
542 * @stime:		time spent in kernel mode, in &cputime_t units
543 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
544 *
545 * This is an extension of struct cputime that includes the total runtime
546 * spent by the task from the scheduler point of view.
547 *
548 * As a result, this structure groups together three kinds of CPU time
549 * that are tracked for threads and thread groups.  Most things considering
550 * CPU time want to group these counts together and treat all three
551 * of them in parallel.
552 */
553struct task_cputime {
554	cputime_t utime;
555	cputime_t stime;
556	unsigned long long sum_exec_runtime;
557};
558/* Alternate field names when used to cache expirations. */
559#define prof_exp	stime
560#define virt_exp	utime
561#define sched_exp	sum_exec_runtime
562
563#define INIT_CPUTIME	\
564	(struct task_cputime) {					\
565		.utime = 0,					\
566		.stime = 0,					\
567		.sum_exec_runtime = 0,				\
568	}
569
570#ifdef CONFIG_PREEMPT_COUNT
571#define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
572#else
573#define PREEMPT_DISABLED	PREEMPT_ENABLED
574#endif
575
576/*
577 * Disable preemption until the scheduler is running.
578 * Reset by start_kernel()->sched_init()->init_idle().
579 *
580 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
581 * before the scheduler is active -- see should_resched().
582 */
583#define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)
584
585/**
586 * struct thread_group_cputimer - thread group interval timer counts
587 * @cputime:		thread group interval timers.
588 * @running:		non-zero when there are timers running and
589 * 			@cputime receives updates.
590 * @lock:		lock for fields in this struct.
591 *
592 * This structure contains the version of task_cputime, above, that is
593 * used for thread group CPU timer calculations.
594 */
595struct thread_group_cputimer {
596	struct task_cputime cputime;
597	int running;
598	raw_spinlock_t lock;
599};
600
601#include <linux/rwsem.h>
602struct autogroup;
603
604/*
605 * NOTE! "signal_struct" does not have its own
606 * locking, because a shared signal_struct always
607 * implies a shared sighand_struct, so locking
608 * sighand_struct is always a proper superset of
609 * the locking of signal_struct.
610 */
611struct signal_struct {
612	atomic_t		sigcnt;
613	atomic_t		live;
614	int			nr_threads;
615	struct list_head	thread_head;
616
617	wait_queue_head_t	wait_chldexit;	/* for wait4() */
618
619	/* current thread group signal load-balancing target: */
620	struct task_struct	*curr_target;
621
622	/* shared signal handling: */
623	struct sigpending	shared_pending;
624
625	/* thread group exit support */
626	int			group_exit_code;
627	/* overloaded:
628	 * - notify group_exit_task when ->count is equal to notify_count
629	 * - everyone except group_exit_task is stopped during signal delivery
630	 *   of fatal signals, group_exit_task processes the signal.
631	 */
632	int			notify_count;
633	struct task_struct	*group_exit_task;
634
635	/* thread group stop support, overloads group_exit_code too */
636	int			group_stop_count;
637	unsigned int		flags; /* see SIGNAL_* flags below */
638
639	/*
640	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
641	 * manager, to re-parent orphan (double-forking) child processes
642	 * to this process instead of 'init'. The service manager is
643	 * able to receive SIGCHLD signals and is able to investigate
644	 * the process until it calls wait(). All children of this
645	 * process will inherit a flag if they should look for a
646	 * child_subreaper process at exit.
647	 */
648	unsigned int		is_child_subreaper:1;
649	unsigned int		has_child_subreaper:1;
650
651	/* POSIX.1b Interval Timers */
652	int			posix_timer_id;
653	struct list_head	posix_timers;
654
655	/* ITIMER_REAL timer for the process */
656	struct hrtimer real_timer;
657	struct pid *leader_pid;
658	ktime_t it_real_incr;
659
660	/*
661	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
662	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
663	 * values are defined to 0 and 1 respectively
664	 */
665	struct cpu_itimer it[2];
666
667	/*
668	 * Thread group totals for process CPU timers.
669	 * See thread_group_cputimer(), et al, for details.
670	 */
671	struct thread_group_cputimer cputimer;
672
673	/* Earliest-expiration cache. */
674	struct task_cputime cputime_expires;
675
676	struct list_head cpu_timers[3];
677
678	struct pid *tty_old_pgrp;
679
680	/* boolean value for session group leader */
681	int leader;
682
683	struct tty_struct *tty; /* NULL if no tty */
684
685#ifdef CONFIG_SCHED_AUTOGROUP
686	struct autogroup *autogroup;
687#endif
688	/*
689	 * Cumulative resource counters for dead threads in the group,
690	 * and for reaped dead child processes forked by this group.
691	 * Live threads maintain their own counters and add to these
692	 * in __exit_signal, except for the group leader.
693	 */
694	seqlock_t stats_lock;
695	cputime_t utime, stime, cutime, cstime;
696	cputime_t gtime;
697	cputime_t cgtime;
698#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
699	struct cputime prev_cputime;
700#endif
701	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
702	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
703	unsigned long inblock, oublock, cinblock, coublock;
704	unsigned long maxrss, cmaxrss;
705	struct task_io_accounting ioac;
706
707	/*
708	 * Cumulative ns of schedule CPU time fo dead threads in the
709	 * group, not including a zombie group leader, (This only differs
710	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
711	 * other than jiffies.)
712	 */
713	unsigned long long sum_sched_runtime;
714
715	/*
716	 * We don't bother to synchronize most readers of this at all,
717	 * because there is no reader checking a limit that actually needs
718	 * to get both rlim_cur and rlim_max atomically, and either one
719	 * alone is a single word that can safely be read normally.
720	 * getrlimit/setrlimit use task_lock(current->group_leader) to
721	 * protect this instead of the siglock, because they really
722	 * have no need to disable irqs.
723	 */
724	struct rlimit rlim[RLIM_NLIMITS];
725
726#ifdef CONFIG_BSD_PROCESS_ACCT
727	struct pacct_struct pacct;	/* per-process accounting information */
728#endif
729#ifdef CONFIG_TASKSTATS
730	struct taskstats *stats;
731#endif
732#ifdef CONFIG_AUDIT
733	unsigned audit_tty;
734	unsigned audit_tty_log_passwd;
735	struct tty_audit_buf *tty_audit_buf;
736#endif
737#ifdef CONFIG_CGROUPS
738	/*
739	 * group_rwsem prevents new tasks from entering the threadgroup and
740	 * member tasks from exiting,a more specifically, setting of
741	 * PF_EXITING.  fork and exit paths are protected with this rwsem
742	 * using threadgroup_change_begin/end().  Users which require
743	 * threadgroup to remain stable should use threadgroup_[un]lock()
744	 * which also takes care of exec path.  Currently, cgroup is the
745	 * only user.
746	 */
747	struct rw_semaphore group_rwsem;
748#endif
749
750	oom_flags_t oom_flags;
751	short oom_score_adj;		/* OOM kill score adjustment */
752	short oom_score_adj_min;	/* OOM kill score adjustment min value.
753					 * Only settable by CAP_SYS_RESOURCE. */
754
755	struct mutex cred_guard_mutex;	/* guard against foreign influences on
756					 * credential calculations
757					 * (notably. ptrace) */
758};
759
760/*
761 * Bits in flags field of signal_struct.
762 */
763#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
764#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
765#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
766#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
767/*
768 * Pending notifications to parent.
769 */
770#define SIGNAL_CLD_STOPPED	0x00000010
771#define SIGNAL_CLD_CONTINUED	0x00000020
772#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
773
774#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
775
776/* If true, all threads except ->group_exit_task have pending SIGKILL */
777static inline int signal_group_exit(const struct signal_struct *sig)
778{
779	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
780		(sig->group_exit_task != NULL);
781}
782
783/*
784 * Some day this will be a full-fledged user tracking system..
785 */
786struct user_struct {
787	atomic_t __count;	/* reference count */
788	atomic_t processes;	/* How many processes does this user have? */
789	atomic_t sigpending;	/* How many pending signals does this user have? */
790#ifdef CONFIG_INOTIFY_USER
791	atomic_t inotify_watches; /* How many inotify watches does this user have? */
792	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
793#endif
794#ifdef CONFIG_FANOTIFY
795	atomic_t fanotify_listeners;
796#endif
797#ifdef CONFIG_EPOLL
798	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
799#endif
800#ifdef CONFIG_POSIX_MQUEUE
801	/* protected by mq_lock	*/
802	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
803#endif
804	unsigned long locked_shm; /* How many pages of mlocked shm ? */
805	unsigned long unix_inflight;	/* How many files in flight in unix sockets */
806
807#ifdef CONFIG_KEYS
808	struct key *uid_keyring;	/* UID specific keyring */
809	struct key *session_keyring;	/* UID's default session keyring */
810#endif
811
812	/* Hash table maintenance information */
813	struct hlist_node uidhash_node;
814	kuid_t uid;
815
816#ifdef CONFIG_PERF_EVENTS
817	atomic_long_t locked_vm;
818#endif
819};
820
821extern int uids_sysfs_init(void);
822
823extern struct user_struct *find_user(kuid_t);
824
825extern struct user_struct root_user;
826#define INIT_USER (&root_user)
827
828
829struct backing_dev_info;
830struct reclaim_state;
831
832#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
833struct sched_info {
834	/* cumulative counters */
835	unsigned long pcount;	      /* # of times run on this cpu */
836	unsigned long long run_delay; /* time spent waiting on a runqueue */
837
838	/* timestamps */
839	unsigned long long last_arrival,/* when we last ran on a cpu */
840			   last_queued;	/* when we were last queued to run */
841};
842#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
843
844#ifdef CONFIG_TASK_DELAY_ACCT
845struct task_delay_info {
846	spinlock_t	lock;
847	unsigned int	flags;	/* Private per-task flags */
848
849	/* For each stat XXX, add following, aligned appropriately
850	 *
851	 * struct timespec XXX_start, XXX_end;
852	 * u64 XXX_delay;
853	 * u32 XXX_count;
854	 *
855	 * Atomicity of updates to XXX_delay, XXX_count protected by
856	 * single lock above (split into XXX_lock if contention is an issue).
857	 */
858
859	/*
860	 * XXX_count is incremented on every XXX operation, the delay
861	 * associated with the operation is added to XXX_delay.
862	 * XXX_delay contains the accumulated delay time in nanoseconds.
863	 */
864	u64 blkio_start;	/* Shared by blkio, swapin */
865	u64 blkio_delay;	/* wait for sync block io completion */
866	u64 swapin_delay;	/* wait for swapin block io completion */
867	u32 blkio_count;	/* total count of the number of sync block */
868				/* io operations performed */
869	u32 swapin_count;	/* total count of the number of swapin block */
870				/* io operations performed */
871
872	u64 freepages_start;
873	u64 freepages_delay;	/* wait for memory reclaim */
874	u32 freepages_count;	/* total count of memory reclaim */
875};
876#endif	/* CONFIG_TASK_DELAY_ACCT */
877
878static inline int sched_info_on(void)
879{
880#ifdef CONFIG_SCHEDSTATS
881	return 1;
882#elif defined(CONFIG_TASK_DELAY_ACCT)
883	extern int delayacct_on;
884	return delayacct_on;
885#else
886	return 0;
887#endif
888}
889
890enum cpu_idle_type {
891	CPU_IDLE,
892	CPU_NOT_IDLE,
893	CPU_NEWLY_IDLE,
894	CPU_MAX_IDLE_TYPES
895};
896
897/*
898 * Increase resolution of cpu_capacity calculations
899 */
900#define SCHED_CAPACITY_SHIFT	10
901#define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)
902
903/*
904 * sched-domains (multiprocessor balancing) declarations:
905 */
906#ifdef CONFIG_SMP
907#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
908#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
909#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
910#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
911#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
912#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
913#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu power */
914#define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
915#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
916#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
917#define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
918#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
919#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
920#define SD_NUMA			0x4000	/* cross-node balancing */
921
922#ifdef CONFIG_SCHED_SMT
923static inline int cpu_smt_flags(void)
924{
925	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
926}
927#endif
928
929#ifdef CONFIG_SCHED_MC
930static inline int cpu_core_flags(void)
931{
932	return SD_SHARE_PKG_RESOURCES;
933}
934#endif
935
936#ifdef CONFIG_NUMA
937static inline int cpu_numa_flags(void)
938{
939	return SD_NUMA;
940}
941#endif
942
943struct sched_domain_attr {
944	int relax_domain_level;
945};
946
947#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
948	.relax_domain_level = -1,			\
949}
950
951extern int sched_domain_level_max;
952
953struct sched_group;
954
955struct sched_domain {
956	/* These fields must be setup */
957	struct sched_domain *parent;	/* top domain must be null terminated */
958	struct sched_domain *child;	/* bottom domain must be null terminated */
959	struct sched_group *groups;	/* the balancing groups of the domain */
960	unsigned long min_interval;	/* Minimum balance interval ms */
961	unsigned long max_interval;	/* Maximum balance interval ms */
962	unsigned int busy_factor;	/* less balancing by factor if busy */
963	unsigned int imbalance_pct;	/* No balance until over watermark */
964	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
965	unsigned int busy_idx;
966	unsigned int idle_idx;
967	unsigned int newidle_idx;
968	unsigned int wake_idx;
969	unsigned int forkexec_idx;
970	unsigned int smt_gain;
971
972	int nohz_idle;			/* NOHZ IDLE status */
973	int flags;			/* See SD_* */
974	int level;
975
976	/* Runtime fields. */
977	unsigned long last_balance;	/* init to jiffies. units in jiffies */
978	unsigned int balance_interval;	/* initialise to 1. units in ms. */
979	unsigned int nr_balance_failed; /* initialise to 0 */
980
981	/* idle_balance() stats */
982	u64 max_newidle_lb_cost;
983	unsigned long next_decay_max_lb_cost;
984
985#ifdef CONFIG_SCHEDSTATS
986	/* load_balance() stats */
987	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
988	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
989	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
990	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
991	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
992	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
993	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
994	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
995
996	/* Active load balancing */
997	unsigned int alb_count;
998	unsigned int alb_failed;
999	unsigned int alb_pushed;
1000
1001	/* SD_BALANCE_EXEC stats */
1002	unsigned int sbe_count;
1003	unsigned int sbe_balanced;
1004	unsigned int sbe_pushed;
1005
1006	/* SD_BALANCE_FORK stats */
1007	unsigned int sbf_count;
1008	unsigned int sbf_balanced;
1009	unsigned int sbf_pushed;
1010
1011	/* try_to_wake_up() stats */
1012	unsigned int ttwu_wake_remote;
1013	unsigned int ttwu_move_affine;
1014	unsigned int ttwu_move_balance;
1015#endif
1016#ifdef CONFIG_SCHED_DEBUG
1017	char *name;
1018#endif
1019	union {
1020		void *private;		/* used during construction */
1021		struct rcu_head rcu;	/* used during destruction */
1022	};
1023
1024	unsigned int span_weight;
1025	/*
1026	 * Span of all CPUs in this domain.
1027	 *
1028	 * NOTE: this field is variable length. (Allocated dynamically
1029	 * by attaching extra space to the end of the structure,
1030	 * depending on how many CPUs the kernel has booted up with)
1031	 */
1032	unsigned long span[0];
1033};
1034
1035static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1036{
1037	return to_cpumask(sd->span);
1038}
1039
1040extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1041				    struct sched_domain_attr *dattr_new);
1042
1043/* Allocate an array of sched domains, for partition_sched_domains(). */
1044cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1045void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1046
1047bool cpus_share_cache(int this_cpu, int that_cpu);
1048
1049typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1050typedef int (*sched_domain_flags_f)(void);
1051
1052#define SDTL_OVERLAP	0x01
1053
1054struct sd_data {
1055	struct sched_domain **__percpu sd;
1056	struct sched_group **__percpu sg;
1057	struct sched_group_capacity **__percpu sgc;
1058};
1059
1060struct sched_domain_topology_level {
1061	sched_domain_mask_f mask;
1062	sched_domain_flags_f sd_flags;
1063	int		    flags;
1064	int		    numa_level;
1065	struct sd_data      data;
1066#ifdef CONFIG_SCHED_DEBUG
1067	char                *name;
1068#endif
1069};
1070
1071extern struct sched_domain_topology_level *sched_domain_topology;
1072
1073extern void set_sched_topology(struct sched_domain_topology_level *tl);
1074extern void wake_up_if_idle(int cpu);
1075
1076#ifdef CONFIG_SCHED_DEBUG
1077# define SD_INIT_NAME(type)		.name = #type
1078#else
1079# define SD_INIT_NAME(type)
1080#endif
1081
1082#else /* CONFIG_SMP */
1083
1084struct sched_domain_attr;
1085
1086static inline void
1087partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1088			struct sched_domain_attr *dattr_new)
1089{
1090}
1091
1092static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1093{
1094	return true;
1095}
1096
1097#endif	/* !CONFIG_SMP */
1098
1099
1100struct io_context;			/* See blkdev.h */
1101
1102
1103#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1104extern void prefetch_stack(struct task_struct *t);
1105#else
1106static inline void prefetch_stack(struct task_struct *t) { }
1107#endif
1108
1109struct audit_context;		/* See audit.c */
1110struct mempolicy;
1111struct pipe_inode_info;
1112struct uts_namespace;
1113
1114struct load_weight {
1115	unsigned long weight;
1116	u32 inv_weight;
1117};
1118
1119struct sched_avg {
1120	u64 last_runnable_update;
1121	s64 decay_count;
1122	/*
1123	 * utilization_avg_contrib describes the amount of time that a
1124	 * sched_entity is running on a CPU. It is based on running_avg_sum
1125	 * and is scaled in the range [0..SCHED_LOAD_SCALE].
1126	 * load_avg_contrib described the amount of time that a sched_entity
1127	 * is runnable on a rq. It is based on both runnable_avg_sum and the
1128	 * weight of the task.
1129	 */
1130	unsigned long load_avg_contrib, utilization_avg_contrib;
1131	/*
1132	 * These sums represent an infinite geometric series and so are bound
1133	 * above by 1024/(1-y).  Thus we only need a u32 to store them for all
1134	 * choices of y < 1-2^(-32)*1024.
1135	 * running_avg_sum reflects the time that the sched_entity is
1136	 * effectively running on the CPU.
1137	 * runnable_avg_sum represents the amount of time a sched_entity is on
1138	 * a runqueue which includes the running time that is monitored by
1139	 * running_avg_sum.
1140	 */
1141	u32 runnable_avg_sum, avg_period, running_avg_sum;
1142};
1143
1144#ifdef CONFIG_SCHEDSTATS
1145struct sched_statistics {
1146	u64			wait_start;
1147	u64			wait_max;
1148	u64			wait_count;
1149	u64			wait_sum;
1150	u64			iowait_count;
1151	u64			iowait_sum;
1152
1153	u64			sleep_start;
1154	u64			sleep_max;
1155	s64			sum_sleep_runtime;
1156
1157	u64			block_start;
1158	u64			block_max;
1159	u64			exec_max;
1160	u64			slice_max;
1161
1162	u64			nr_migrations_cold;
1163	u64			nr_failed_migrations_affine;
1164	u64			nr_failed_migrations_running;
1165	u64			nr_failed_migrations_hot;
1166	u64			nr_forced_migrations;
1167
1168	u64			nr_wakeups;
1169	u64			nr_wakeups_sync;
1170	u64			nr_wakeups_migrate;
1171	u64			nr_wakeups_local;
1172	u64			nr_wakeups_remote;
1173	u64			nr_wakeups_affine;
1174	u64			nr_wakeups_affine_attempts;
1175	u64			nr_wakeups_passive;
1176	u64			nr_wakeups_idle;
1177};
1178#endif
1179
1180struct sched_entity {
1181	struct load_weight	load;		/* for load-balancing */
1182	struct rb_node		run_node;
1183	struct list_head	group_node;
1184	unsigned int		on_rq;
1185
1186	u64			exec_start;
1187	u64			sum_exec_runtime;
1188	u64			vruntime;
1189	u64			prev_sum_exec_runtime;
1190
1191	u64			nr_migrations;
1192
1193#ifdef CONFIG_SCHEDSTATS
1194	struct sched_statistics statistics;
1195#endif
1196
1197#ifdef CONFIG_FAIR_GROUP_SCHED
1198	int			depth;
1199	struct sched_entity	*parent;
1200	/* rq on which this entity is (to be) queued: */
1201	struct cfs_rq		*cfs_rq;
1202	/* rq "owned" by this entity/group: */
1203	struct cfs_rq		*my_q;
1204#endif
1205
1206#ifdef CONFIG_SMP
1207	/* Per-entity load-tracking */
1208	struct sched_avg	avg;
1209#endif
1210};
1211
1212struct sched_rt_entity {
1213	struct list_head run_list;
1214	unsigned long timeout;
1215	unsigned long watchdog_stamp;
1216	unsigned int time_slice;
1217
1218	struct sched_rt_entity *back;
1219#ifdef CONFIG_RT_GROUP_SCHED
1220	struct sched_rt_entity	*parent;
1221	/* rq on which this entity is (to be) queued: */
1222	struct rt_rq		*rt_rq;
1223	/* rq "owned" by this entity/group: */
1224	struct rt_rq		*my_q;
1225#endif
1226};
1227
1228struct sched_dl_entity {
1229	struct rb_node	rb_node;
1230
1231	/*
1232	 * Original scheduling parameters. Copied here from sched_attr
1233	 * during sched_setattr(), they will remain the same until
1234	 * the next sched_setattr().
1235	 */
1236	u64 dl_runtime;		/* maximum runtime for each instance	*/
1237	u64 dl_deadline;	/* relative deadline of each instance	*/
1238	u64 dl_period;		/* separation of two instances (period) */
1239	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
1240
1241	/*
1242	 * Actual scheduling parameters. Initialized with the values above,
1243	 * they are continously updated during task execution. Note that
1244	 * the remaining runtime could be < 0 in case we are in overrun.
1245	 */
1246	s64 runtime;		/* remaining runtime for this instance	*/
1247	u64 deadline;		/* absolute deadline for this instance	*/
1248	unsigned int flags;	/* specifying the scheduler behaviour	*/
1249
1250	/*
1251	 * Some bool flags:
1252	 *
1253	 * @dl_throttled tells if we exhausted the runtime. If so, the
1254	 * task has to wait for a replenishment to be performed at the
1255	 * next firing of dl_timer.
1256	 *
1257	 * @dl_new tells if a new instance arrived. If so we must
1258	 * start executing it with full runtime and reset its absolute
1259	 * deadline;
1260	 *
1261	 * @dl_boosted tells if we are boosted due to DI. If so we are
1262	 * outside bandwidth enforcement mechanism (but only until we
1263	 * exit the critical section);
1264	 *
1265	 * @dl_yielded tells if task gave up the cpu before consuming
1266	 * all its available runtime during the last job.
1267	 */
1268	int dl_throttled, dl_new, dl_boosted, dl_yielded;
1269
1270	/*
1271	 * Bandwidth enforcement timer. Each -deadline task has its
1272	 * own bandwidth to be enforced, thus we need one timer per task.
1273	 */
1274	struct hrtimer dl_timer;
1275};
1276
1277union rcu_special {
1278	struct {
1279		bool blocked;
1280		bool need_qs;
1281	} b;
1282	short s;
1283};
1284struct rcu_node;
1285
1286enum perf_event_task_context {
1287	perf_invalid_context = -1,
1288	perf_hw_context = 0,
1289	perf_sw_context,
1290	perf_nr_task_contexts,
1291};
1292
1293struct task_struct {
1294	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1295	void *stack;
1296	atomic_t usage;
1297	unsigned int flags;	/* per process flags, defined below */
1298	unsigned int ptrace;
1299
1300#ifdef CONFIG_SMP
1301	struct llist_node wake_entry;
1302	int on_cpu;
1303	struct task_struct *last_wakee;
1304	unsigned long wakee_flips;
1305	unsigned long wakee_flip_decay_ts;
1306
1307	int wake_cpu;
1308#endif
1309	int on_rq;
1310
1311	int prio, static_prio, normal_prio;
1312	unsigned int rt_priority;
1313	const struct sched_class *sched_class;
1314	struct sched_entity se;
1315	struct sched_rt_entity rt;
1316#ifdef CONFIG_CGROUP_SCHED
1317	struct task_group *sched_task_group;
1318#endif
1319	struct sched_dl_entity dl;
1320
1321#ifdef CONFIG_PREEMPT_NOTIFIERS
1322	/* list of struct preempt_notifier: */
1323	struct hlist_head preempt_notifiers;
1324#endif
1325
1326#ifdef CONFIG_BLK_DEV_IO_TRACE
1327	unsigned int btrace_seq;
1328#endif
1329
1330	unsigned int policy;
1331	int nr_cpus_allowed;
1332	cpumask_t cpus_allowed;
1333
1334#ifdef CONFIG_PREEMPT_RCU
1335	int rcu_read_lock_nesting;
1336	union rcu_special rcu_read_unlock_special;
1337	struct list_head rcu_node_entry;
1338#endif /* #ifdef CONFIG_PREEMPT_RCU */
1339#ifdef CONFIG_PREEMPT_RCU
1340	struct rcu_node *rcu_blocked_node;
1341#endif /* #ifdef CONFIG_PREEMPT_RCU */
1342#ifdef CONFIG_TASKS_RCU
1343	unsigned long rcu_tasks_nvcsw;
1344	bool rcu_tasks_holdout;
1345	struct list_head rcu_tasks_holdout_list;
1346	int rcu_tasks_idle_cpu;
1347#endif /* #ifdef CONFIG_TASKS_RCU */
1348
1349#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1350	struct sched_info sched_info;
1351#endif
1352
1353	struct list_head tasks;
1354#ifdef CONFIG_SMP
1355	struct plist_node pushable_tasks;
1356	struct rb_node pushable_dl_tasks;
1357#endif
1358
1359	struct mm_struct *mm, *active_mm;
1360#ifdef CONFIG_COMPAT_BRK
1361	unsigned brk_randomized:1;
1362#endif
1363	/* per-thread vma caching */
1364	u32 vmacache_seqnum;
1365	struct vm_area_struct *vmacache[VMACACHE_SIZE];
1366#if defined(SPLIT_RSS_COUNTING)
1367	struct task_rss_stat	rss_stat;
1368#endif
1369/* task state */
1370	int exit_state;
1371	int exit_code, exit_signal;
1372	int pdeath_signal;  /*  The signal sent when the parent dies  */
1373	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
1374
1375	/* Used for emulating ABI behavior of previous Linux versions */
1376	unsigned int personality;
1377
1378	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1379				 * execve */
1380	unsigned in_iowait:1;
1381
1382	/* Revert to default priority/policy when forking */
1383	unsigned sched_reset_on_fork:1;
1384	unsigned sched_contributes_to_load:1;
1385
1386#ifdef CONFIG_MEMCG_KMEM
1387	unsigned memcg_kmem_skip_account:1;
1388#endif
1389
1390	unsigned long atomic_flags; /* Flags needing atomic access. */
1391
1392	struct restart_block restart_block;
1393
1394	pid_t pid;
1395	pid_t tgid;
1396
1397#ifdef CONFIG_CC_STACKPROTECTOR
1398	/* Canary value for the -fstack-protector gcc feature */
1399	unsigned long stack_canary;
1400#endif
1401	/*
1402	 * pointers to (original) parent process, youngest child, younger sibling,
1403	 * older sibling, respectively.  (p->father can be replaced with
1404	 * p->real_parent->pid)
1405	 */
1406	struct task_struct __rcu *real_parent; /* real parent process */
1407	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
1408	/*
1409	 * children/sibling forms the list of my natural children
1410	 */
1411	struct list_head children;	/* list of my children */
1412	struct list_head sibling;	/* linkage in my parent's children list */
1413	struct task_struct *group_leader;	/* threadgroup leader */
1414
1415	/*
1416	 * ptraced is the list of tasks this task is using ptrace on.
1417	 * This includes both natural children and PTRACE_ATTACH targets.
1418	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1419	 */
1420	struct list_head ptraced;
1421	struct list_head ptrace_entry;
1422
1423	/* PID/PID hash table linkage. */
1424	struct pid_link pids[PIDTYPE_MAX];
1425	struct list_head thread_group;
1426	struct list_head thread_node;
1427
1428	struct completion *vfork_done;		/* for vfork() */
1429	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
1430	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
1431
1432	cputime_t utime, stime, utimescaled, stimescaled;
1433	cputime_t gtime;
1434#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1435	struct cputime prev_cputime;
1436#endif
1437#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1438	seqlock_t vtime_seqlock;
1439	unsigned long long vtime_snap;
1440	enum {
1441		VTIME_SLEEPING = 0,
1442		VTIME_USER,
1443		VTIME_SYS,
1444	} vtime_snap_whence;
1445#endif
1446	unsigned long nvcsw, nivcsw; /* context switch counts */
1447	u64 start_time;		/* monotonic time in nsec */
1448	u64 real_start_time;	/* boot based time in nsec */
1449/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1450	unsigned long min_flt, maj_flt;
1451
1452	struct task_cputime cputime_expires;
1453	struct list_head cpu_timers[3];
1454
1455/* process credentials */
1456	const struct cred __rcu *real_cred; /* objective and real subjective task
1457					 * credentials (COW) */
1458	const struct cred __rcu *cred;	/* effective (overridable) subjective task
1459					 * credentials (COW) */
1460	char comm[TASK_COMM_LEN]; /* executable name excluding path
1461				     - access with [gs]et_task_comm (which lock
1462				       it with task_lock())
1463				     - initialized normally by setup_new_exec */
1464/* file system info */
1465	int link_count, total_link_count;
1466#ifdef CONFIG_SYSVIPC
1467/* ipc stuff */
1468	struct sysv_sem sysvsem;
1469	struct sysv_shm sysvshm;
1470#endif
1471#ifdef CONFIG_DETECT_HUNG_TASK
1472/* hung task detection */
1473	unsigned long last_switch_count;
1474#endif
1475/* CPU-specific state of this task */
1476	struct thread_struct thread;
1477/* filesystem information */
1478	struct fs_struct *fs;
1479/* open file information */
1480	struct files_struct *files;
1481/* namespaces */
1482	struct nsproxy *nsproxy;
1483/* signal handlers */
1484	struct signal_struct *signal;
1485	struct sighand_struct *sighand;
1486
1487	sigset_t blocked, real_blocked;
1488	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
1489	struct sigpending pending;
1490
1491	unsigned long sas_ss_sp;
1492	size_t sas_ss_size;
1493	int (*notifier)(void *priv);
1494	void *notifier_data;
1495	sigset_t *notifier_mask;
1496	struct callback_head *task_works;
1497
1498	struct audit_context *audit_context;
1499#ifdef CONFIG_AUDITSYSCALL
1500	kuid_t loginuid;
1501	unsigned int sessionid;
1502#endif
1503	struct seccomp seccomp;
1504
1505/* Thread group tracking */
1506   	u32 parent_exec_id;
1507   	u32 self_exec_id;
1508/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1509 * mempolicy */
1510	spinlock_t alloc_lock;
1511
1512	/* Protection of the PI data structures: */
1513	raw_spinlock_t pi_lock;
1514
1515#ifdef CONFIG_RT_MUTEXES
1516	/* PI waiters blocked on a rt_mutex held by this task */
1517	struct rb_root pi_waiters;
1518	struct rb_node *pi_waiters_leftmost;
1519	/* Deadlock detection and priority inheritance handling */
1520	struct rt_mutex_waiter *pi_blocked_on;
1521#endif
1522
1523#ifdef CONFIG_DEBUG_MUTEXES
1524	/* mutex deadlock detection */
1525	struct mutex_waiter *blocked_on;
1526#endif
1527#ifdef CONFIG_TRACE_IRQFLAGS
1528	unsigned int irq_events;
1529	unsigned long hardirq_enable_ip;
1530	unsigned long hardirq_disable_ip;
1531	unsigned int hardirq_enable_event;
1532	unsigned int hardirq_disable_event;
1533	int hardirqs_enabled;
1534	int hardirq_context;
1535	unsigned long softirq_disable_ip;
1536	unsigned long softirq_enable_ip;
1537	unsigned int softirq_disable_event;
1538	unsigned int softirq_enable_event;
1539	int softirqs_enabled;
1540	int softirq_context;
1541#endif
1542#ifdef CONFIG_LOCKDEP
1543# define MAX_LOCK_DEPTH 48UL
1544	u64 curr_chain_key;
1545	int lockdep_depth;
1546	unsigned int lockdep_recursion;
1547	struct held_lock held_locks[MAX_LOCK_DEPTH];
1548	gfp_t lockdep_reclaim_gfp;
1549#endif
1550
1551/* journalling filesystem info */
1552	void *journal_info;
1553
1554/* stacked block device info */
1555	struct bio_list *bio_list;
1556
1557#ifdef CONFIG_BLOCK
1558/* stack plugging */
1559	struct blk_plug *plug;
1560#endif
1561
1562/* VM state */
1563	struct reclaim_state *reclaim_state;
1564
1565	struct backing_dev_info *backing_dev_info;
1566
1567	struct io_context *io_context;
1568
1569	unsigned long ptrace_message;
1570	siginfo_t *last_siginfo; /* For ptrace use.  */
1571	struct task_io_accounting ioac;
1572#if defined(CONFIG_TASK_XACCT)
1573	u64 acct_rss_mem1;	/* accumulated rss usage */
1574	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
1575	cputime_t acct_timexpd;	/* stime + utime since last update */
1576#endif
1577#ifdef CONFIG_CPUSETS
1578	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1579	seqcount_t mems_allowed_seq;	/* Seqence no to catch updates */
1580	int cpuset_mem_spread_rotor;
1581	int cpuset_slab_spread_rotor;
1582#endif
1583#ifdef CONFIG_CGROUPS
1584	/* Control Group info protected by css_set_lock */
1585	struct css_set __rcu *cgroups;
1586	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1587	struct list_head cg_list;
1588#endif
1589#ifdef CONFIG_FUTEX
1590	struct robust_list_head __user *robust_list;
1591#ifdef CONFIG_COMPAT
1592	struct compat_robust_list_head __user *compat_robust_list;
1593#endif
1594	struct list_head pi_state_list;
1595	struct futex_pi_state *pi_state_cache;
1596#endif
1597#ifdef CONFIG_PERF_EVENTS
1598	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1599	struct mutex perf_event_mutex;
1600	struct list_head perf_event_list;
1601#endif
1602#ifdef CONFIG_DEBUG_PREEMPT
1603	unsigned long preempt_disable_ip;
1604#endif
1605#ifdef CONFIG_NUMA
1606	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1607	short il_next;
1608	short pref_node_fork;
1609#endif
1610#ifdef CONFIG_NUMA_BALANCING
1611	int numa_scan_seq;
1612	unsigned int numa_scan_period;
1613	unsigned int numa_scan_period_max;
1614	int numa_preferred_nid;
1615	unsigned long numa_migrate_retry;
1616	u64 node_stamp;			/* migration stamp  */
1617	u64 last_task_numa_placement;
1618	u64 last_sum_exec_runtime;
1619	struct callback_head numa_work;
1620
1621	struct list_head numa_entry;
1622	struct numa_group *numa_group;
1623
1624	/*
1625	 * numa_faults is an array split into four regions:
1626	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1627	 * in this precise order.
1628	 *
1629	 * faults_memory: Exponential decaying average of faults on a per-node
1630	 * basis. Scheduling placement decisions are made based on these
1631	 * counts. The values remain static for the duration of a PTE scan.
1632	 * faults_cpu: Track the nodes the process was running on when a NUMA
1633	 * hinting fault was incurred.
1634	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1635	 * during the current scan window. When the scan completes, the counts
1636	 * in faults_memory and faults_cpu decay and these values are copied.
1637	 */
1638	unsigned long *numa_faults;
1639	unsigned long total_numa_faults;
1640
1641	/*
1642	 * numa_faults_locality tracks if faults recorded during the last
1643	 * scan window were remote/local or failed to migrate. The task scan
1644	 * period is adapted based on the locality of the faults with different
1645	 * weights depending on whether they were shared or private faults
1646	 */
1647	unsigned long numa_faults_locality[3];
1648
1649	unsigned long numa_pages_migrated;
1650#endif /* CONFIG_NUMA_BALANCING */
1651
1652	struct rcu_head rcu;
1653
1654	/*
1655	 * cache last used pipe for splice
1656	 */
1657	struct pipe_inode_info *splice_pipe;
1658
1659	struct page_frag task_frag;
1660
1661#ifdef	CONFIG_TASK_DELAY_ACCT
1662	struct task_delay_info *delays;
1663#endif
1664#ifdef CONFIG_FAULT_INJECTION
1665	int make_it_fail;
1666#endif
1667	/*
1668	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1669	 * balance_dirty_pages() for some dirty throttling pause
1670	 */
1671	int nr_dirtied;
1672	int nr_dirtied_pause;
1673	unsigned long dirty_paused_when; /* start of a write-and-pause period */
1674
1675#ifdef CONFIG_LATENCYTOP
1676	int latency_record_count;
1677	struct latency_record latency_record[LT_SAVECOUNT];
1678#endif
1679	/*
1680	 * time slack values; these are used to round up poll() and
1681	 * select() etc timeout values. These are in nanoseconds.
1682	 */
1683	unsigned long timer_slack_ns;
1684	unsigned long default_timer_slack_ns;
1685
1686#ifdef CONFIG_KASAN
1687	unsigned int kasan_depth;
1688#endif
1689#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1690	/* Index of current stored address in ret_stack */
1691	int curr_ret_stack;
1692	/* Stack of return addresses for return function tracing */
1693	struct ftrace_ret_stack	*ret_stack;
1694	/* time stamp for last schedule */
1695	unsigned long long ftrace_timestamp;
1696	/*
1697	 * Number of functions that haven't been traced
1698	 * because of depth overrun.
1699	 */
1700	atomic_t trace_overrun;
1701	/* Pause for the tracing */
1702	atomic_t tracing_graph_pause;
1703#endif
1704#ifdef CONFIG_TRACING
1705	/* state flags for use by tracers */
1706	unsigned long trace;
1707	/* bitmask and counter of trace recursion */
1708	unsigned long trace_recursion;
1709#endif /* CONFIG_TRACING */
1710#ifdef CONFIG_MEMCG
1711	struct memcg_oom_info {
1712		struct mem_cgroup *memcg;
1713		gfp_t gfp_mask;
1714		int order;
1715		unsigned int may_oom:1;
1716	} memcg_oom;
1717#endif
1718#ifdef CONFIG_UPROBES
1719	struct uprobe_task *utask;
1720#endif
1721#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1722	unsigned int	sequential_io;
1723	unsigned int	sequential_io_avg;
1724#endif
1725#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1726	unsigned long	task_state_change;
1727#endif
1728};
1729
1730/* Future-safe accessor for struct task_struct's cpus_allowed. */
1731#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1732
1733#define TNF_MIGRATED	0x01
1734#define TNF_NO_GROUP	0x02
1735#define TNF_SHARED	0x04
1736#define TNF_FAULT_LOCAL	0x08
1737#define TNF_MIGRATE_FAIL 0x10
1738
1739#ifdef CONFIG_NUMA_BALANCING
1740extern void task_numa_fault(int last_node, int node, int pages, int flags);
1741extern pid_t task_numa_group_id(struct task_struct *p);
1742extern void set_numabalancing_state(bool enabled);
1743extern void task_numa_free(struct task_struct *p);
1744extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
1745					int src_nid, int dst_cpu);
1746#else
1747static inline void task_numa_fault(int last_node, int node, int pages,
1748				   int flags)
1749{
1750}
1751static inline pid_t task_numa_group_id(struct task_struct *p)
1752{
1753	return 0;
1754}
1755static inline void set_numabalancing_state(bool enabled)
1756{
1757}
1758static inline void task_numa_free(struct task_struct *p)
1759{
1760}
1761static inline bool should_numa_migrate_memory(struct task_struct *p,
1762				struct page *page, int src_nid, int dst_cpu)
1763{
1764	return true;
1765}
1766#endif
1767
1768static inline struct pid *task_pid(struct task_struct *task)
1769{
1770	return task->pids[PIDTYPE_PID].pid;
1771}
1772
1773static inline struct pid *task_tgid(struct task_struct *task)
1774{
1775	return task->group_leader->pids[PIDTYPE_PID].pid;
1776}
1777
1778/*
1779 * Without tasklist or rcu lock it is not safe to dereference
1780 * the result of task_pgrp/task_session even if task == current;
1781 * we can race with another thread doing sys_setsid/sys_setpgid.
1782 */
1783static inline struct pid *task_pgrp(struct task_struct *task)
1784{
1785	return task->group_leader->pids[PIDTYPE_PGID].pid;
1786}
1787
1788static inline struct pid *task_session(struct task_struct *task)
1789{
1790	return task->group_leader->pids[PIDTYPE_SID].pid;
1791}
1792
1793struct pid_namespace;
1794
1795/*
1796 * the helpers to get the task's different pids as they are seen
1797 * from various namespaces
1798 *
1799 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1800 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1801 *                     current.
1802 * task_xid_nr_ns()  : id seen from the ns specified;
1803 *
1804 * set_task_vxid()   : assigns a virtual id to a task;
1805 *
1806 * see also pid_nr() etc in include/linux/pid.h
1807 */
1808pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1809			struct pid_namespace *ns);
1810
1811static inline pid_t task_pid_nr(struct task_struct *tsk)
1812{
1813	return tsk->pid;
1814}
1815
1816static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1817					struct pid_namespace *ns)
1818{
1819	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1820}
1821
1822static inline pid_t task_pid_vnr(struct task_struct *tsk)
1823{
1824	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1825}
1826
1827
1828static inline pid_t task_tgid_nr(struct task_struct *tsk)
1829{
1830	return tsk->tgid;
1831}
1832
1833pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1834
1835static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1836{
1837	return pid_vnr(task_tgid(tsk));
1838}
1839
1840
1841static inline int pid_alive(const struct task_struct *p);
1842static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1843{
1844	pid_t pid = 0;
1845
1846	rcu_read_lock();
1847	if (pid_alive(tsk))
1848		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1849	rcu_read_unlock();
1850
1851	return pid;
1852}
1853
1854static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1855{
1856	return task_ppid_nr_ns(tsk, &init_pid_ns);
1857}
1858
1859static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1860					struct pid_namespace *ns)
1861{
1862	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1863}
1864
1865static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1866{
1867	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1868}
1869
1870
1871static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1872					struct pid_namespace *ns)
1873{
1874	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1875}
1876
1877static inline pid_t task_session_vnr(struct task_struct *tsk)
1878{
1879	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1880}
1881
1882/* obsolete, do not use */
1883static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1884{
1885	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1886}
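
/*
 * Example (illustrative sketch, not part of this header): reporting a task's
 * id from different viewpoints with the helpers above. The reporting context
 * is hypothetical; only the accessors declared in this file are used.
 *
 *	void show_task_ids(struct task_struct *tsk, struct pid_namespace *ns)
 *	{
 *		pr_info("global %d, in current's ns %d, in given ns %d\n",
 *			task_pid_nr(tsk),	 // id seen from init_pid_ns
 *			task_pid_vnr(tsk),	 // id seen from current's pid ns
 *			task_pid_nr_ns(tsk, ns)); // id seen from @ns
 *	}
 */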
1887
1888/**
1889 * pid_alive - check that a task structure is not stale
1890 * @p: Task structure to be checked.
1891 *
1892 * Test if a process is not yet dead (at most zombie state).
1893 * If pid_alive fails, then pointers within the task structure
1894 * can be stale and must not be dereferenced.
1895 *
1896 * Return: 1 if the process is alive. 0 otherwise.
1897 */
1898static inline int pid_alive(const struct task_struct *p)
1899{
1900	return p->pids[PIDTYPE_PID].pid != NULL;
1901}
1902
1903/**
1904 * is_global_init - check if a task structure is init
1905 * @tsk: Task structure to be checked.
1906 *
1907 * Check if a task structure is the first user space task the kernel created.
1908 *
1909 * Return: 1 if the task structure is init. 0 otherwise.
1910 */
1911static inline int is_global_init(struct task_struct *tsk)
1912{
1913	return tsk->pid == 1;
1914}
1915
1916extern struct pid *cad_pid;
1917
1918extern void free_task(struct task_struct *tsk);
1919#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1920
1921extern void __put_task_struct(struct task_struct *t);
1922
1923static inline void put_task_struct(struct task_struct *t)
1924{
1925	if (atomic_dec_and_test(&t->usage))
1926		__put_task_struct(t);
1927}
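
/*
 * Example (illustrative sketch, not part of this header): the usual
 * reference-counting pattern when a task_struct pointer has to outlive the
 * RCU section in which it was found. keep_for_later() is a hypothetical
 * placeholder.
 *
 *	rcu_read_lock();
 *	tsk = find_task_by_vpid(nr);
 *	if (tsk)
 *		get_task_struct(tsk);	// take a reference while it is valid
 *	rcu_read_unlock();
 *
 *	if (tsk) {
 *		keep_for_later(tsk);	// safe to use outside the RCU section
 *		put_task_struct(tsk);	// drop the reference when done
 *	}
 */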
1928
1929#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1930extern void task_cputime(struct task_struct *t,
1931			 cputime_t *utime, cputime_t *stime);
1932extern void task_cputime_scaled(struct task_struct *t,
1933				cputime_t *utimescaled, cputime_t *stimescaled);
1934extern cputime_t task_gtime(struct task_struct *t);
1935#else
1936static inline void task_cputime(struct task_struct *t,
1937				cputime_t *utime, cputime_t *stime)
1938{
1939	if (utime)
1940		*utime = t->utime;
1941	if (stime)
1942		*stime = t->stime;
1943}
1944
1945static inline void task_cputime_scaled(struct task_struct *t,
1946				       cputime_t *utimescaled,
1947				       cputime_t *stimescaled)
1948{
1949	if (utimescaled)
1950		*utimescaled = t->utimescaled;
1951	if (stimescaled)
1952		*stimescaled = t->stimescaled;
1953}
1954
1955static inline cputime_t task_gtime(struct task_struct *t)
1956{
1957	return t->gtime;
1958}
1959#endif
1960extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1961extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
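
/*
 * Example (illustrative sketch, not part of this header): sampling a task's
 * user/system time with the accessors above, which work the same way whether
 * or not CONFIG_VIRT_CPU_ACCOUNTING_GEN is enabled.
 *
 *	cputime_t utime, stime;
 *
 *	task_cputime(current, &utime, &stime);
 *	// utime/stime now hold the accumulated user and system time
 */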
1962
1963/*
1964 * Per process flags
1965 */
1966#define PF_EXITING	0x00000004	/* getting shut down */
1967#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
1968#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
1969#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
1970#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
1971#define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
1972#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
1973#define PF_DUMPCORE	0x00000200	/* dumped core */
1974#define PF_SIGNALED	0x00000400	/* killed by a signal */
1975#define PF_MEMALLOC	0x00000800	/* Allocating memory */
1976#define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
1977#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
1978#define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
1979#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
1980#define PF_FROZEN	0x00010000	/* frozen for system suspend */
1981#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
1982#define PF_KSWAPD	0x00040000	/* I am kswapd */
1983#define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
1984#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1985#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1986#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1987#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1988#define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
1989#define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1990#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
1991#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
1992#define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */
1993
1994/*
1995 * Only the _current_ task can read/write to tsk->flags, but other
1996 * tasks can access tsk->flags in readonly mode for example
1997 * with tsk_used_math (like during threaded core dumping).
1998 * There is however an exception to this rule during ptrace
1999 * or during fork: the ptracer task is allowed to write to the
2000 * child->flags of its traced child (same goes for fork, the parent
2001 * can write to the child->flags), because we're guaranteed the
2002 * child is not running and in turn not changing child->flags
2003 * at the same time the parent does it.
2004 */
2005#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
2006#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
2007#define clear_used_math() clear_stopped_child_used_math(current)
2008#define set_used_math() set_stopped_child_used_math(current)
2009#define conditional_stopped_child_used_math(condition, child) \
2010	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
2011#define conditional_used_math(condition) \
2012	conditional_stopped_child_used_math(condition, current)
2013#define copy_to_stopped_child_used_math(child) \
2014	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
2015/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
2016#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
2017#define used_math() tsk_used_math(current)
2018
2019/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags;
2020 * __GFP_FS is also cleared as it implies __GFP_IO.
2021 */
2022static inline gfp_t memalloc_noio_flags(gfp_t flags)
2023{
2024	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
2025		flags &= ~(__GFP_IO | __GFP_FS);
2026	return flags;
2027}
2028
2029static inline unsigned int memalloc_noio_save(void)
2030{
2031	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
2032	current->flags |= PF_MEMALLOC_NOIO;
2033	return flags;
2034}
2035
2036static inline void memalloc_noio_restore(unsigned int flags)
2037{
2038	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
2039}
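
/*
 * Example (illustrative sketch, not part of this header): marking a region in
 * which memory allocations must not recurse into I/O, e.g. a driver path that
 * is itself part of the reclaim/writeback chain. do_work() is a hypothetical
 * placeholder.
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	do_work();		// allocations here get __GFP_IO/__GFP_FS
 *				// stripped via memalloc_noio_flags()
 *	memalloc_noio_restore(noio_flags);
 */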
2040
2041/* Per-process atomic flags. */
2042#define PFA_NO_NEW_PRIVS 0	/* May not gain new privileges. */
2043#define PFA_SPREAD_PAGE  1      /* Spread page cache over cpuset */
2044#define PFA_SPREAD_SLAB  2      /* Spread some slab caches over cpuset */
2045
2046
2047#define TASK_PFA_TEST(name, func)					\
2048	static inline bool task_##func(struct task_struct *p)		\
2049	{ return test_bit(PFA_##name, &p->atomic_flags); }
2050#define TASK_PFA_SET(name, func)					\
2051	static inline void task_set_##func(struct task_struct *p)	\
2052	{ set_bit(PFA_##name, &p->atomic_flags); }
2053#define TASK_PFA_CLEAR(name, func)					\
2054	static inline void task_clear_##func(struct task_struct *p)	\
2055	{ clear_bit(PFA_##name, &p->atomic_flags); }
2056
2057TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
2058TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
2059
2060TASK_PFA_TEST(SPREAD_PAGE, spread_page)
2061TASK_PFA_SET(SPREAD_PAGE, spread_page)
2062TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
2063
2064TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2065TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2066TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
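
/*
 * For reference (illustrative expansion, not part of this header),
 * TASK_PFA_TEST(SPREAD_PAGE, spread_page) above generates:
 *
 *	static inline bool task_spread_page(struct task_struct *p)
 *	{ return test_bit(PFA_SPREAD_PAGE, &p->atomic_flags); }
 *
 * so callers simply use task_spread_page(p), task_set_spread_page(p) and
 * task_clear_spread_page(p) to query and update the per-process atomic flags.
 */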
2067
2068/*
2069 * task->jobctl flags
2070 */
2071#define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
2072
2073#define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
2074#define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
2075#define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
2076#define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
2077#define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
2078#define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
2079#define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
2080
2081#define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
2082#define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
2083#define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
2084#define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
2085#define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
2086#define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
2087#define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)
2088
2089#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
2090#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
2091
2092extern bool task_set_jobctl_pending(struct task_struct *task,
2093				    unsigned int mask);
2094extern void task_clear_jobctl_trapping(struct task_struct *task);
2095extern void task_clear_jobctl_pending(struct task_struct *task,
2096				      unsigned int mask);
2097
2098static inline void rcu_copy_process(struct task_struct *p)
2099{
2100#ifdef CONFIG_PREEMPT_RCU
2101	p->rcu_read_lock_nesting = 0;
2102	p->rcu_read_unlock_special.s = 0;
2103	p->rcu_blocked_node = NULL;
2104	INIT_LIST_HEAD(&p->rcu_node_entry);
2105#endif /* #ifdef CONFIG_PREEMPT_RCU */
2106#ifdef CONFIG_TASKS_RCU
2107	p->rcu_tasks_holdout = false;
2108	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
2109	p->rcu_tasks_idle_cpu = -1;
2110#endif /* #ifdef CONFIG_TASKS_RCU */
2111}
2112
2113static inline void tsk_restore_flags(struct task_struct *task,
2114				unsigned long orig_flags, unsigned long flags)
2115{
2116	task->flags &= ~flags;
2117	task->flags |= orig_flags & flags;
2118}
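
/*
 * Example (illustrative sketch, not part of this header): temporarily setting
 * a per-process flag and restoring its previous state with tsk_restore_flags(),
 * as done for PF_MEMALLOC-style flags.
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC;
 *	// ... allocation work that must not recurse into reclaim ...
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 */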
2119
2120extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
2121				     const struct cpumask *trial);
2122extern int task_can_attach(struct task_struct *p,
2123			   const struct cpumask *cs_cpus_allowed);
2124#ifdef CONFIG_SMP
2125extern void do_set_cpus_allowed(struct task_struct *p,
2126			       const struct cpumask *new_mask);
2127
2128extern int set_cpus_allowed_ptr(struct task_struct *p,
2129				const struct cpumask *new_mask);
2130#else
2131static inline void do_set_cpus_allowed(struct task_struct *p,
2132				      const struct cpumask *new_mask)
2133{
2134}
2135static inline int set_cpus_allowed_ptr(struct task_struct *p,
2136				       const struct cpumask *new_mask)
2137{
2138	if (!cpumask_test_cpu(0, new_mask))
2139		return -EINVAL;
2140	return 0;
2141}
2142#endif
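
/*
 * Example (illustrative sketch, not part of this header): restricting a task
 * to a single CPU with set_cpus_allowed_ptr(). Note that on !SMP the stub
 * above only succeeds if the mask contains CPU 0.
 *
 *	int ret = set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
 *
 *	if (ret)
 *		pr_warn("could not bind task to CPU %d\n", cpu);
 */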
2143
2144#ifdef CONFIG_NO_HZ_COMMON
2145void calc_load_enter_idle(void);
2146void calc_load_exit_idle(void);
2147#else
2148static inline void calc_load_enter_idle(void) { }
2149static inline void calc_load_exit_idle(void) { }
2150#endif /* CONFIG_NO_HZ_COMMON */
2151
2152#ifndef CONFIG_CPUMASK_OFFSTACK
2153static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
2154{
2155	return set_cpus_allowed_ptr(p, &new_mask);
2156}
2157#endif
2158
2159/*
2160 * Do not use outside of architecture code which knows its limitations.
2161 *
2162 * sched_clock() makes no promise of monotonicity or bounded drift between
2163 * CPUs; using it (which you should not) requires disabling IRQs.
2164 *
2165 * Please use one of the interfaces below instead.
2166 */
2167extern unsigned long long notrace sched_clock(void);
2168/*
2169 * See the comment in kernel/sched/clock.c
2170 */
2171extern u64 cpu_clock(int cpu);
2172extern u64 local_clock(void);
2173extern u64 running_clock(void);
2174extern u64 sched_clock_cpu(int cpu);
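
/*
 * Example (illustrative sketch, not part of this header): timing a short code
 * section with local_clock(), which may be called from any context and
 * returns nanoseconds as seen on the local CPU.
 *
 *	u64 start = local_clock();
 *
 *	// ... work being measured ...
 *	pr_debug("section took %llu ns\n", local_clock() - start);
 */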
2175
2176
2177extern void sched_clock_init(void);
2178
2179#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2180static inline void sched_clock_tick(void)
2181{
2182}
2183
2184static inline void sched_clock_idle_sleep_event(void)
2185{
2186}
2187
2188static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2189{
2190}
2191#else
2192/*
2193 * Architectures can set this to 1 if they have specified
2194 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
2195 * but then during bootup it turns out that sched_clock()
2196 * is reliable after all:
2197 */
2198extern int sched_clock_stable(void);
2199extern void set_sched_clock_stable(void);
2200extern void clear_sched_clock_stable(void);
2201
2202extern void sched_clock_tick(void);
2203extern void sched_clock_idle_sleep_event(void);
2204extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2205#endif
2206
2207#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2208/*
2209 * An interface for runtime opt-in to irq time accounting based on sched_clock.
2210 * The reason for this explicit opt-in is to avoid a performance penalty on
2211 * systems with slow sched_clocks.
2212 */
2213extern void enable_sched_clock_irqtime(void);
2214extern void disable_sched_clock_irqtime(void);
2215#else
2216static inline void enable_sched_clock_irqtime(void) {}
2217static inline void disable_sched_clock_irqtime(void) {}
2218#endif
2219
2220extern unsigned long long
2221task_sched_runtime(struct task_struct *task);
2222
2223/* sched_exec is called by processes performing an exec */
2224#ifdef CONFIG_SMP
2225extern void sched_exec(void);
2226#else
2227#define sched_exec()   {}
2228#endif
2229
2230extern void sched_clock_idle_sleep_event(void);
2231extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2232
2233#ifdef CONFIG_HOTPLUG_CPU
2234extern void idle_task_exit(void);
2235#else
2236static inline void idle_task_exit(void) {}
2237#endif
2238
2239#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
2240extern void wake_up_nohz_cpu(int cpu);
2241#else
2242static inline void wake_up_nohz_cpu(int cpu) { }
2243#endif
2244
2245#ifdef CONFIG_NO_HZ_FULL
2246extern bool sched_can_stop_tick(void);
2247extern u64 scheduler_tick_max_deferment(void);
2248#else
2249static inline bool sched_can_stop_tick(void) { return false; }
2250#endif
2251
2252#ifdef CONFIG_SCHED_AUTOGROUP
2253extern void sched_autogroup_create_attach(struct task_struct *p);
2254extern void sched_autogroup_detach(struct task_struct *p);
2255extern void sched_autogroup_fork(struct signal_struct *sig);
2256extern void sched_autogroup_exit(struct signal_struct *sig);
2257#ifdef CONFIG_PROC_FS
2258extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2259extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2260#endif
2261#else
2262static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2263static inline void sched_autogroup_detach(struct task_struct *p) { }
2264static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2265static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2266#endif
2267
2268extern int yield_to(struct task_struct *p, bool preempt);
2269extern void set_user_nice(struct task_struct *p, long nice);
2270extern int task_prio(const struct task_struct *p);
2271/**
2272 * task_nice - return the nice value of a given task.
2273 * @p: the task in question.
2274 *
2275 * Return: The nice value [ -20 ... 0 ... 19 ].
2276 */
2277static inline int task_nice(const struct task_struct *p)
2278{
2279	return PRIO_TO_NICE((p)->static_prio);
2280}
2281extern int can_nice(const struct task_struct *p, const int nice);
2282extern int task_curr(const struct task_struct *p);
2283extern int idle_cpu(int cpu);
2284extern int sched_setscheduler(struct task_struct *, int,
2285			      const struct sched_param *);
2286extern int sched_setscheduler_nocheck(struct task_struct *, int,
2287				      const struct sched_param *);
2288extern int sched_setattr(struct task_struct *,
2289			 const struct sched_attr *);
2290extern struct task_struct *idle_task(int cpu);
2291/**
2292 * is_idle_task - is the specified task an idle task?
2293 * @p: the task in question.
2294 *
2295 * Return: 1 if @p is an idle task. 0 otherwise.
2296 */
2297static inline bool is_idle_task(const struct task_struct *p)
2298{
2299	return p->pid == 0;
2300}
2301extern struct task_struct *curr_task(int cpu);
2302extern void set_curr_task(int cpu, struct task_struct *p);
2303
2304void yield(void);
2305
2306union thread_union {
2307	struct thread_info thread_info;
2308	unsigned long stack[THREAD_SIZE/sizeof(long)];
2309};
2310
2311#ifndef __HAVE_ARCH_KSTACK_END
2312static inline int kstack_end(void *addr)
2313{
2314	/* Reliable end of stack detection:
2315	 * Some APM BIOS versions misalign the stack.
2316	 */
2317	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2318}
2319#endif
2320
2321extern union thread_union init_thread_union;
2322extern struct task_struct init_task;
2323
2324extern struct   mm_struct init_mm;
2325
2326extern struct pid_namespace init_pid_ns;
2327
2328/*
2329 * find a task by one of its numerical ids
2330 *
2331 * find_task_by_pid_ns():
2332 *      finds a task by its pid in the specified namespace
2333 * find_task_by_vpid():
2334 *      finds a task by its virtual pid
2335 *
2336 * see also find_vpid() etc in include/linux/pid.h
2337 */
2338
2339extern struct task_struct *find_task_by_vpid(pid_t nr);
2340extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2341		struct pid_namespace *ns);
2342
2343/* per-UID process charging. */
2344extern struct user_struct * alloc_uid(kuid_t);
2345static inline struct user_struct *get_uid(struct user_struct *u)
2346{
2347	atomic_inc(&u->__count);
2348	return u;
2349}
2350extern void free_uid(struct user_struct *);
2351
2352#include <asm/current.h>
2353
2354extern void xtime_update(unsigned long ticks);
2355
2356extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2357extern int wake_up_process(struct task_struct *tsk);
2358extern void wake_up_new_task(struct task_struct *tsk);
2359#ifdef CONFIG_SMP
2360 extern void kick_process(struct task_struct *tsk);
2361#else
2362 static inline void kick_process(struct task_struct *tsk) { }
2363#endif
2364extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2365extern void sched_dead(struct task_struct *p);
2366
2367extern void proc_caches_init(void);
2368extern void flush_signals(struct task_struct *);
2369extern void __flush_signals(struct task_struct *);
2370extern void ignore_signals(struct task_struct *);
2371extern void flush_signal_handlers(struct task_struct *, int force_default);
2372extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2373
2374static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2375{
2376	unsigned long flags;
2377	int ret;
2378
2379	spin_lock_irqsave(&tsk->sighand->siglock, flags);
2380	ret = dequeue_signal(tsk, mask, info);
2381	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2382
2383	return ret;
2384}
2385
2386extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2387			      sigset_t *mask);
2388extern void unblock_all_signals(void);
2389extern void release_task(struct task_struct * p);
2390extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2391extern int force_sigsegv(int, struct task_struct *);
2392extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2393extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2394extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2395extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2396				const struct cred *, u32);
2397extern int kill_pgrp(struct pid *pid, int sig, int priv);
2398extern int kill_pid(struct pid *pid, int sig, int priv);
2399extern int kill_proc_info(int, struct siginfo *, pid_t);
2400extern __must_check bool do_notify_parent(struct task_struct *, int);
2401extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2402extern void force_sig(int, struct task_struct *);
2403extern int send_sig(int, struct task_struct *, int);
2404extern int zap_other_threads(struct task_struct *p);
2405extern struct sigqueue *sigqueue_alloc(void);
2406extern void sigqueue_free(struct sigqueue *);
2407extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
2408extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2409
2410static inline void restore_saved_sigmask(void)
2411{
2412	if (test_and_clear_restore_sigmask())
2413		__set_current_blocked(&current->saved_sigmask);
2414}
2415
2416static inline sigset_t *sigmask_to_save(void)
2417{
2418	sigset_t *res = &current->blocked;
2419	if (unlikely(test_restore_sigmask()))
2420		res = &current->saved_sigmask;
2421	return res;
2422}
2423
2424static inline int kill_cad_pid(int sig, int priv)
2425{
2426	return kill_pid(cad_pid, sig, priv);
2427}
2428
2429/* These can be the second arg to send_sig_info/send_group_sig_info.  */
2430#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2431#define SEND_SIG_PRIV	((struct siginfo *) 1)
2432#define SEND_SIG_FORCED	((struct siginfo *) 2)
2433
2434/*
2435 * True if we are on the alternate signal stack.
2436 */
2437static inline int on_sig_stack(unsigned long sp)
2438{
2439#ifdef CONFIG_STACK_GROWSUP
2440	return sp >= current->sas_ss_sp &&
2441		sp - current->sas_ss_sp < current->sas_ss_size;
2442#else
2443	return sp > current->sas_ss_sp &&
2444		sp - current->sas_ss_sp <= current->sas_ss_size;
2445#endif
2446}
2447
2448static inline int sas_ss_flags(unsigned long sp)
2449{
2450	if (!current->sas_ss_size)
2451		return SS_DISABLE;
2452
2453	return on_sig_stack(sp) ? SS_ONSTACK : 0;
2454}
2455
2456static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2457{
2458	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2459#ifdef CONFIG_STACK_GROWSUP
2460		return current->sas_ss_sp;
2461#else
2462		return current->sas_ss_sp + current->sas_ss_size;
2463#endif
2464	return sp;
2465}
2466
2467/*
2468 * Routines for handling mm_structs
2469 */
2470extern struct mm_struct * mm_alloc(void);
2471
2472/* mmdrop drops the mm and the page tables */
2473extern void __mmdrop(struct mm_struct *);
2474static inline void mmdrop(struct mm_struct * mm)
2475{
2476	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2477		__mmdrop(mm);
2478}
2479
2480/* mmput gets rid of the mappings and all user-space */
2481extern void mmput(struct mm_struct *);
2482/* Grab a reference to a task's mm, if it is not already going away */
2483extern struct mm_struct *get_task_mm(struct task_struct *task);
2484/*
2485 * Grab a reference to a task's mm, if it is not already going away
2486 * and ptrace_may_access with the mode parameter passed to it
2487 * succeeds.
2488 */
2489extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2490/* Remove the current task's stale references to the old mm_struct */
2491extern void mm_release(struct task_struct *, struct mm_struct *);
2492
2493extern int copy_thread(unsigned long, unsigned long, unsigned long,
2494			struct task_struct *);
2495extern void flush_thread(void);
2496extern void exit_thread(void);
2497
2498extern void exit_files(struct task_struct *);
2499extern void __cleanup_sighand(struct sighand_struct *);
2500
2501extern void exit_itimers(struct signal_struct *);
2502extern void flush_itimer_signals(void);
2503
2504extern void do_group_exit(int);
2505
2506extern int do_execve(struct filename *,
2507		     const char __user * const __user *,
2508		     const char __user * const __user *);
2509extern int do_execveat(int, struct filename *,
2510		       const char __user * const __user *,
2511		       const char __user * const __user *,
2512		       int);
2513extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2514struct task_struct *fork_idle(int);
2515extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2516
2517extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
2518static inline void set_task_comm(struct task_struct *tsk, const char *from)
2519{
2520	__set_task_comm(tsk, from, false);
2521}
2522extern char *get_task_comm(char *to, struct task_struct *tsk);
2523
2524#ifdef CONFIG_SMP
2525void scheduler_ipi(void);
2526extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2527#else
2528static inline void scheduler_ipi(void) { }
2529static inline unsigned long wait_task_inactive(struct task_struct *p,
2530					       long match_state)
2531{
2532	return 1;
2533}
2534#endif
2535
2536#define next_task(p) \
2537	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2538
2539#define for_each_process(p) \
2540	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2541
2542extern bool current_is_single_threaded(void);
2543
2544/*
2545 * Careful: do_each_thread/while_each_thread is a double loop so
2546 *          'break' will not work as expected - use goto instead.
2547 */
2548#define do_each_thread(g, t) \
2549	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2550
2551#define while_each_thread(g, t) \
2552	while ((t = next_thread(t)) != g)
2553
2554#define __for_each_thread(signal, t)	\
2555	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
2556
2557#define for_each_thread(p, t)		\
2558	__for_each_thread((p)->signal, t)
2559
2560/* Careful: this is a double loop, 'break' won't work as expected. */
2561#define for_each_process_thread(p, t)	\
2562	for_each_process(p) for_each_thread(p, t)
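
/*
 * Example (illustrative sketch, not part of this header): walking every
 * thread in the system with the iterators above. The task lists are
 * RCU-protected, so the walk must run under rcu_read_lock() (or with
 * tasklist_lock held).
 *
 *	struct task_struct *p, *t;
 *	unsigned int nr = 0;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		nr++;			// 'break' would only exit the inner loop
 *	rcu_read_unlock();
 */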
2563
2564static inline int get_nr_threads(struct task_struct *tsk)
2565{
2566	return tsk->signal->nr_threads;
2567}
2568
2569static inline bool thread_group_leader(struct task_struct *p)
2570{
2571	return p->exit_signal >= 0;
2572}
2573
2574/* Due to the insanities of de_thread it is possible for a process
2575 * to have the pid of the thread group leader without actually being
2576 * the thread group leader.  For iteration through the pids in proc
2577 * all we care about is that we have a task with the appropriate
2578 * pid; we don't actually care if we have the right task.
2579 */
2580static inline bool has_group_leader_pid(struct task_struct *p)
2581{
2582	return task_pid(p) == p->signal->leader_pid;
2583}
2584
2585static inline
2586bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
2587{
2588	return p1->signal == p2->signal;
2589}
2590
2591static inline struct task_struct *next_thread(const struct task_struct *p)
2592{
2593	return list_entry_rcu(p->thread_group.next,
2594			      struct task_struct, thread_group);
2595}
2596
2597static inline int thread_group_empty(struct task_struct *p)
2598{
2599	return list_empty(&p->thread_group);
2600}
2601
2602#define delay_group_leader(p) \
2603		(thread_group_leader(p) && !thread_group_empty(p))
2604
2605/*
2606 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2607 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2608 * pins the final release of task.io_context.  Also protects ->cpuset and
2609 * ->cgroup.subsys[]. And ->vfork_done.
2610 *
2611 * Nests both inside and outside of read_lock(&tasklist_lock).
2612 * It must not be nested with write_lock_irq(&tasklist_lock),
2613 * neither inside nor outside.
2614 */
2615static inline void task_lock(struct task_struct *p)
2616{
2617	spin_lock(&p->alloc_lock);
2618}
2619
2620static inline void task_unlock(struct task_struct *p)
2621{
2622	spin_unlock(&p->alloc_lock);
2623}
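
/*
 * Example (illustrative sketch, not part of this header): task_lock()
 * protecting a read of ->comm, one of the fields listed above.
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	task_lock(p);
 *	strlcpy(comm, p->comm, sizeof(comm));	// ->comm is stable while locked
 *	task_unlock(p);
 */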
2624
2625extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2626							unsigned long *flags);
2627
2628static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2629						       unsigned long *flags)
2630{
2631	struct sighand_struct *ret;
2632
2633	ret = __lock_task_sighand(tsk, flags);
2634	(void)__cond_lock(&tsk->sighand->siglock, ret);
2635	return ret;
2636}
2637
2638static inline void unlock_task_sighand(struct task_struct *tsk,
2639						unsigned long *flags)
2640{
2641	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2642}
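
/*
 * Example (illustrative sketch, not part of this header): the usual pattern
 * for taking ->siglock on a possibly-exiting task. On failure the task no
 * longer has a sighand and nothing needs to be undone.
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		// p->sighand and p->signal may be used safely here
 *		unlock_task_sighand(p, &flags);
 *	}
 */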
2643
2644#ifdef CONFIG_CGROUPS
2645static inline void threadgroup_change_begin(struct task_struct *tsk)
2646{
2647	down_read(&tsk->signal->group_rwsem);
2648}
2649static inline void threadgroup_change_end(struct task_struct *tsk)
2650{
2651	up_read(&tsk->signal->group_rwsem);
2652}
2653
2654/**
2655 * threadgroup_lock - lock threadgroup
2656 * @tsk: member task of the threadgroup to lock
2657 *
2658 * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
2659 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2660 * change ->group_leader/pid.  This is useful for cases where the threadgroup
2661 * needs to stay stable across blockable operations.
2662 *
2663 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
2664 * synchronization.  While held, no new task will be added to threadgroup
2665 * and no existing live task will have its PF_EXITING set.
2666 *
2667 * de_thread() does threadgroup_change_{begin|end}() when a non-leader
2668 * sub-thread becomes a new leader.
2669 */
2670static inline void threadgroup_lock(struct task_struct *tsk)
2671{
2672	down_write(&tsk->signal->group_rwsem);
2673}
2674
2675/**
2676 * threadgroup_unlock - unlock threadgroup
2677 * @tsk: member task of the threadgroup to unlock
2678 *
2679 * Reverse threadgroup_lock().
2680 */
2681static inline void threadgroup_unlock(struct task_struct *tsk)
2682{
2683	up_write(&tsk->signal->group_rwsem);
2684}
2685#else
2686static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2687static inline void threadgroup_change_end(struct task_struct *tsk) {}
2688static inline void threadgroup_lock(struct task_struct *tsk) {}
2689static inline void threadgroup_unlock(struct task_struct *tsk) {}
2690#endif
2691
2692#ifndef __HAVE_THREAD_FUNCTIONS
2693
2694#define task_thread_info(task)	((struct thread_info *)(task)->stack)
2695#define task_stack_page(task)	((task)->stack)
2696
2697static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2698{
2699	*task_thread_info(p) = *task_thread_info(org);
2700	task_thread_info(p)->task = p;
2701}
2702
2703/*
2704 * Return the address of the last usable long on the stack.
2705 *
2706 * When the stack grows down, this is just above the thread
2707 * info struct. Going any lower will corrupt the threadinfo.
2708 *
2709 * When the stack grows up, this is the highest address.
2710 * Beyond that position, we corrupt data on the next page.
2711 */
2712static inline unsigned long *end_of_stack(struct task_struct *p)
2713{
2714#ifdef CONFIG_STACK_GROWSUP
2715	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
2716#else
2717	return (unsigned long *)(task_thread_info(p) + 1);
2718#endif
2719}
2720
2721#endif
2722#define task_stack_end_corrupted(task) \
2723		(*(end_of_stack(task)) != STACK_END_MAGIC)
2724
2725static inline int object_is_on_stack(void *obj)
2726{
2727	void *stack = task_stack_page(current);
2728
2729	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2730}
2731
2732extern void thread_info_cache_init(void);
2733
2734#ifdef CONFIG_DEBUG_STACK_USAGE
2735static inline unsigned long stack_not_used(struct task_struct *p)
2736{
2737	unsigned long *n = end_of_stack(p);
2738
2739	do { 	/* Skip over canary */
2740		n++;
2741	} while (!*n);
2742
2743	return (unsigned long)n - (unsigned long)end_of_stack(p);
2744}
2745#endif
2746extern void set_task_stack_end_magic(struct task_struct *tsk);
2747
2748/* Set thread flags in another task's structure
2749 * - see asm/thread_info.h for the TIF_xxxx flags available
2750 */
2751static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2752{
2753	set_ti_thread_flag(task_thread_info(tsk), flag);
2754}
2755
2756static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2757{
2758	clear_ti_thread_flag(task_thread_info(tsk), flag);
2759}
2760
2761static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2762{
2763	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2764}
2765
2766static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2767{
2768	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2769}
2770
2771static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2772{
2773	return test_ti_thread_flag(task_thread_info(tsk), flag);
2774}
2775
2776static inline void set_tsk_need_resched(struct task_struct *tsk)
2777{
2778	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2779}
2780
2781static inline void clear_tsk_need_resched(struct task_struct *tsk)
2782{
2783	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2784}
2785
2786static inline int test_tsk_need_resched(struct task_struct *tsk)
2787{
2788	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2789}
2790
2791static inline int restart_syscall(void)
2792{
2793	set_tsk_thread_flag(current, TIF_SIGPENDING);
2794	return -ERESTARTNOINTR;
2795}
2796
2797static inline int signal_pending(struct task_struct *p)
2798{
2799	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2800}
2801
2802static inline int __fatal_signal_pending(struct task_struct *p)
2803{
2804	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2805}
2806
2807static inline int fatal_signal_pending(struct task_struct *p)
2808{
2809	return signal_pending(p) && __fatal_signal_pending(p);
2810}
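
/*
 * Example (illustrative sketch, not part of this header): a typical
 * interruptible loop that backs out when a signal is pending, returning the
 * conventional error codes. work_left() is a hypothetical placeholder.
 *
 *	while (work_left()) {
 *		if (fatal_signal_pending(current))
 *			return -EINTR;		// killed, give up immediately
 *		if (signal_pending(current))
 *			return -ERESTARTSYS;	// let signal delivery restart us
 *		// ... do one unit of work ...
 *	}
 */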
2811
2812static inline int signal_pending_state(long state, struct task_struct *p)
2813{
2814	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2815		return 0;
2816	if (!signal_pending(p))
2817		return 0;
2818
2819	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2820}
2821
2822/*
2823 * cond_resched() and cond_resched_lock(): latency reduction via
2824 * explicit rescheduling in places that are safe. The return
2825 * value indicates whether a reschedule was done in fact.
2826 * cond_resched_lock() will drop the spinlock before scheduling,
2827 * cond_resched_softirq() will enable bhs before scheduling.
2828 */
2829extern int _cond_resched(void);
2830
2831#define cond_resched() ({			\
2832	___might_sleep(__FILE__, __LINE__, 0);	\
2833	_cond_resched();			\
2834})
2835
2836extern int __cond_resched_lock(spinlock_t *lock);
2837
2838#define cond_resched_lock(lock) ({				\
2839	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
2840	__cond_resched_lock(lock);				\
2841})
2842
2843extern int __cond_resched_softirq(void);
2844
2845#define cond_resched_softirq() ({					\
2846	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2847	__cond_resched_softirq();					\
2848})
2849
2850static inline void cond_resched_rcu(void)
2851{
2852#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2853	rcu_read_unlock();
2854	cond_resched();
2855	rcu_read_lock();
2856#endif
2857}
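
/*
 * Example (illustrative sketch, not part of this header): breaking up a
 * long-running loop in process context so that it does not hog the CPU on
 * !CONFIG_PREEMPT kernels. process_item() is a hypothetical placeholder.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();		// reschedule here if needed
 *	}
 */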
2858
2859/*
2860 * Does a critical section need to be broken due to another
2861 * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
2862 * but it expresses a general need for low latency.)
2863 */
2864static inline int spin_needbreak(spinlock_t *lock)
2865{
2866#ifdef CONFIG_PREEMPT
2867	return spin_is_contended(lock);
2868#else
2869	return 0;
2870#endif
2871}
2872
2873/*
2874 * Idle thread specific functions to determine the need_resched
2875 * polling state.
2876 */
2877#ifdef TIF_POLLING_NRFLAG
2878static inline int tsk_is_polling(struct task_struct *p)
2879{
2880	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
2881}
2882
2883static inline void __current_set_polling(void)
2884{
2885	set_thread_flag(TIF_POLLING_NRFLAG);
2886}
2887
2888static inline bool __must_check current_set_polling_and_test(void)
2889{
2890	__current_set_polling();
2891
2892	/*
2893	 * Polling state must be visible before we test NEED_RESCHED,
2894	 * paired with resched_curr()
2895	 */
2896	smp_mb__after_atomic();
2897
2898	return unlikely(tif_need_resched());
2899}
2900
2901static inline void __current_clr_polling(void)
2902{
2903	clear_thread_flag(TIF_POLLING_NRFLAG);
2904}
2905
2906static inline bool __must_check current_clr_polling_and_test(void)
2907{
2908	__current_clr_polling();
2909
2910	/*
2911	 * Polling state must be visible before we test NEED_RESCHED,
2912	 * paired with resched_curr()
2913	 */
2914	smp_mb__after_atomic();
2915
2916	return unlikely(tif_need_resched());
2917}
2918
2919#else
2920static inline int tsk_is_polling(struct task_struct *p) { return 0; }
2921static inline void __current_set_polling(void) { }
2922static inline void __current_clr_polling(void) { }
2923
2924static inline bool __must_check current_set_polling_and_test(void)
2925{
2926	return unlikely(tif_need_resched());
2927}
2928static inline bool __must_check current_clr_polling_and_test(void)
2929{
2930	return unlikely(tif_need_resched());
2931}
2932#endif
2933
2934static inline void current_clr_polling(void)
2935{
2936	__current_clr_polling();
2937
2938	/*
2939	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
2940	 * Once the bit is cleared, we'll get IPIs with every new
2941	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
2942	 * fold.
2943	 */
2944	smp_mb(); /* paired with resched_curr() */
2945
2946	preempt_fold_need_resched();
2947}
2948
2949static __always_inline bool need_resched(void)
2950{
2951	return unlikely(tif_need_resched());
2952}
2953
2954/*
2955 * Thread group CPU time accounting.
2956 */
2957void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2958void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2959
2960static inline void thread_group_cputime_init(struct signal_struct *sig)
2961{
2962	raw_spin_lock_init(&sig->cputimer.lock);
2963}
2964
2965/*
2966 * Reevaluate whether the task has signals pending delivery.
2967 * Wake the task if so.
2968 * This is required every time the blocked sigset_t changes.
2969 * Callers must hold sighand->siglock.
2970 */
2971extern void recalc_sigpending_and_wake(struct task_struct *t);
2972extern void recalc_sigpending(void);
2973
2974extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2975
2976static inline void signal_wake_up(struct task_struct *t, bool resume)
2977{
2978	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2979}
2980static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2981{
2982	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2983}
2984
2985/*
2986 * Wrappers for p->thread_info->cpu access. No-op on UP.
2987 */
2988#ifdef CONFIG_SMP
2989
2990static inline unsigned int task_cpu(const struct task_struct *p)
2991{
2992	return task_thread_info(p)->cpu;
2993}
2994
2995static inline int task_node(const struct task_struct *p)
2996{
2997	return cpu_to_node(task_cpu(p));
2998}
2999
3000extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
3001
3002#else
3003
3004static inline unsigned int task_cpu(const struct task_struct *p)
3005{
3006	return 0;
3007}
3008
3009static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
3010{
3011}
3012
3013#endif /* CONFIG_SMP */
3014
3015extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
3016extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
3017
3018#ifdef CONFIG_CGROUP_SCHED
3019extern struct task_group root_task_group;
3020#endif /* CONFIG_CGROUP_SCHED */
3021
3022extern int task_can_switch_user(struct user_struct *up,
3023					struct task_struct *tsk);
3024
3025#ifdef CONFIG_TASK_XACCT
3026static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3027{
3028	tsk->ioac.rchar += amt;
3029}
3030
3031static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3032{
3033	tsk->ioac.wchar += amt;
3034}
3035
3036static inline void inc_syscr(struct task_struct *tsk)
3037{
3038	tsk->ioac.syscr++;
3039}
3040
3041static inline void inc_syscw(struct task_struct *tsk)
3042{
3043	tsk->ioac.syscw++;
3044}
3045#else
3046static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3047{
3048}
3049
3050static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3051{
3052}
3053
3054static inline void inc_syscr(struct task_struct *tsk)
3055{
3056}
3057
3058static inline void inc_syscw(struct task_struct *tsk)
3059{
3060}
3061#endif
3062
3063#ifndef TASK_SIZE_OF
3064#define TASK_SIZE_OF(tsk)	TASK_SIZE
3065#endif
3066
3067#ifdef CONFIG_MEMCG
3068extern void mm_update_next_owner(struct mm_struct *mm);
3069#else
3070static inline void mm_update_next_owner(struct mm_struct *mm)
3071{
3072}
3073#endif /* CONFIG_MEMCG */
3074
3075static inline unsigned long task_rlimit(const struct task_struct *tsk,
3076		unsigned int limit)
3077{
3078	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
3079}
3080
3081static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
3082		unsigned int limit)
3083{
3084	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
3085}
3086
3087static inline unsigned long rlimit(unsigned int limit)
3088{
3089	return task_rlimit(current, limit);
3090}
3091
3092static inline unsigned long rlimit_max(unsigned int limit)
3093{
3094	return task_rlimit_max(current, limit);
3095}
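
/*
 * Example (illustrative sketch, not part of this header): using the rlimit()
 * accessor above to enforce the current task's soft limit on open files.
 * nr_open_files is a hypothetical per-task counter.
 *
 *	if (nr_open_files >= rlimit(RLIMIT_NOFILE))
 *		return -EMFILE;		// too many open files
 */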
3096
3097#endif
3098