Lines Matching defs:task

78  * sporadic time-constrained task. In such a model a task is specified by:
84 * Very briefly, a periodic (sporadic) task asks for the execution of
94 * @sched_policy task's scheduling policy
96 * @sched_nice task's nice value (SCHED_NORMAL/BATCH)
97 * @sched_priority task's static priority (SCHED_FIFO/RR)
98 * @sched_deadline representative of the task's deadline
99 * @sched_runtime representative of the task's runtime
100 * @sched_period representative of the task's period
102 * Given this task model, there is a multiplicity of scheduling algorithms
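
A minimal userspace sketch of how the runtime/deadline/period triple above is handed to the kernel. Since glibc ships no sched_setattr() wrapper, this assumes the raw __NR_sched_setattr syscall number is available and defines a local struct sched_attr mirroring the uapi layout; the 10/30/100 ms values are illustrative only.

#define _GNU_SOURCE
#include <linux/types.h>
#include <sys/syscall.h>
#include <unistd.h>

#define SCHED_DEADLINE  6

struct sched_attr {
        __u32 size;
        __u32 sched_policy;
        __u64 sched_flags;
        __s32 sched_nice;       /* used by SCHED_NORMAL/BATCH */
        __u32 sched_priority;   /* used by SCHED_FIFO/RR */
        __u64 sched_runtime;    /* -deadline parameters, in nanoseconds */
        __u64 sched_deadline;
        __u64 sched_period;
};

static int become_deadline_task(void)
{
        struct sched_attr attr = {
                .size           = sizeof(attr),
                .sched_policy   = SCHED_DEADLINE,
                .sched_runtime  = 10 * 1000 * 1000,     /* 10 ms of CPU time... */
                .sched_deadline = 30 * 1000 * 1000,     /* ...within 30 ms of activation... */
                .sched_period   = 100 * 1000 * 1000,    /* ...once every 100 ms */
        };

        /* pid 0 means the calling thread; the flags argument is currently 0 */
        return syscall(__NR_sched_setattr, 0, &attr, 0);
}
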
196 * We have two separate sets of flags: task->state
197 * is about runnability, while task->exit_state is
198 * about the task exiting. Confusing, but this way
237 #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
238 #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
239 #define task_is_stopped_or_traced(task) \
240 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
241 #define task_contributes_to_load(task) \
242 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
243 (task->flags & PF_FROZEN) == 0)
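
A hedged sketch of how these predicates compose; task_counted_in_load() is a hypothetical helper, not part of this header.

#include <linux/sched.h>

/* Hypothetical: decide whether @p currently counts toward the load average.
 * Per the macros above, that means an uninterruptible sleeper that is not
 * in the middle of being frozen. */
static bool task_counted_in_load(struct task_struct *p)
{
        if (task_is_stopped_or_traced(p))
                return false;   /* stopped/traced sleeps are not load */

        return task_contributes_to_load(p);
}
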
361 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
362 * task), SP is the stack pointer of the first frame that should be shown in the
363 * backtrace (or NULL if the entire call-chain of the task should be shown).
365 extern void show_stack(struct task_struct *task, unsigned long *sp);
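
As the comment says, both arguments accept NULL; a minimal (hypothetical) wrapper that dumps the calling task's entire call chain:

#include <linux/sched.h>

static void dump_my_stack(void)
{
        /* NULL task = current, NULL sp = start from the innermost frame */
        show_stack(NULL, NULL);
}
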
546 * spent by the task from the scheduler's point of view.
847 unsigned int flags; /* Private per-task flags */
912 #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
1128 * weight of the task.
1243 * they are continuously updated during task execution. Note that
1254 * task has to wait for a replenishment to be performed at the
1265 * @dl_yielded tells if task gave up the cpu before consuming
1271 * Bandwidth enforcement timer. Each -deadline task has its
1272 * own bandwidth to be enforced, thus we need one timer per task.
1369 /* task state */
1416 * ptraced is the list of tasks this task is using ptrace on.
1456 const struct cred __rcu *real_cred; /* objective and real subjective task
1458 const struct cred __rcu *cred; /* effective (overridable) subjective task
1472 /* hung task detection */
1475 /* CPU-specific state of this task */
1516 /* PI waiters blocked on a rt_mutex held by this task */
1643 * scan window were remote/local or failed to migrate. The task scan
1768 static inline struct pid *task_pid(struct task_struct *task)
1770 return task->pids[PIDTYPE_PID].pid;
1773 static inline struct pid *task_tgid(struct task_struct *task)
1775 return task->group_leader->pids[PIDTYPE_PID].pid;
1780 * the result of task_pgrp/task_session even if task == current,
1783 static inline struct pid *task_pgrp(struct task_struct *task)
1785 return task->group_leader->pids[PIDTYPE_PGID].pid;
1788 static inline struct pid *task_session(struct task_struct *task)
1790 return task->group_leader->pids[PIDTYPE_SID].pid;
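
A sketch of these accessors used together; show_task_pids() is hypothetical. Note that pgrp/session hang off the group leader, and (per the comment above) the results should not be dereferenced without tasklist_lock or rcu_read_lock() held.

#include <linux/printk.h>
#include <linux/sched.h>

static void show_task_pids(struct task_struct *task)
{
        struct pid *pid  = task_pid(task);      /* the task's own pid */
        struct pid *tgid = task_tgid(task);     /* thread group id = leader's pid */
        struct pid *pgrp = task_pgrp(task);     /* process group */
        struct pid *sid  = task_session(task);  /* session */

        pr_info("pid=%p tgid=%p pgrp=%p sid=%p\n", pid, tgid, pgrp, sid);
}
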
1796 * the helpers to get the task's different pids as they are seen
1804 * set_task_vxid() : assigns a virtual id to a task;
1808 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
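
The xid helpers make the namespace dependence explicit. A small illustrative (hypothetical) comparison using the task_pid_nr()/task_pid_vnr() wrappers that this block documents:

#include <linux/printk.h>
#include <linux/sched.h>

static void compare_pids(struct task_struct *task)
{
        pid_t global = task_pid_nr(task);       /* id seen from the init namespace */
        pid_t virt   = task_pid_vnr(task);      /* id seen from current's pid namespace */

        if (global != virt)
                pr_info("task lives in a nested pid namespace (%d vs %d)\n",
                        global, virt);
}
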
1889 * pid_alive - check that a task structure is not stale
1893 * If pid_alive fails, then pointers within the task structure
1904 * is_global_init - check if a task structure is init
1907 * Check if a task structure is the first user space task the kernel created.
1909 * Return: 1 if the task structure is init. 0 otherwise.
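
A hedged example of both checks guarding later use of the task structure; may_signal_task() is hypothetical:

#include <linux/sched.h>

static bool may_signal_task(struct task_struct *p)
{
        if (!pid_alive(p))
                return false;   /* exiting: pid pointers may already be stale */

        if (is_global_init(p))
                return false;   /* leave the first user space task alone */

        return true;
}
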
1995 * Only the _current_ task can read/write to tsk->flags, but other
1999 * or during fork: the ptracer task is allowed to write to the
2069 * task->jobctl flags
2074 #define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */
2092 extern bool task_set_jobctl_pending(struct task_struct *task,
2094 extern void task_clear_jobctl_trapping(struct task_struct *task);
2095 extern void task_clear_jobctl_pending(struct task_struct *task,
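
A simplified sketch of driving a group stop with these helpers; the real signal code additionally encodes the stop signal number in JOBCTL_STOP_SIGMASK, and all jobctl updates require @task->sighand->siglock, taken here via lock_task_sighand():

#include <linux/sched.h>

static void request_group_stop(struct task_struct *task)
{
        unsigned long flags;

        if (lock_task_sighand(task, &flags)) {
                /* returns false if @task is already dying */
                task_set_jobctl_pending(task, JOBCTL_STOP_PENDING);
                unlock_task_sighand(task, &flags);
        }
}
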
2113 static inline void tsk_restore_flags(struct task_struct *task,
2116 task->flags &= ~flags;
2117 task->flags |= orig_flags & flags;
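
tsk_restore_flags() supports the save/set/restore idiom below, shown with PF_MEMALLOC (the pattern used on memalloc socket paths); do_work_with_memalloc() itself is hypothetical:

#include <linux/sched.h>

static void do_work_with_memalloc(void)
{
        unsigned long pflags = current->flags;

        current->flags |= PF_MEMALLOC;  /* temporarily allow dipping into reserves */

        /* ... allocation-heavy work ... */

        /* restore PF_MEMALLOC exactly as it was, leaving all other bits alone */
        tsk_restore_flags(current, pflags, PF_MEMALLOC);
}
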
2221 task_sched_runtime(struct task_struct *task);
2272 * task_nice - return the nice value of a given task.
2273 * @p: the task in question.
2292 * is_idle_task - is the specified task an idle task?
2293 * @p: the task in question.
2295 * Return: 1 if @p is an idle task. 0 otherwise.
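
A hypothetical predicate combining the two helpers documented above:

#include <linux/sched.h>

static bool worth_preempting(struct task_struct *p)
{
        if (is_idle_task(p))
                return true;    /* the idle task yields to anyone */

        /* a larger nice value means a lower priority */
        return task_nice(p) > task_nice(current);
}
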
2329 * find a task by one of its numerical ids
2332 * finds a task by its pid in the specified namespace
2334 * finds a task by its virtual pid
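
Both lookups return an unreferenced task pointer, so they are only safe under rcu_read_lock() (or tasklist_lock); a hedged sketch with a hypothetical kill_by_vpid():

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

static int kill_by_vpid(pid_t vpid)
{
        struct task_struct *p;
        int ret = -ESRCH;

        rcu_read_lock();
        p = find_task_by_vpid(vpid);    /* pid as seen in current's namespace */
        if (p)
                ret = send_sig(SIGTERM, p, 0);
        rcu_read_unlock();

        return ret;
}
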
2482 /* Grab a reference to a task's mm, if it is not already going away */
2483 extern struct mm_struct *get_task_mm(struct task_struct *task);
2485 * Grab a reference to a task's mm, if it is not already going away
2489 extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
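
get_task_mm() returns NULL for kernel threads and for tasks whose mm is already going away; a non-NULL result holds a reference that must be dropped with mmput(). A hypothetical example:

#include <linux/mm_types.h>
#include <linux/sched.h>

static unsigned long task_total_vm(struct task_struct *task)
{
        struct mm_struct *mm = get_task_mm(task);
        unsigned long total_vm = 0;

        if (mm) {
                total_vm = mm->total_vm;        /* pages mapped by this mm */
                mmput(mm);                      /* drop the reference we took */
        }

        return total_vm;
}
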
2577 * all we care about is that we have a task with the appropriate
2578 * pid, we don't actually care if we have the right task.
2608 * pins the final release of task.io_context. Also protects ->cpuset and
2656 * @tsk: member task of the threadgroup to lock
2658 * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
2664 * synchronization. While held, no new task will be added to threadgroup
2665 * and no existing live task will have its PF_EXITING set.
2677 * @tsk: member task of the threadgroup to unlock
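
A sketch of the stable-threadgroup pattern this enables (cgroup migration uses it similarly); for_each_live_thread() and its callback are hypothetical:

#include <linux/sched.h>

static void for_each_live_thread(struct task_struct *leader,
                                 void (*fn)(struct task_struct *))
{
        struct task_struct *t = leader;

        threadgroup_lock(leader);

        /* no thread can be created or reach PF_EXITING while we walk */
        do {
                fn(t);
        } while_each_thread(leader, t);

        threadgroup_unlock(leader);
}
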
2694 #define task_thread_info(task) ((struct thread_info *)(task)->stack)
2695 #define task_stack_page(task) ((task)->stack)
2700 task_thread_info(p)->task = p;
2722 #define task_stack_end_corrupted(task) \
2723 (*(end_of_stack(task)) != STACK_END_MAGIC)
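
A minimal check in the spirit of the scheduler's CONFIG_SCHED_STACK_END_CHECK debugging option, which verifies the sentinel word at the end of the stack page:

#include <linux/bug.h>
#include <linux/sched.h>

static void check_task_stack(struct task_struct *task)
{
        /* the sentinel at end_of_stack() should still read STACK_END_MAGIC */
        BUG_ON(task_stack_end_corrupted(task));
}
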
2748 /* set thread flags in other task's structures
2861 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
2966 * Reevaluate whether the task has signals pending delivery.
2967 * Wake the task if so.
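
This comment belongs to recalc_sigpending_and_wake(), which callers invoke with sighand->siglock held after editing the blocked set; a hedged sketch with a hypothetical unblock_signal():

#include <linux/sched.h>
#include <linux/signal.h>

static void unblock_signal(struct task_struct *t, int sig)
{
        unsigned long flags;

        if (lock_task_sighand(t, &flags)) {
                sigdelset(&t->blocked, sig);    /* stop blocking @sig */
                recalc_sigpending_and_wake(t);  /* re-check, wake @t if deliverable */
                unlock_task_sighand(t, &flags);
        }
}
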