#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across cpus; the delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}
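
/*
 * Illustrative timeline of the hooks above (the hook names and the
 * enqueue_task()/dequeue_task() call sites come from the comments in this
 * file; the concrete timeline is only an example):
 *
 *   t0  enqueue_task()       -> sched_info_queued():   last_queued = t0
 *   t1  task is switched in  -> sched_info_arrive():   run_delay += t1 - t0,
 *                                                      last_arrival = t1,
 *                                                      pcount++
 *   or
 *   t1  dequeue_task()       -> sched_info_dequeued(): run_delay += t1 - t0
 *
 * Because sched_info_queued() only stamps last_queued when it is zero, a
 * re-enqueue while the task is already waiting does not restart the clock,
 * so run_delay is measured from the *first* queueing.
 */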

/*
 * Called when a process ceases being the active-running process
 * involuntarily due, typically, to expiring its time slice (this may
 * also be called when switching to the idle task). Now we can calculate
 * how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the cpu. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
#else
#define sched_info_queued(rq, t)		do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(rq, t)		do { } while (0)
#define sched_info_depart(rq, t)		do { } while (0)
#define sched_info_arrive(rq, next)		do { } while (0)
#define sched_info_switch(rq, t, next)		do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk:	Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return false;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we won't account to the signal struct further
	 * cputime consumed by that task, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * In order to keep a consistent behaviour between thread group cputime
	 * and thread group cputimer accounting, let's also ignore the cputime
	 * elapsing after __exit_signal() in any thread group timer running.
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
	 * clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return false;

	return true;
}
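
/*
 * Note on the helpers below: cputimer->running is expected to be set by
 * the POSIX CPU timers code when a process-wide timer is armed, so the
 * per-tick accounting here can be skipped entirely when no thread-group
 * timer is active. The three account_group_*() helpers that follow are
 * meant to be driven from the tick and runtime-accounting paths, each
 * folding its delta into the shared thread_group_cputimer under
 * cputimer->lock. (The exact call sites live outside this file; this is
 * a summary of the intended usage, not a guarantee.)
 */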

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.utime += cputime;
	raw_spin_unlock(&cputimer->lock);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.stime += cputime;
	raw_spin_unlock(&cputimer->lock);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.sum_exec_runtime += ns;
	raw_spin_unlock(&cputimer->lock);
}
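
/*
 * Usage sketch (hypothetical): the helper below is not part of this header
 * and is not called by the kernel; it only illustrates how a tick-path
 * caller would be expected to feed the account_group_*() helpers above.
 * The function name and its parameters are invented for the example.
 */
static inline void example_account_group_tick(struct task_struct *curr,
					      cputime_t delta, bool user,
					      unsigned long long delta_ns)
{
	if (user)
		account_group_user_time(curr, delta);	/* utime bucket */
	else
		account_group_system_time(curr, delta);	/* stime bucket */

	/*
	 * Execution time is accounted in nanoseconds, independently of the
	 * utime/stime split.
	 */
	account_group_exec_runtime(curr, delta_ns);
}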