This source file includes the following definitions:
- decay_load
- __accumulate_pelt_segments
- accumulate_sum
- ___update_load_sum
- ___update_load_avg
- __update_load_avg_blocked_se
- __update_load_avg_se
- __update_load_avg_cfs_rq
- update_rt_rq_load_avg
- update_dl_rq_load_avg
- update_irq_load_avg
/*
 * Per Entity Load Tracking (PELT) helpers: geometric-series decay and
 * accumulation of the load/runnable/utilization sums shared by the fair,
 * rt, dl and irq average updates below.
 */
#include <linux/sched.h>
#include "sched.h"
#include "pelt.h"

#include <trace/events/sched.h>

/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
 */
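/*
 * Worked example, assuming LOAD_AVG_PERIOD == 32: decay_load(val, 70)
 * shifts val right by 70 / 32 == 2 (two full half-life periods) and then
 * multiplies by the y^(70 % 32) == y^6 entry of runnable_avg_yN_inv.
 * For n > LOAD_AVG_PERIOD * 63 the shift alone would clear a 64bit value,
 * hence the early return of 0.
 */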
static u64 decay_load(u64 val, u64 n)
{
	unsigned int local_n;

	if (unlikely(n > LOAD_AVG_PERIOD * 63))
		return 0;

	/* after bounds checking we can collapse to 32bit */
	local_n = n;

	/*
	 * As y^PERIOD = 1/2, we can combine
	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
	 * with a look-up table which covers y^n (n < PERIOD), to achieve
	 * a constant time decay_load.
	 */
	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
		val >>= local_n / LOAD_AVG_PERIOD;
		local_n %= LOAD_AVG_PERIOD;
	}

	val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
	return val;
}

static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
{
	u32 c1, c2, c3 = d3;

	/*
	 * c1 = d1 y^p
	 */
	c1 = decay_load((u64)d1, periods);

	/*
	 *            p-1
	 * c2 = 1024 \Sum y^n
	 *            n=1
	 *
	 *              inf        inf
	 *    = 1024 ( \Sum y^n - \Sum y^n - y^0 )
	 *              n=0        n=p
	 */
	c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;

	return c1 + c2 + c3;
}
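/*
 * Sanity check on the closed form above: LOAD_AVG_MAX is (roughly)
 * 1024 * \Sum_{n=0..inf} y^n, so for periods == 1 the expression for c2
 * reduces to (approximately) zero, matching the empty sum from n = 1
 * to p - 1 = 0.
 */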

#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
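/*
 * cap_scale() scales v by a capacity s given on the fixed-point
 * [0 .. SCHED_CAPACITY_SCALE] (i.e. [0 .. 1024]) range; e.g. a capacity
 * of 512 halves v.
 */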

/*
 * Accumulate the three separate parts of the sum; d1 the remainder
 * of the last (incomplete) period, d2 the span of full periods and d3
 * the remainder of the (incomplete) current period.
 *
 *           d1          d2           d3
 *           ^           ^            ^
 *           |           |            |
 *         |<->|<----------------->|<--->|
 * ... |---x---|------| ... |------|-----x (now)
 *
 *                           p-1
 * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
 *                           n=1
 *
 *    = u y^p +                                 (Step 1)
 *
 *                     p-1
 *      d1 y^p + 1024 \Sum y^n + d3 y^0         (Step 2)
 *                     n=1
 */
static __always_inline u32
accumulate_sum(u64 delta, struct sched_avg *sa,
	       unsigned long load, unsigned long runnable, int running)
{
	u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
	u64 periods;

	delta += sa->period_contrib;
	periods = delta / 1024; /* A period is 1024us (~1ms) */

	/*
	 * Step 1: decay old *_sum if we crossed period boundaries.
	 */
	if (periods) {
		sa->load_sum = decay_load(sa->load_sum, periods);
		sa->runnable_load_sum =
			decay_load(sa->runnable_load_sum, periods);
		sa->util_sum = decay_load((u64)(sa->util_sum), periods);

		/*
		 * Step 2
		 */
		delta %= 1024;
		contrib = __accumulate_pelt_segments(periods,
				1024 - sa->period_contrib, delta);
	}
	sa->period_contrib = delta;

	if (load)
		sa->load_sum += load * contrib;
	if (runnable)
		sa->runnable_load_sum += runnable * contrib;
	if (running)
		sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;

	return periods;
}
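/*
 * Worked example for the accumulation above: with sa->period_contrib == 500
 * and delta == 2300 (us), delta + period_contrib == 2800, so periods == 2,
 * d1 == 1024 - 500 == 524 and d3 == 2800 % 1024 == 752; indeed
 * d1 + 1024 * (periods - 1) + d3 == 524 + 1024 + 752 == 2300.
 */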

/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series.  To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *       p0            p1           p2
 *      (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our co-efficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have new u_0', multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0' + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
 */
static __always_inline int
___update_load_sum(u64 now, struct sched_avg *sa,
		   unsigned long load, unsigned long runnable, int running)
{
	u64 delta;

	delta = now - sa->last_update_time;
	/*
	 * This should only happen when time goes backwards, which it
	 * unfortunately does during sched clock init when we swap the base.
	 */
	if ((s64)delta < 0) {
		sa->last_update_time = now;
		return 0;
	}

	/*
	 * Use 1024ns as the unit of measurement since it's a reasonable
	 * approximation of 1us and fast to compute.
	 */
	delta >>= 10;
	if (!delta)
		return 0;

	sa->last_update_time += delta << 10;

	/*
	 * running is a subset of runnable (weight) so running can't be set if
	 * runnable is clear. But there are some corner cases where the current
	 * se has been already dequeued but cfs_rq->curr still points to it.
	 * This means that weight will be 0 but not running for a sched_entity
	 * but also for a cfs_rq if the latter becomes idle.
	 */
	if (!load)
		runnable = running = 0;

	/*
	 * Now we know we crossed measurement unit boundaries. The *_avg
	 * accrues by two steps:
	 *
	 * Step 1: accumulate *_sum since last_update_time. If we haven't
	 * crossed period boundaries, finish.
	 */
	if (!accumulate_sum(delta, sa, load, runnable, running))
		return 0;

	return 1;
}

static __always_inline void
___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runnable)
{
	u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;

	/*
	 * Step 2: update *_avg.
	 */
	sa->load_avg = div_u64(load * sa->load_sum, divider);
	sa->runnable_load_avg = div_u64(runnable * sa->runnable_load_sum, divider);
	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}
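/*
 * Note on the divider: the current, partially elapsed period has only
 * contributed period_contrib out of a possible 1024, so the largest value
 * any *_sum can have reached is LOAD_AVG_MAX - 1024 + period_contrib.
 * Dividing by it brings the averages back onto the scale of their inputs
 * (the weight for load, SCHED_CAPACITY_SCALE for utilization).
 */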

/*
 * sched_entity:
 *
 *   task:
 *     se_runnable() == se_weight()
 *
 *   group: [ see update_cfs_group() ]
 *     se_weight()   = tg->weight * grq->load_avg / tg->load_avg
 *     se_runnable() = se_weight(se) * grq->runnable_load_avg / grq->load_avg
 *
 *   load_sum := runnable_sum
 *   load_avg = se_weight(se) * runnable_avg
 *
 *   runnable_load_sum := runnable_sum
 *   runnable_load_avg = se_runnable(se) * runnable_avg
 *
 * cfs_rq:
 *
 *   load_sum = \Sum se_weight(se) * se->avg.load_sum
 *   load_avg = \Sum se->avg.load_avg
 *
 *   runnable_load_sum = \Sum se_runnable(se) * se->avg.runnable_load_sum
 *   runnable_load_avg = \Sum se->avg.runnable_load_avg
 *
 *   util_sum = \Sum se->avg.util_sum
 *   util_avg = \Sum se->avg.util_avg
 */
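/*
 * The blocked variant below passes load == runnable == running == 0, so
 * ___update_load_sum() only decays the existing sums; nothing new is
 * accumulated while the entity is off the runqueue.
 */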
int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
		___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
		trace_pelt_se_tp(se);
		return 1;
	}

	return 0;
}

int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, !!se->on_rq, !!se->on_rq,
				cfs_rq->curr == se)) {

		___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
		cfs_se_util_change(&se->avg);
		trace_pelt_se_tp(se);
		return 1;
	}

	return 0;
}

int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
	if (___update_load_sum(now, &cfs_rq->avg,
				scale_load_down(cfs_rq->load.weight),
				scale_load_down(cfs_rq->runnable_weight),
				cfs_rq->curr != NULL)) {

		___update_load_avg(&cfs_rq->avg, 1, 1);
		trace_pelt_cfs_tp(cfs_rq);
		return 1;
	}

	return 0;
}

/*
 * rt_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_load_sum = load_sum
 *
 *   load_avg and runnable_load_avg are not supported and meaningless.
 */
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	if (___update_load_sum(now, &rq->avg_rt,
				running,
				running,
				running)) {

		___update_load_avg(&rq->avg_rt, 1, 1);
		trace_pelt_rt_tp(rq);
		return 1;
	}

	return 0;
}

/*
 * dl_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_load_sum = load_sum
 *
 *   load_avg and runnable_load_avg are not supported and meaningless.
 */
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	if (___update_load_sum(now, &rq->avg_dl,
				running,
				running,
				running)) {

		___update_load_avg(&rq->avg_dl, 1, 1);
		trace_pelt_dl_tp(rq);
		return 1;
	}

	return 0;
}

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/*
 * irq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_load_sum = load_sum
 */
int update_irq_load_avg(struct rq *rq, u64 running)
{
	int ret = 0;

	/*
	 * We can't use clock_pelt because irq time is not accounted in
	 * clock_task. Instead we directly scale the running time to
	 * reflect the real amount of computation.
	 */
	running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
	running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));

	/*
	 * We know the time that has been used by interrupt since the last
	 * update, but not when it happened. Be safe and assume the irq ran
	 * just before the update: the CPU is then seen as idle for part of
	 * the period, which decays the utilization a bit; irq is usually a
	 * short running "task", so this fits the lower part of the range.
	 *
	 * To make sure that the utilization deactivates the idle CPU during
	 * the decay, we set the lower bound to the actual running time.
	 */
	ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
				0,
				0,
				0);
	ret += ___update_load_sum(rq->clock, &rq->avg_irq,
				1,
				1,
				1);

	if (ret) {
		___update_load_avg(&rq->avg_irq, 1, 1);
		trace_pelt_irq_tp(rq);
	}

	return ret;
}
#endif