This source file includes the following definitions:
- update_irq_load_avg
- cfs_se_util_change
- update_rq_clock_pelt
- update_idle_rq_clock_pelt
- rq_clock_pelt
- cfs_rq_clock_pelt
- cfs_rq_clock_pelt
- update_cfs_rq_load_avg
- update_rt_rq_load_avg
- update_dl_rq_load_avg
- update_irq_load_avg
- rq_clock_pelt
- update_rq_clock_pelt
- update_idle_rq_clock_pelt
#ifdef CONFIG_SMP
#include "sched-pelt.h"

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
        return 0;
}
#endif
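
/*
 * UTIL_AVG_UNCHANGED is mapped into the LSB of util_est.enqueued and is
 * used to synchronize util_est updates with util_avg updates: a task's
 * estimated utilization is only updated at dequeue time if its util_avg
 * has changed since the task was enqueued.
 */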
#define UTIL_AVG_UNCHANGED 0x1

static inline void cfs_se_util_change(struct sched_avg *avg)
{
        unsigned int enqueued;

        if (!sched_feat(UTIL_EST))
                return;
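
        /* Avoid the store if the flag has already been cleared */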
        enqueued = avg->util_est.enqueued;
        if (!(enqueued & UTIL_AVG_UNCHANGED))
                return;
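
        /* Clear the flag to report that util_avg has been updated */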
        enqueued &= ~UTIL_AVG_UNCHANGED;
        WRITE_ONCE(avg->util_est.enqueued, enqueued);
}
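
/*
 * The PELT clock of a rq advances at a rate scaled by CPU capacity and
 * current frequency, so that elapsed time reflects the effective amount
 * of computation done while running; it is synced back to clock_task
 * whenever the rq goes idle.
 */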
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
        if (unlikely(is_idle_task(rq->curr))) {
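                /* The rq is idle; sync clock_pelt back to clock_task */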
                rq->clock_pelt = rq_clock_task(rq);
                return;
        }
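
        /*
         * A rq running at lower compute capacity needs more time to do
         * the same amount of work as at max capacity. To keep the load
         * signal invariant, scale the elapsed time by CPU capacity and
         * by current frequency; the idle time "stolen" by running longer
         * is given back when the rq goes idle and the clock is re-synced
         * with rq_clock_task().
         */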
        delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
        delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));

        rq->clock_pelt += delta;
}
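
/*
 * When the rq becomes idle, check whether it lost idle time because it
 * was fully busy. A rq is considered fully used when the sum of its
 * util_sum signals reaches the maximum value; for simplicity the check
 * ignores the position in the current PELT window (period_contrib) and
 * uses the upper bound of util_sum as the threshold.
 */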
static inline void update_idle_rq_clock_pelt(struct rq *rq)
{
        u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
        u32 util_sum = rq->cfs.avg.util_sum;
        util_sum += rq->avg_rt.util_sum;
        util_sum += rq->avg_dl.util_sum;
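
        /*
         * Stolen time is only meaningful if an idle phase would exist at
         * max capacity. Once a rq's utilization reaches the maximum, it
         * is treated as always running, with no idle time to steal; that
         * potential idle time is accounted as lost relative to clock_task.
         */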
        if (util_sum >= divider)
                rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
}
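
/*
 * PELT view of the rq clock: clock_pelt, with any idle time lost while
 * the rq was fully busy subtracted out.
 */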
static inline u64 rq_clock_pelt(struct rq *rq)
{
        lockdep_assert_held(&rq->lock);
        assert_clock_updated(rq);

        return rq->clock_pelt - rq->lost_idle_time;
}

#ifdef CONFIG_CFS_BANDWIDTH
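/* rq's clock_pelt, normalized against any time this cfs_rq spent throttled */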
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
        if (unlikely(cfs_rq->throttle_count))
                return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;

        return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
}
#else
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
        return rq_clock_pelt(rq_of(cfs_rq));
}
#endif

#else

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
        return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
        return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
        return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
        return 0;
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
        return rq_clock_task(rq);
}

static inline void
update_rq_clock_pelt(struct rq *rq, s64 delta) { }

static inline void
update_idle_rq_clock_pelt(struct rq *rq) { }

#endif