rt_se              31 kernel/sched/autogroup.c 	ag->tg->rt_se = NULL;
rt_se              88 kernel/sched/autogroup.c 	tg->rt_se = root_task_group.rt_se;
rt_se            6581 kernel/sched/core.c 		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
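
The autogroup.c and core.c hits above show that root_task_group.rt_se is a per-CPU pointer array set up once at boot, and that an autogroup does not allocate its own array but aliases the root group's (the NULL store in autogroup.c appears to keep that shared array from being freed when the autogroup goes away). A minimal user-space sketch of the aliasing, not kernel code; nr_cpu_ids and the trimmed struct layouts are assumptions for the model:

    #include <stdio.h>
    #include <stdlib.h>

    /* User-space model only: a task_group either owns its per-CPU rt_se
     * pointer array or aliases the root group's array. */
    struct sched_rt_entity;

    struct task_group {
        struct sched_rt_entity **rt_se;   /* one pointer per CPU */
    };

    static struct task_group root_task_group;
    static const int nr_cpu_ids = 4;      /* assumption for the model */

    int main(void)
    {
        /* sched_init()-style setup: carve one array for the root group */
        root_task_group.rt_se = calloc(nr_cpu_ids, sizeof(*root_task_group.rt_se));

        /* autogroup-style setup: alias the root group's array instead of
         * allocating one, so autogroup RT tasks account to the root group */
        struct task_group autogroup_tg = { .rt_se = root_task_group.rt_se };
        printf("aliased: %d\n", autogroup_tg.rt_se == root_task_group.rt_se);

        /* on destroy, the alias is cleared so the shared array is not freed twice */
        autogroup_tg.rt_se = NULL;
        free(root_task_group.rt_se);
        return 0;
    }
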
rt_se             111 kernel/sched/rt.c #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
rt_se             113 kernel/sched/rt.c static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
rt_se             116 kernel/sched/rt.c 	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
rt_se             118 kernel/sched/rt.c 	return container_of(rt_se, struct task_struct, rt);
rt_se             126 kernel/sched/rt.c static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
rt_se             128 kernel/sched/rt.c 	return rt_se->rt_rq;
rt_se             131 kernel/sched/rt.c static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
rt_se             133 kernel/sched/rt.c 	struct rt_rq *rt_rq = rt_se->rt_rq;
rt_se             142 kernel/sched/rt.c 	if (tg->rt_se)
rt_se             148 kernel/sched/rt.c 		if (tg->rt_se)
rt_se             149 kernel/sched/rt.c 			kfree(tg->rt_se[i]);
rt_se             153 kernel/sched/rt.c 	kfree(tg->rt_se);
rt_se             157 kernel/sched/rt.c 		struct sched_rt_entity *rt_se, int cpu,
rt_se             168 kernel/sched/rt.c 	tg->rt_se[cpu] = rt_se;
rt_se             170 kernel/sched/rt.c 	if (!rt_se)
rt_se             174 kernel/sched/rt.c 		rt_se->rt_rq = &rq->rt;
rt_se             176 kernel/sched/rt.c 		rt_se->rt_rq = parent->my_q;
rt_se             178 kernel/sched/rt.c 	rt_se->my_q = rt_rq;
rt_se             179 kernel/sched/rt.c 	rt_se->parent = parent;
rt_se             180 kernel/sched/rt.c 	INIT_LIST_HEAD(&rt_se->run_list);
rt_se             186 kernel/sched/rt.c 	struct sched_rt_entity *rt_se;
rt_se             192 kernel/sched/rt.c 	tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
rt_se             193 kernel/sched/rt.c 	if (!tg->rt_se)
rt_se             205 kernel/sched/rt.c 		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
rt_se             207 kernel/sched/rt.c 		if (!rt_se)
rt_se             212 kernel/sched/rt.c 		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
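
The rt.c hits from free_rt_sched_group() through alloc_rt_sched_group() (lines 142-212 above) show how each group gets one rt_se per CPU: init_tg_rt_entry() points rt_se->my_q at the group's own rt_rq and rt_se->parent at the parent group's entity (or leaves it NULL at the top level, which queues on rq->rt), and rt_entity_is_task() then tells task entities (my_q == NULL) from group entities. A compilable user-space model of that wiring; the model_* names and stripped-down structs are illustrative assumptions, not the kernel types:

    #include <assert.h>
    #include <stddef.h>

    struct rt_rq { int rt_nr_running; };

    struct sched_rt_entity {
        struct rt_rq           *rt_rq;    /* queue this entity is enqueued on */
        struct rt_rq           *my_q;     /* queue this entity owns (groups only) */
        struct sched_rt_entity *parent;   /* parent group's entity, NULL at top level */
    };

    #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

    static void model_init_tg_rt_entry(struct rt_rq *root_rt, struct rt_rq *grp_rq,
                                       struct sched_rt_entity *rt_se,
                                       struct sched_rt_entity *parent)
    {
        if (!parent)
            rt_se->rt_rq = root_rt;       /* top-level group queues on rq->rt */
        else
            rt_se->rt_rq = parent->my_q;  /* child queues on the parent's own rt_rq */
        rt_se->my_q = grp_rq;
        rt_se->parent = parent;
    }

    int main(void)
    {
        struct rt_rq root_rt = { 0 }, grp_rq = { 0 };
        struct sched_rt_entity grp_se = { 0 };
        struct sched_rt_entity task_se = { 0 };   /* a task's embedded entity */

        model_init_tg_rt_entry(&root_rt, &grp_rq, &grp_se, NULL);
        task_se.rt_rq = grp_se.my_q;      /* task queued inside the group */
        task_se.parent = &grp_se;

        assert(!rt_entity_is_task(&grp_se));
        assert(rt_entity_is_task(&task_se));
        return 0;
    }
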
rt_se             225 kernel/sched/rt.c #define rt_entity_is_task(rt_se) (1)
rt_se             227 kernel/sched/rt.c static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
rt_se             229 kernel/sched/rt.c 	return container_of(rt_se, struct task_struct, rt);
rt_se             237 kernel/sched/rt.c static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
rt_se             239 kernel/sched/rt.c 	struct task_struct *p = rt_task_of(rt_se);
rt_se             244 kernel/sched/rt.c static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
rt_se             246 kernel/sched/rt.c 	struct rq *rq = rq_of_rt_se(rt_se);
rt_se             316 kernel/sched/rt.c static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_se             320 kernel/sched/rt.c 	if (!rt_entity_is_task(rt_se))
rt_se             323 kernel/sched/rt.c 	p = rt_task_of(rt_se);
rt_se             333 kernel/sched/rt.c static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_se             337 kernel/sched/rt.c 	if (!rt_entity_is_task(rt_se))
rt_se             340 kernel/sched/rt.c 	p = rt_task_of(rt_se);
rt_se             409 kernel/sched/rt.c void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_se             414 kernel/sched/rt.c void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
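
inc_rt_migration()/dec_rt_migration() (rt.c:316-340 above, with empty stubs at 409/414 when the accounting is compiled out) only count task entities: the total count always moves, the migratory count only when the task may run on more than one CPU. A hedged user-space sketch of that rule; model_task and the helper names are assumptions, not kernel API:

    #include <assert.h>
    #include <stddef.h>

    struct rt_rq {
        unsigned int rt_nr_total;
        unsigned int rt_nr_migratory;
    };

    struct model_task { int nr_cpus_allowed; };

    struct sched_rt_entity {
        struct rt_rq      *my_q;   /* NULL for a task entity */
        struct model_task *task;   /* demo stand-in for rt_task_of() */
    };

    static void model_inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
    {
        if (rt_se->my_q)             /* group entity: nothing to account */
            return;

        rt_rq->rt_nr_total++;
        if (rt_se->task->nr_cpus_allowed > 1)
            rt_rq->rt_nr_migratory++;
    }

    int main(void)
    {
        struct rt_rq rt_rq = { 0, 0 };
        struct model_task pinned = { 1 }, movable = { 4 };
        struct sched_rt_entity se_pinned  = { NULL, &pinned };
        struct sched_rt_entity se_movable = { NULL, &movable };

        model_inc_rt_migration(&se_pinned, &rt_rq);
        model_inc_rt_migration(&se_movable, &rt_rq);
        assert(rt_rq.rt_nr_total == 2 && rt_rq.rt_nr_migratory == 1);
        return 0;
    }
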
rt_se             435 kernel/sched/rt.c static inline int on_rt_rq(struct sched_rt_entity *rt_se)
rt_se             437 kernel/sched/rt.c 	return rt_se->on_rq;
rt_se             475 kernel/sched/rt.c #define for_each_sched_rt_entity(rt_se) \
rt_se             476 kernel/sched/rt.c 	for (; rt_se; rt_se = rt_se->parent)
rt_se             478 kernel/sched/rt.c static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
rt_se             480 kernel/sched/rt.c 	return rt_se->my_q;
rt_se             483 kernel/sched/rt.c static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
rt_se             484 kernel/sched/rt.c static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
rt_se             490 kernel/sched/rt.c 	struct sched_rt_entity *rt_se;
rt_se             494 kernel/sched/rt.c 	rt_se = rt_rq->tg->rt_se[cpu];
rt_se             497 kernel/sched/rt.c 		if (!rt_se)
rt_se             499 kernel/sched/rt.c 		else if (!on_rt_rq(rt_se))
rt_se             500 kernel/sched/rt.c 			enqueue_rt_entity(rt_se, 0);
rt_se             509 kernel/sched/rt.c 	struct sched_rt_entity *rt_se;
rt_se             512 kernel/sched/rt.c 	rt_se = rt_rq->tg->rt_se[cpu];
rt_se             514 kernel/sched/rt.c 	if (!rt_se) {
rt_se             519 kernel/sched/rt.c 	else if (on_rt_rq(rt_se))
rt_se             520 kernel/sched/rt.c 		dequeue_rt_entity(rt_se, 0);
rt_se             528 kernel/sched/rt.c static int rt_se_boosted(struct sched_rt_entity *rt_se)
rt_se             530 kernel/sched/rt.c 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
rt_se             536 kernel/sched/rt.c 	p = rt_task_of(rt_se);
rt_se             580 kernel/sched/rt.c #define for_each_sched_rt_entity(rt_se) \
rt_se             581 kernel/sched/rt.c 	for (; rt_se; rt_se = NULL)
rt_se             583 kernel/sched/rt.c static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
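
The two for_each_sched_rt_entity() definitions above (rt.c:475-476 vs 580-581) make the same loop either climb from a task's entity up the parent chain or run its body exactly once. A small self-contained sketch of both behaviours; the _group/_flat suffixes and the name field are demo additions:

    #include <stdio.h>
    #include <stddef.h>

    struct sched_rt_entity {
        const char             *name;     /* for the demo only */
        struct sched_rt_entity *parent;
    };

    /* group-scheduled variant: climb the hierarchy */
    #define for_each_sched_rt_entity_group(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)

    /* non-group variant: visit only the task's own entity */
    #define for_each_sched_rt_entity_flat(rt_se) \
        for (; rt_se; rt_se = NULL)

    int main(void)
    {
        struct sched_rt_entity root = { "root",  NULL  };
        struct sched_rt_entity grp  = { "group", &root };
        struct sched_rt_entity task = { "task",  &grp  };

        struct sched_rt_entity *rt_se = &task;
        for_each_sched_rt_entity_group(rt_se)
            printf("%s\n", rt_se->name);   /* task, group, root */

        rt_se = &task;
        for_each_sched_rt_entity_flat(rt_se)
            printf("%s\n", rt_se->name);   /* task only */
        return 0;
    }
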
rt_se             896 kernel/sched/rt.c static inline int rt_se_prio(struct sched_rt_entity *rt_se)
rt_se             899 kernel/sched/rt.c 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
rt_se             905 kernel/sched/rt.c 	return rt_task_of(rt_se)->prio;
rt_se             958 kernel/sched/rt.c 	struct sched_rt_entity *rt_se = &curr->rt;
rt_se             982 kernel/sched/rt.c 	for_each_sched_rt_entity(rt_se) {
rt_se             983 kernel/sched/rt.c 		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
rt_se            1125 kernel/sched/rt.c inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_se            1127 kernel/sched/rt.c 	if (rt_se_boosted(rt_se))
rt_se            1135 kernel/sched/rt.c dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_se            1137 kernel/sched/rt.c 	if (rt_se_boosted(rt_se))
rt_se            1146 kernel/sched/rt.c inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_se            1152 kernel/sched/rt.c void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
rt_se            1157 kernel/sched/rt.c unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
rt_se            1159 kernel/sched/rt.c 	struct rt_rq *group_rq = group_rt_rq(rt_se);
rt_se            1168 kernel/sched/rt.c unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
rt_se            1170 kernel/sched/rt.c 	struct rt_rq *group_rq = group_rt_rq(rt_se);
rt_se            1176 kernel/sched/rt.c 	tsk = rt_task_of(rt_se);
rt_se            1182 kernel/sched/rt.c void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_se            1184 kernel/sched/rt.c 	int prio = rt_se_prio(rt_se);
rt_se            1187 kernel/sched/rt.c 	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
rt_se            1188 kernel/sched/rt.c 	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
rt_se            1191 kernel/sched/rt.c 	inc_rt_migration(rt_se, rt_rq);
rt_se            1192 kernel/sched/rt.c 	inc_rt_group(rt_se, rt_rq);
rt_se            1196 kernel/sched/rt.c void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_se            1198 kernel/sched/rt.c 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
rt_se            1200 kernel/sched/rt.c 	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
rt_se            1201 kernel/sched/rt.c 	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
rt_se            1203 kernel/sched/rt.c 	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
rt_se            1204 kernel/sched/rt.c 	dec_rt_migration(rt_se, rt_rq);
rt_se            1205 kernel/sched/rt.c 	dec_rt_group(rt_se, rt_rq);
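
rt_se_nr_running() and rt_se_rr_nr_running() (rt.c:1157-1176 above) let inc_rt_tasks()/dec_rt_tasks() weight a group entity by its whole subtree: a task entity counts as 1, a group entity counts as its own rt_rq's rt_nr_running. A user-space sketch of that accounting; the model_* helpers are illustrative, not kernel API:

    #include <assert.h>
    #include <stddef.h>

    struct rt_rq { unsigned int rt_nr_running; };

    struct sched_rt_entity {
        struct rt_rq *my_q;   /* NULL for a task entity */
    };

    static unsigned int model_rt_se_nr_running(struct sched_rt_entity *rt_se)
    {
        struct rt_rq *group_rq = rt_se->my_q;

        if (group_rq)
            return group_rq->rt_nr_running;
        return 1;
    }

    static void model_inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
    {
        rt_rq->rt_nr_running += model_rt_se_nr_running(rt_se);
    }

    int main(void)
    {
        struct rt_rq top = { 0 }, grp = { 3 };          /* group already holds 3 */
        struct sched_rt_entity task_se = { NULL };
        struct sched_rt_entity grp_se  = { &grp };

        model_inc_rt_tasks(&task_se, &top);   /* +1 */
        model_inc_rt_tasks(&grp_se, &top);    /* +3 */
        assert(top.rt_nr_running == 4);
        return 0;
    }
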
rt_se            1221 kernel/sched/rt.c static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
rt_se            1223 kernel/sched/rt.c 	list_del_init(&rt_se->run_list);
rt_se            1225 kernel/sched/rt.c 	if (list_empty(array->queue + rt_se_prio(rt_se)))
rt_se            1226 kernel/sched/rt.c 		__clear_bit(rt_se_prio(rt_se), array->bitmap);
rt_se            1228 kernel/sched/rt.c 	rt_se->on_list = 0;
rt_se            1231 kernel/sched/rt.c static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
rt_se            1233 kernel/sched/rt.c 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
rt_se            1235 kernel/sched/rt.c 	struct rt_rq *group_rq = group_rt_rq(rt_se);
rt_se            1236 kernel/sched/rt.c 	struct list_head *queue = array->queue + rt_se_prio(rt_se);
rt_se            1245 kernel/sched/rt.c 		if (rt_se->on_list)
rt_se            1246 kernel/sched/rt.c 			__delist_rt_entity(rt_se, array);
rt_se            1251 kernel/sched/rt.c 		WARN_ON_ONCE(rt_se->on_list);
rt_se            1253 kernel/sched/rt.c 			list_add(&rt_se->run_list, queue);
rt_se            1255 kernel/sched/rt.c 			list_add_tail(&rt_se->run_list, queue);
rt_se            1257 kernel/sched/rt.c 		__set_bit(rt_se_prio(rt_se), array->bitmap);
rt_se            1258 kernel/sched/rt.c 		rt_se->on_list = 1;
rt_se            1260 kernel/sched/rt.c 	rt_se->on_rq = 1;
rt_se            1262 kernel/sched/rt.c 	inc_rt_tasks(rt_se, rt_rq);
rt_se            1265 kernel/sched/rt.c static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
rt_se            1267 kernel/sched/rt.c 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
rt_se            1271 kernel/sched/rt.c 		WARN_ON_ONCE(!rt_se->on_list);
rt_se            1272 kernel/sched/rt.c 		__delist_rt_entity(rt_se, array);
rt_se            1274 kernel/sched/rt.c 	rt_se->on_rq = 0;
rt_se            1276 kernel/sched/rt.c 	dec_rt_tasks(rt_se, rt_rq);
rt_se            1283 kernel/sched/rt.c static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
rt_se            1287 kernel/sched/rt.c 	for_each_sched_rt_entity(rt_se) {
rt_se            1288 kernel/sched/rt.c 		rt_se->back = back;
rt_se            1289 kernel/sched/rt.c 		back = rt_se;
rt_se            1294 kernel/sched/rt.c 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
rt_se            1295 kernel/sched/rt.c 		if (on_rt_rq(rt_se))
rt_se            1296 kernel/sched/rt.c 			__dequeue_rt_entity(rt_se, flags);
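
dequeue_rt_stack() (rt.c:1283-1296 above) walks bottom-up once to record rt_se->back pointers, then follows that chain top-down so parents come off the queues before children; enqueue_rt_entity()/dequeue_rt_entity() below then rebuild the stack. A self-contained model of the two-pass reversal; printing stands in for __dequeue_rt_entity():

    #include <stdio.h>
    #include <stddef.h>

    struct sched_rt_entity {
        const char             *name;
        struct sched_rt_entity *parent;
        struct sched_rt_entity *back;
    };

    static void model_dequeue_rt_stack(struct sched_rt_entity *rt_se)
    {
        struct sched_rt_entity *back = NULL;

        /* pass 1: bottom-up, remembering the child at each level */
        for (; rt_se; rt_se = rt_se->parent) {
            rt_se->back = back;
            back = rt_se;
        }

        /* pass 2: top-down, dequeueing parents before children */
        for (rt_se = back; rt_se; rt_se = rt_se->back)
            printf("dequeue %s\n", rt_se->name);
    }

    int main(void)
    {
        struct sched_rt_entity root = { "root",  NULL,  NULL };
        struct sched_rt_entity grp  = { "group", &root, NULL };
        struct sched_rt_entity task = { "task",  &grp,  NULL };

        model_dequeue_rt_stack(&task);   /* prints root, group, task */
        return 0;
    }
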
rt_se            1300 kernel/sched/rt.c static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
rt_se            1302 kernel/sched/rt.c 	struct rq *rq = rq_of_rt_se(rt_se);
rt_se            1304 kernel/sched/rt.c 	dequeue_rt_stack(rt_se, flags);
rt_se            1305 kernel/sched/rt.c 	for_each_sched_rt_entity(rt_se)
rt_se            1306 kernel/sched/rt.c 		__enqueue_rt_entity(rt_se, flags);
rt_se            1310 kernel/sched/rt.c static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
rt_se            1312 kernel/sched/rt.c 	struct rq *rq = rq_of_rt_se(rt_se);
rt_se            1314 kernel/sched/rt.c 	dequeue_rt_stack(rt_se, flags);
rt_se            1316 kernel/sched/rt.c 	for_each_sched_rt_entity(rt_se) {
rt_se            1317 kernel/sched/rt.c 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
rt_se            1320 kernel/sched/rt.c 			__enqueue_rt_entity(rt_se, flags);
rt_se            1331 kernel/sched/rt.c 	struct sched_rt_entity *rt_se = &p->rt;
rt_se            1334 kernel/sched/rt.c 		rt_se->timeout = 0;
rt_se            1336 kernel/sched/rt.c 	enqueue_rt_entity(rt_se, flags);
rt_se            1344 kernel/sched/rt.c 	struct sched_rt_entity *rt_se = &p->rt;
rt_se            1347 kernel/sched/rt.c 	dequeue_rt_entity(rt_se, flags);
rt_se            1357 kernel/sched/rt.c requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
rt_se            1359 kernel/sched/rt.c 	if (on_rt_rq(rt_se)) {
rt_se            1361 kernel/sched/rt.c 		struct list_head *queue = array->queue + rt_se_prio(rt_se);
rt_se            1364 kernel/sched/rt.c 			list_move(&rt_se->run_list, queue);
rt_se            1366 kernel/sched/rt.c 			list_move_tail(&rt_se->run_list, queue);
rt_se            1372 kernel/sched/rt.c 	struct sched_rt_entity *rt_se = &p->rt;
rt_se            1375 kernel/sched/rt.c 	for_each_sched_rt_entity(rt_se) {
rt_se            1376 kernel/sched/rt.c 		rt_rq = rt_rq_of_se(rt_se);
rt_se            1377 kernel/sched/rt.c 		requeue_rt_entity(rt_rq, rt_se, head);
rt_se            1558 kernel/sched/rt.c 	struct sched_rt_entity *rt_se;
rt_se            1562 kernel/sched/rt.c 		rt_se = pick_next_rt_entity(rq, rt_rq);
rt_se            1563 kernel/sched/rt.c 		BUG_ON(!rt_se);
rt_se            1564 kernel/sched/rt.c 		rt_rq = group_rt_rq(rt_se);
rt_se            1567 kernel/sched/rt.c 	return rt_task_of(rt_se);
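
The loop in the rt.c:1558-1567 hits descends the hierarchy when picking the next task: take the best entity on the current rt_rq, and if group_rt_rq() returns a child queue (the entity's my_q), repeat inside it until a task entity is reached. A sketch of that descent; the best field replaces the real priority-array lookup and task_id is a demo payload:

    #include <assert.h>
    #include <stddef.h>

    struct sched_rt_entity;

    struct rt_rq {
        struct sched_rt_entity *best;   /* stand-in for the priority-array pick */
    };

    struct sched_rt_entity {
        struct rt_rq *my_q;             /* NULL for a task entity */
        int           task_id;          /* demo payload for task entities */
    };

    static struct sched_rt_entity *model_pick_next_rt_entity(struct rt_rq *rt_rq)
    {
        return rt_rq->best;
    }

    static int model_pick_next_task_rt(struct rt_rq *top)
    {
        struct rt_rq *rt_rq = top;
        struct sched_rt_entity *rt_se;

        do {
            rt_se = model_pick_next_rt_entity(rt_rq);
            rt_rq = rt_se->my_q;        /* group_rt_rq(): non-NULL means descend */
        } while (rt_rq);

        return rt_se->task_id;
    }

    int main(void)
    {
        struct sched_rt_entity task_se = { NULL, 42 };
        struct rt_rq grp_rq = { &task_se };
        struct sched_rt_entity grp_se = { &grp_rq, 0 };
        struct rt_rq top_rq = { &grp_se };

        assert(model_pick_next_task_rt(&top_rq) == 42);
        return 0;
    }
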
rt_se            2317 kernel/sched/rt.c 	struct sched_rt_entity *rt_se = &p->rt;
rt_se            2340 kernel/sched/rt.c 	for_each_sched_rt_entity(rt_se) {
rt_se            2341 kernel/sched/rt.c 		if (rt_se->run_list.prev != rt_se->run_list.next) {
rt_se             382 kernel/sched/sched.h 	struct sched_rt_entity	**rt_se;
rt_se             461 kernel/sched/sched.h 		struct sched_rt_entity *rt_se, int cpu,
rt_se            1516 kernel/sched/sched.h 	p->rt.parent = tg->rt_se[cpu];
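
The sched.h hits tie the pieces together: struct task_group keeps one sched_rt_entity pointer per CPU (line 382 above), and when a task is placed in a group its embedded entity's parent is pointed at the group's entity for that CPU (line 1516), which is what the parent walk above climbs. A user-space sketch of that attach; nr_cpu_ids, cpu and the trimmed structs are assumptions for the model:

    #include <assert.h>
    #include <stdlib.h>

    struct sched_rt_entity {
        struct sched_rt_entity *parent;
    };

    struct task_group {
        struct sched_rt_entity **rt_se;   /* one entity pointer per CPU */
    };

    struct task_struct {
        struct sched_rt_entity rt;        /* the task's own entity, embedded */
    };

    int main(void)
    {
        const int nr_cpu_ids = 2, cpu = 1;   /* assumptions for the model */

        struct task_group tg;
        tg.rt_se = calloc(nr_cpu_ids, sizeof(*tg.rt_se));
        for (int i = 0; i < nr_cpu_ids; i++)
            tg.rt_se[i] = calloc(1, sizeof(**tg.rt_se));

        struct task_struct p = { { NULL } };

        /* set_task_rq()-style attach: the task's entity now hangs off the
         * group's entity for this CPU, so the parent walk reaches the group */
        p.rt.parent = tg.rt_se[cpu];
        assert(p.rt.parent == tg.rt_se[cpu]);

        for (int i = 0; i < nr_cpu_ids; i++)
            free(tg.rt_se[i]);
        free(tg.rt_se);
        return 0;
    }
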