cfs_b            7382 kernel/sched/core.c 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
cfs_b            7414 kernel/sched/core.c 	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
cfs_b            7421 kernel/sched/core.c 	raw_spin_lock_irq(&cfs_b->lock);
cfs_b            7422 kernel/sched/core.c 	cfs_b->period = ns_to_ktime(period);
cfs_b            7423 kernel/sched/core.c 	cfs_b->quota = quota;
cfs_b            7425 kernel/sched/core.c 	__refill_cfs_bandwidth_runtime(cfs_b);
cfs_b            7429 kernel/sched/core.c 		start_cfs_bandwidth(cfs_b);
cfs_b            7431 kernel/sched/core.c 	raw_spin_unlock_irq(&cfs_b->lock);
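The core.c hits above (7414-7431) fall inside tg_set_cfs_bandwidth(), which applies a new period/quota pair under cfs_b->lock. A hedged sketch of that critical section, reconstructed from the listed lines (period, quota and runtime_was_enabled/runtime_enabled are locals of the surrounding function, shown only for context):

	raw_spin_lock_irq(&cfs_b->lock);
	cfs_b->period = ns_to_ktime(period);	/* new period, in nanoseconds */
	cfs_b->quota = quota;			/* new quota; RUNTIME_INF disables throttling */

	__refill_cfs_bandwidth_runtime(cfs_b);	/* begin the new period with a full pool */

	/* Restart the period timer if a finite quota is (still) in effect. */
	if (runtime_enabled)
		start_cfs_bandwidth(cfs_b);

	raw_spin_unlock_irq(&cfs_b->lock);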
cfs_b            7562 kernel/sched/core.c 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
cfs_b            7587 kernel/sched/core.c 	cfs_b->hierarchical_quota = quota;
cfs_b            7616 kernel/sched/core.c 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
cfs_b            7618 kernel/sched/core.c 	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
cfs_b            7619 kernel/sched/core.c 	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
cfs_b            7620 kernel/sched/core.c 	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
cfs_b            7722 kernel/sched/core.c 		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
cfs_b            7725 kernel/sched/core.c 		throttled_usec = cfs_b->throttled_time;
cfs_b            7731 kernel/sched/core.c 			   cfs_b->nr_periods, cfs_b->nr_throttled,
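The hits at 7722-7731 come from the cgroup v2 cpu.stat handler: throttled_time is kept in nanoseconds and is converted to microseconds (a do_div() by NSEC_PER_USEC, not caught by this symbol search) before being printed. The print itself, reconstructed from the two listed fragments:

		seq_printf(sf, "nr_periods %d\n"
			   "nr_throttled %d\n"
			   "throttled_usec %llu\n",
			   cfs_b->nr_periods, cfs_b->nr_throttled,
			   throttled_usec);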
cfs_b            4370 kernel/sched/fair.c void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
cfs_b            4372 kernel/sched/fair.c 	if (cfs_b->quota != RUNTIME_INF)
cfs_b            4373 kernel/sched/fair.c 		cfs_b->runtime = cfs_b->quota;
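Lines 4370-4373 of fair.c are effectively the whole of __refill_cfs_bandwidth_runtime() in this kernel (per-slice runtime expiration had already been removed); reconstructed:

/* Refill the global pool up to the full quota. Requires cfs_b->lock. */
void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
{
	if (cfs_b->quota != RUNTIME_INF)
		cfs_b->runtime = cfs_b->quota;
}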
cfs_b            4385 kernel/sched/fair.c 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
cfs_b            4391 kernel/sched/fair.c 	raw_spin_lock(&cfs_b->lock);
cfs_b            4392 kernel/sched/fair.c 	if (cfs_b->quota == RUNTIME_INF)
cfs_b            4395 kernel/sched/fair.c 		start_cfs_bandwidth(cfs_b);
cfs_b            4397 kernel/sched/fair.c 		if (cfs_b->runtime > 0) {
cfs_b            4398 kernel/sched/fair.c 			amount = min(cfs_b->runtime, min_amount);
cfs_b            4399 kernel/sched/fair.c 			cfs_b->runtime -= amount;
cfs_b            4400 kernel/sched/fair.c 			cfs_b->idle = 0;
cfs_b            4403 kernel/sched/fair.c 	raw_spin_unlock(&cfs_b->lock);
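The hits at 4385-4403 are inside assign_cfs_rq_runtime(), where a per-CPU cfs_rq pulls one slice of runtime from the global pool. A condensed sketch, with the per-cfs_rq bookkeeping around the listed lines paraphrased (min_amount is one slice plus any existing deficit):

	min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;

	raw_spin_lock(&cfs_b->lock);
	if (cfs_b->quota == RUNTIME_INF)
		amount = min_amount;		/* unconstrained group: always grant */
	else {
		start_cfs_bandwidth(cfs_b);	/* make sure the period timer is armed */

		if (cfs_b->runtime > 0) {
			amount = min(cfs_b->runtime, min_amount);
			cfs_b->runtime -= amount;
			cfs_b->idle = 0;	/* the pool was used this period */
		}
	}
	raw_spin_unlock(&cfs_b->lock);

	cfs_rq->runtime_remaining += amount;	/* > 0 means the cfs_rq may keep running */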
cfs_b            4501 kernel/sched/fair.c 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
cfs_b            4535 kernel/sched/fair.c 	raw_spin_lock(&cfs_b->lock);
cfs_b            4536 kernel/sched/fair.c 	empty = list_empty(&cfs_b->throttled_cfs_rq);
cfs_b            4543 kernel/sched/fair.c 	if (cfs_b->distribute_running)
cfs_b            4544 kernel/sched/fair.c 		list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
cfs_b            4546 kernel/sched/fair.c 		list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
cfs_b            4553 kernel/sched/fair.c 		start_cfs_bandwidth(cfs_b);
cfs_b            4555 kernel/sched/fair.c 	raw_spin_unlock(&cfs_b->lock);
cfs_b            4561 kernel/sched/fair.c 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
cfs_b            4572 kernel/sched/fair.c 	raw_spin_lock(&cfs_b->lock);
cfs_b            4573 kernel/sched/fair.c 	cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
cfs_b            4575 kernel/sched/fair.c 	raw_spin_unlock(&cfs_b->lock);
cfs_b            4620 kernel/sched/fair.c static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
cfs_b            4627 kernel/sched/fair.c 	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
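distribute_cfs_runtime() (4620 onward) walks the throttled list under RCU and hands each throttled cfs_rq just enough runtime to push runtime_remaining above zero, unthrottling it; the return value is how much of `remaining` was actually handed out. A condensed sketch of the loop, omitting the per-rq locking and sanity checks:

	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, throttled_list) {
		/* enough to bring runtime_remaining back above zero */
		runtime = -cfs_rq->runtime_remaining + 1;
		if (runtime > remaining)
			runtime = remaining;
		remaining -= runtime;

		cfs_rq->runtime_remaining += runtime;
		if (cfs_rq->runtime_remaining > 0)
			unthrottle_cfs_rq(cfs_rq);

		if (!remaining)
			break;
	}

	return starting_runtime - remaining;	/* amount actually distributed */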
cfs_b            4667 kernel/sched/fair.c static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
cfs_b            4673 kernel/sched/fair.c 	if (cfs_b->quota == RUNTIME_INF)
cfs_b            4676 kernel/sched/fair.c 	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
cfs_b            4677 kernel/sched/fair.c 	cfs_b->nr_periods += overrun;
cfs_b            4683 kernel/sched/fair.c 	if (cfs_b->idle && !throttled)
cfs_b            4686 kernel/sched/fair.c 	__refill_cfs_bandwidth_runtime(cfs_b);
cfs_b            4690 kernel/sched/fair.c 		cfs_b->idle = 1;
cfs_b            4695 kernel/sched/fair.c 	cfs_b->nr_throttled += overrun;
cfs_b            4704 kernel/sched/fair.c 	while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
cfs_b            4705 kernel/sched/fair.c 		runtime = cfs_b->runtime;
cfs_b            4706 kernel/sched/fair.c 		cfs_b->distribute_running = 1;
cfs_b            4707 kernel/sched/fair.c 		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
cfs_b            4709 kernel/sched/fair.c 		runtime = distribute_cfs_runtime(cfs_b, runtime);
cfs_b            4710 kernel/sched/fair.c 		raw_spin_lock_irqsave(&cfs_b->lock, flags);
cfs_b            4712 kernel/sched/fair.c 		cfs_b->distribute_running = 0;
cfs_b            4713 kernel/sched/fair.c 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
cfs_b            4715 kernel/sched/fair.c 		lsub_positive(&cfs_b->runtime, runtime);
cfs_b            4724 kernel/sched/fair.c 	cfs_b->idle = 0;
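In the loop at 4704-4715, cfs_b->lock is dropped around distribution (distribute_cfs_runtime() takes per-rq locks and must not nest inside cfs_b->lock), and distribute_running keeps the slack timer from distributing at the same time. The return value of do_sched_cfs_period_timer() is what arms or retires the hrtimer; a sketch of the surrounding control flow, assuming the labels of this kernel version:

	if (cfs_b->quota == RUNTIME_INF)
		goto out_deactivate;		/* no constraint: nothing to refresh */

	if (cfs_b->idle && !throttled)
		goto out_deactivate;		/* pool untouched last period: let the timer lapse */

	/* refill and distribute to throttled cfs_rqs (lines above) */

	cfs_b->idle = 0;
	return 0;				/* keep the period timer running */

out_deactivate:
	return 1;				/* caller clears period_active and stops the timer */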
cfs_b            4746 kernel/sched/fair.c static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
cfs_b            4748 kernel/sched/fair.c 	struct hrtimer *refresh_timer = &cfs_b->period_timer;
cfs_b            4763 kernel/sched/fair.c static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
cfs_b            4768 kernel/sched/fair.c 	if (runtime_refresh_within(cfs_b, min_left))
cfs_b            4772 kernel/sched/fair.c 	if (cfs_b->slack_started)
cfs_b            4774 kernel/sched/fair.c 	cfs_b->slack_started = true;
cfs_b            4776 kernel/sched/fair.c 	hrtimer_start(&cfs_b->slack_timer,
cfs_b            4784 kernel/sched/fair.c 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
cfs_b            4790 kernel/sched/fair.c 	raw_spin_lock(&cfs_b->lock);
cfs_b            4791 kernel/sched/fair.c 	if (cfs_b->quota != RUNTIME_INF) {
cfs_b            4792 kernel/sched/fair.c 		cfs_b->runtime += slack_runtime;
cfs_b            4795 kernel/sched/fair.c 		if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
cfs_b            4796 kernel/sched/fair.c 		    !list_empty(&cfs_b->throttled_cfs_rq))
cfs_b            4797 kernel/sched/fair.c 			start_cfs_slack_bandwidth(cfs_b);
cfs_b            4799 kernel/sched/fair.c 	raw_spin_unlock(&cfs_b->lock);
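__return_cfs_rq_runtime() (the hits at 4784-4799) gives a cfs_rq's leftover local runtime back to the pool, and only arms the slack timer when the pool now holds more than one slice and something is still throttled. The slice compared against on line 4795 comes from a sysctl; roughly:

/* default: 5 msec, units: microseconds */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;

static inline u64 sched_cfs_bandwidth_slice(void)
{
	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
}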
cfs_b            4820 kernel/sched/fair.c static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
cfs_b            4826 kernel/sched/fair.c 	raw_spin_lock_irqsave(&cfs_b->lock, flags);
cfs_b            4827 kernel/sched/fair.c 	cfs_b->slack_started = false;
cfs_b            4828 kernel/sched/fair.c 	if (cfs_b->distribute_running) {
cfs_b            4829 kernel/sched/fair.c 		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
cfs_b            4833 kernel/sched/fair.c 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
cfs_b            4834 kernel/sched/fair.c 		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
cfs_b            4838 kernel/sched/fair.c 	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
cfs_b            4839 kernel/sched/fair.c 		runtime = cfs_b->runtime;
cfs_b            4842 kernel/sched/fair.c 		cfs_b->distribute_running = 1;
cfs_b            4844 kernel/sched/fair.c 	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
cfs_b            4849 kernel/sched/fair.c 	runtime = distribute_cfs_runtime(cfs_b, runtime);
cfs_b            4851 kernel/sched/fair.c 	raw_spin_lock_irqsave(&cfs_b->lock, flags);
cfs_b            4852 kernel/sched/fair.c 	lsub_positive(&cfs_b->runtime, runtime);
cfs_b            4853 kernel/sched/fair.c 	cfs_b->distribute_running = 0;
cfs_b            4854 kernel/sched/fair.c 	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
cfs_b            4920 kernel/sched/fair.c 	struct cfs_bandwidth *cfs_b =
cfs_b            4923 kernel/sched/fair.c 	do_sched_cfs_slack_timer(cfs_b);
cfs_b            4932 kernel/sched/fair.c 	struct cfs_bandwidth *cfs_b =
cfs_b            4939 kernel/sched/fair.c 	raw_spin_lock_irqsave(&cfs_b->lock, flags);
cfs_b            4941 kernel/sched/fair.c 		overrun = hrtimer_forward_now(timer, cfs_b->period);
cfs_b            4946 kernel/sched/fair.c 			u64 new, old = ktime_to_ns(cfs_b->period);
cfs_b            4955 kernel/sched/fair.c 				cfs_b->period = ns_to_ktime(new);
cfs_b            4956 kernel/sched/fair.c 				cfs_b->quota *= 2;
cfs_b            4962 kernel/sched/fair.c 					div_u64(cfs_b->quota, NSEC_PER_USEC));
cfs_b            4968 kernel/sched/fair.c 					div_u64(cfs_b->quota, NSEC_PER_USEC));
cfs_b            4975 kernel/sched/fair.c 		idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
cfs_b            4978 kernel/sched/fair.c 		cfs_b->period_active = 0;
cfs_b            4979 kernel/sched/fair.c 	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
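sched_cfs_period_timer() (4932-4979) forwards the timer over any missed periods; if one callback invocation keeps overrunning the period it doubles the period (and the quota, to preserve the ratio) with a rate-limited warning, which is what the div_u64(..., NSEC_PER_USEC) hits above are printing. The tail of the handler, reconstructed, is what tells the hrtimer core whether to re-arm:

	if (idle)
		cfs_b->period_active = 0;
	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;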
cfs_b            4984 kernel/sched/fair.c void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
cfs_b            4986 kernel/sched/fair.c 	raw_spin_lock_init(&cfs_b->lock);
cfs_b            4987 kernel/sched/fair.c 	cfs_b->runtime = 0;
cfs_b            4988 kernel/sched/fair.c 	cfs_b->quota = RUNTIME_INF;
cfs_b            4989 kernel/sched/fair.c 	cfs_b->period = ns_to_ktime(default_cfs_period());
cfs_b            4991 kernel/sched/fair.c 	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
cfs_b            4992 kernel/sched/fair.c 	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
cfs_b            4993 kernel/sched/fair.c 	cfs_b->period_timer.function = sched_cfs_period_timer;
cfs_b            4994 kernel/sched/fair.c 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cfs_b            4995 kernel/sched/fair.c 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
cfs_b            4996 kernel/sched/fair.c 	cfs_b->distribute_running = 0;
cfs_b            4997 kernel/sched/fair.c 	cfs_b->slack_started = false;
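init_cfs_bandwidth() is listed nearly in full above (4984-4997). As a hedged usage note for this kernel: it runs once per task group at creation time, e.g.

	/* in alloc_fair_sched_group(), before the per-CPU cfs_rq/se setup */
	init_cfs_bandwidth(tg_cfs_bandwidth(tg));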
cfs_b            5006 kernel/sched/fair.c void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
cfs_b            5008 kernel/sched/fair.c 	lockdep_assert_held(&cfs_b->lock);
cfs_b            5010 kernel/sched/fair.c 	if (cfs_b->period_active)
cfs_b            5013 kernel/sched/fair.c 	cfs_b->period_active = 1;
cfs_b            5014 kernel/sched/fair.c 	hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
cfs_b            5015 kernel/sched/fair.c 	hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
cfs_b            5018 kernel/sched/fair.c static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
cfs_b            5021 kernel/sched/fair.c 	if (!cfs_b->throttled_cfs_rq.next)
cfs_b            5024 kernel/sched/fair.c 	hrtimer_cancel(&cfs_b->period_timer);
cfs_b            5025 kernel/sched/fair.c 	hrtimer_cancel(&cfs_b->slack_timer);
cfs_b            5044 kernel/sched/fair.c 		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
cfs_b            5047 kernel/sched/fair.c 		raw_spin_lock(&cfs_b->lock);
cfs_b            5048 kernel/sched/fair.c 		cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
cfs_b            5049 kernel/sched/fair.c 		raw_spin_unlock(&cfs_b->lock);
cfs_b            5114 kernel/sched/fair.c void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
cfs_b            5124 kernel/sched/fair.c static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
cfs_b             452 kernel/sched/sched.h extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
cfs_b             454 kernel/sched/sched.h extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
cfs_b             455 kernel/sched/sched.h extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
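The sched.h hits show only the prototypes. For orientation, the cfs_b fields exercised throughout this listing live in struct cfs_bandwidth in kernel/sched/sched.h; a sketch restricted to the members referenced above (exact ordering and types may differ slightly between kernel versions):

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t		lock;
	ktime_t			period;			/* length of each bandwidth period */
	u64			quota;			/* runtime allowed per period; RUNTIME_INF = unlimited */
	u64			runtime;		/* runtime left in the current period */
	s64			hierarchical_quota;	/* used by the cgroup-tree consistency check */

	u8			idle;			/* pool untouched during the last period */
	u8			period_active;		/* period_timer is armed */
	u8			distribute_running;	/* distribute_cfs_runtime() is in progress */
	u8			slack_started;		/* slack_timer is armed */
	struct hrtimer		period_timer;		/* refills runtime each period */
	struct hrtimer		slack_timer;		/* redistributes returned slack runtime */
	struct list_head	throttled_cfs_rq;	/* cfs_rqs waiting for runtime */

	/* statistics exported via cpu.stat */
	int			nr_periods;
	int			nr_throttled;
	u64			throttled_time;
#endif
};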