Lines Matching refs:period
751 s64 period = sched_avg_period(); in sched_avg_update() local
753 while ((s64)(rq_clock(rq) - rq->age_stamp) > period) { in sched_avg_update()
760 rq->age_stamp += period; in sched_avg_update()
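
The sched_avg_update() lines above (751-760) advance rq->age_stamp in whole sched_avg_period() steps until it catches up with rq_clock(rq). Below is a minimal user-space sketch of that catch-up loop; catch_up_age_stamp and the numeric values are illustrative, not kernel names.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the loop at lines 753-760: step the age
     * stamp forward one whole period at a time until less than one period
     * of clock time remains unaccounted for. All values are nanoseconds. */
    static void catch_up_age_stamp(uint64_t clock_ns, uint64_t *age_stamp_ns,
                                   int64_t period_ns)
    {
            while ((int64_t)(clock_ns - *age_stamp_ns) > period_ns)
                    *age_stamp_ns += period_ns;
    }

    int main(void)
    {
            uint64_t age_stamp = 0;

            catch_up_age_stamp(3500000000ULL, &age_stamp, 1000000000LL);
            printf("age_stamp = %llu\n", (unsigned long long)age_stamp);
            /* Prints 3000000000: three whole 1 s periods were consumed. */
            return 0;
    }
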
2266 unsigned long to_ratio(u64 period, u64 runtime) in to_ratio() argument
2276 if (period == 0) in to_ratio()
2279 return div64_u64(runtime << 20, period); in to_ratio()
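
to_ratio() at lines 2266-2279 converts a (period, runtime) pair into a utilization in fixed point with 20 fractional bits, so 1 << 20 means one full CPU. The branch at line 2276 handles period == 0; its return value is not visible in this listing, and the sketch below assumes it is 0. to_ratio_sketch is an illustrative name, and plain 64-bit division stands in for div64_u64().

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as to_ratio(): runtime/period scaled by 2^20. */
    static uint64_t to_ratio_sketch(uint64_t period, uint64_t runtime)
    {
            if (period == 0)
                    return 0;       /* assumed value of the period == 0 branch */

            return (runtime << 20) / period;
    }

    int main(void)
    {
            /* 10 ms of runtime every 100 ms: about 0.1 * 2^20 = 104857. */
            printf("%llu\n",
                   (unsigned long long)to_ratio_sketch(100000000ULL, 10000000ULL));
            return 0;
    }
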
2330 u64 period = attr->sched_period ?: attr->sched_deadline; in dl_overflow() local
2332 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; in dl_overflow()
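
dl_overflow() at lines 2330-2332 picks the reservation period as sched_period, falling back to sched_deadline when sched_period is zero (the GNU C `?:` with an omitted middle operand), and computes the requested deadline bandwidth as to_ratio(period, runtime). As a worked example with assumed parameters: runtime = 30 ms and period = 100 ms give new_bw = (30000000 << 20) / 100000000 ≈ 314572, roughly 0.3 of a CPU in the same 20-bit fixed point.
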
7854 u64 period, runtime; in tg_rt_schedulable() local
7856 period = ktime_to_ns(tg->rt_bandwidth.rt_period); in tg_rt_schedulable()
7860 period = d->rt_period; in tg_rt_schedulable()
7867 if (runtime > period && runtime != RUNTIME_INF) in tg_rt_schedulable()
7876 total = to_ratio(period, runtime); in tg_rt_schedulable()
7888 period = ktime_to_ns(child->rt_bandwidth.rt_period); in tg_rt_schedulable()
7892 period = d->rt_period; in tg_rt_schedulable()
7896 sum += to_ratio(period, runtime); in tg_rt_schedulable()
7905 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) in __rt_schedulable() argument
7911 .rt_period = period, in __rt_schedulable()
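
tg_rt_schedulable() (lines 7854-7896) computes the group's own bandwidth as total = to_ratio(period, runtime) and accumulates the children's bandwidths into sum, while __rt_schedulable() (lines 7905-7911) packages the candidate period and runtime for that walk. The comparison between sum and total is not visible in this listing; the sketch below assumes the usual reading that children may not claim more bandwidth than their parent, flattens the hierarchy into an array, and uses the illustrative names ratio() and children_fit().

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct rt_params {
            uint64_t period_ns;
            uint64_t runtime_ns;
    };

    /* Same 20-bit fixed-point ratio as to_ratio() in the listing. */
    static uint64_t ratio(uint64_t period, uint64_t runtime)
    {
            return period ? (runtime << 20) / period : 0;
    }

    /* Assumed invariant behind lines 7876 and 7896: the children's summed
     * bandwidth may not exceed the bandwidth granted to the parent group. */
    static bool children_fit(const struct rt_params *parent,
                             const struct rt_params *children, int n)
    {
            uint64_t total = ratio(parent->period_ns, parent->runtime_ns);
            uint64_t sum = 0;

            for (int i = 0; i < n; i++)
                    sum += ratio(children[i].period_ns, children[i].runtime_ns);

            return sum <= total;
    }

    int main(void)
    {
            struct rt_params parent = { 1000000000ULL, 950000000ULL };
            struct rt_params kids[] = {
                    { 1000000000ULL, 400000000ULL },
                    { 1000000000ULL, 400000000ULL },
            };

            printf("%s\n", children_fit(&parent, kids, 2) ? "fits" : "overcommitted");
            return 0;
    }
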
8053 u64 period = global_rt_period(); in sched_dl_global_validate() local
8054 u64 new_bw = to_ratio(period, runtime); in sched_dl_global_validate()
8288 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8290 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) in tg_set_cfs_bandwidth() argument
8303 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) in tg_set_cfs_bandwidth()
8311 if (period > max_cfs_quota_period) in tg_set_cfs_bandwidth()
8320 ret = __cfs_schedulable(tg, period, quota); in tg_set_cfs_bandwidth()
8333 cfs_b->period = ns_to_ktime(period); in tg_set_cfs_bandwidth()
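
tg_set_cfs_bandwidth() (lines 8290-8333) rejects a quota or period below min_cfs_quota_period and a period above max_cfs_quota_period, runs the __cfs_schedulable() hierarchy check, and only then installs the new period via ns_to_ktime(). The sketch below mirrors just the bounds checks; the 1 ms and 1 s limits are an assumption about this kernel's values, and validate_cfs_bandwidth is an illustrative name.

    #include <stdint.h>
    #include <errno.h>
    #include <stdio.h>

    #define NSEC_PER_MSEC   1000000ULL
    #define NSEC_PER_SEC    1000000000ULL

    /* Assumed limits, mirroring min_cfs_quota_period/max_cfs_quota_period. */
    static const uint64_t min_cfs_quota_period = 1 * NSEC_PER_MSEC;  /* 1 ms */
    static const uint64_t max_cfs_quota_period = 1 * NSEC_PER_SEC;   /* 1 s  */

    /* Sketch of the checks at lines 8303 and 8311; the real path then goes
     * on to run the __cfs_schedulable() hierarchy walk. */
    static int validate_cfs_bandwidth(uint64_t period_ns, uint64_t quota_ns)
    {
            if (quota_ns < min_cfs_quota_period || period_ns < min_cfs_quota_period)
                    return -EINVAL;

            if (period_ns > max_cfs_quota_period)
                    return -EINVAL;

            return 0;
    }

    int main(void)
    {
            /* 100 ms period, 25 ms quota: a typical "quarter of one CPU" cap. */
            printf("%d\n", validate_cfs_bandwidth(100 * NSEC_PER_MSEC,
                                                  25 * NSEC_PER_MSEC));
            return 0;
    }
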
8365 u64 quota, period; in tg_set_cfs_quota() local
8367 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_quota()
8373 return tg_set_cfs_bandwidth(tg, period, quota); in tg_set_cfs_quota()
8391 u64 quota, period; in tg_set_cfs_period() local
8393 period = (u64)cfs_period_us * NSEC_PER_USEC; in tg_set_cfs_period()
8396 return tg_set_cfs_bandwidth(tg, period, quota); in tg_set_cfs_period()
8403 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); in tg_get_cfs_period()
8435 u64 period, quota; member
8445 u64 quota, period; in normalize_cfs_quota() local
8448 period = d->period; in normalize_cfs_quota()
8451 period = tg_get_cfs_period(tg); in normalize_cfs_quota()
8459 return to_ratio(period, quota); in normalize_cfs_quota()
8490 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) in __cfs_schedulable() argument
8495 .period = period, in __cfs_schedulable()
8500 do_div(data.period, NSEC_PER_USEC); in __cfs_schedulable()
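
At line 8500, __cfs_schedulable() divides the candidate period by NSEC_PER_USEC before walking the hierarchy, so normalize_cfs_quota() (lines 8445-8459) forms its ratio over microsecond values, matching the microsecond interface used by tg_set_cfs_period() and tg_get_cfs_period() at lines 8393 and 8403. As an assumed worked example: a 100000000 ns period becomes 100000 µs, and with a quota of 25000 µs, to_ratio(100000, 25000) = (25000 << 20) / 100000 = 262144, i.e. exactly 0.25 in 20-bit fixed point.
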