nr_running        551 arch/s390/appldata/appldata_base.c EXPORT_SYMBOL_GPL(nr_running);
nr_running         68 arch/s390/appldata/appldata_os.c 	u32 nr_running;		/* number of runnable threads      */
nr_running        106 arch/s390/appldata/appldata_os.c 	os_data->nr_running = nr_running();
nr_running         23 fs/proc/loadavg.c 		nr_running(), nr_threads,
nr_running        199 fs/proc/stat.c 		nr_running(),
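The two fs/proc hits above are where the global count surfaces to userspace: /proc/loadavg prints nr_running()/nr_threads as its fourth field, and /proc/stat reports the same value as procs_running. A minimal userspace sketch (illustrative, not part of the tree) that reads the loadavg field back:

#include <stdio.h>

int main(void)
{
	double l1, l5, l15;
	long running, total;
	FILE *f = fopen("/proc/loadavg", "r");

	if (!f)
		return 1;
	/* e.g. "0.20 0.18 0.12 1/80 11206": fourth field is running/total */
	if (fscanf(f, "%lf %lf %lf %ld/%ld",
		   &l1, &l5, &l15, &running, &total) == 5)
		printf("runnable %ld of %ld threads\n", running, total);
	fclose(f);
	return 0;
}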
nr_running         19 include/linux/sched/stat.h extern unsigned long nr_running(void);
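The extern above is satisfied in kernel/sched/core.c; the hit at core.c:3397 is the summation loop inside it. The full function in mainline of this era is a straight sum of the per-CPU runqueue counters:

unsigned long nr_running(void)
{
	unsigned long i, sum = 0;

	for_each_online_cpu(i)
		sum += cpu_rq(i)->nr_running;

	return sum;
}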
nr_running         34 include/uapi/linux/cgroupstats.h 	__u64	nr_running;		/* Number of tasks running */
nr_running        722 kernel/cgroup/cgroup-v1.c 			stats->nr_running++;
nr_running        694 kernel/sched/core.c 	if (rq->nr_running > 1)
nr_running       3397 kernel/sched/core.c 		sum += cpu_rq(i)->nr_running;
nr_running       3417 kernel/sched/core.c 	return raw_rq()->nr_running == 1;
nr_running       3917 kernel/sched/core.c 		   rq->nr_running == rq->cfs.h_nr_running)) {
nr_running       4623 kernel/sched/core.c 	if (rq->nr_running)
nr_running       5708 kernel/sched/core.c 	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
nr_running       6260 kernel/sched/core.c 		if (rq->nr_running == 1)
nr_running       6485 kernel/sched/core.c 	BUG_ON(rq->nr_running != 1);
nr_running       6624 kernel/sched/core.c 		rq->nr_running = 0;
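Among the core.c hits, line 3417 is the entire body of single_task_running(), which tells a caller (KVM's halt polling is one user) whether busy-waiting on this CPU would steal time from another task:

bool single_task_running(void)
{
	return raw_rq()->nr_running == 1;
}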
nr_running        524 kernel/sched/debug.c 	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
nr_running        641 kernel/sched/debug.c 	P(nr_running);
nr_running        678 kernel/sched/fair.c static u64 __sched_period(unsigned long nr_running)
nr_running        680 kernel/sched/fair.c 	if (unlikely(nr_running > sched_nr_latency))
nr_running        681 kernel/sched/fair.c 		return nr_running * sysctl_sched_min_granularity;
nr_running        694 kernel/sched/fair.c 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
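The fair.c hits at 678-694 implement CFS timeslicing: __sched_period() stretches the scheduling period once more than sched_nr_latency tasks are runnable, and sched_slice() (line 694) carves that period up per entity. Completing the function around the two lines shown, with the standard mainline fallback branch:

static u64 __sched_period(unsigned long nr_running)
{
	if (unlikely(nr_running > sched_nr_latency))
		return nr_running * sysctl_sched_min_granularity;
	else
		return sysctl_sched_latency;
}

With the unscaled defaults (sysctl_sched_latency = 6 ms, sysctl_sched_min_granularity = 0.75 ms, sched_nr_latency = 8), nine runnable tasks give a 9 * 0.75 = 6.75 ms period, while eight or fewer share the flat 6 ms.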
nr_running       2768 kernel/sched/fair.c 	cfs_rq->nr_running++;
nr_running       2781 kernel/sched/fair.c 	cfs_rq->nr_running--;
nr_running       4014 kernel/sched/fair.c 	if (cfs_rq->nr_running == 1 || cfs_bandwidth_used())
nr_running       4017 kernel/sched/fair.c 	if (cfs_rq->nr_running == 1)
nr_running       4311 kernel/sched/fair.c 	if (cfs_rq->nr_running > 1)
nr_running       4476 kernel/sched/fair.c 		if (cfs_rq->nr_running >= 1)
nr_running       4616 kernel/sched/fair.c 	if (rq->curr == rq->idle && rq->cfs.nr_running)
nr_running       4810 kernel/sched/fair.c 	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
nr_running       5168 kernel/sched/fair.c 	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
nr_running       5394 kernel/sched/fair.c 	return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
nr_running       5395 kernel/sched/fair.c 			rq->nr_running);
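Lines 5394-5395 are the whole test for a runqueue that is busy only with SCHED_IDLE work; wakeup placement treats such a CPU as effectively idle. A sketch consistent with the two hits (mainline names this sched_idle_cpu() or sched_idle_rq() depending on version):

/* every runnable task on this CPU is SCHED_IDLE */
static int sched_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
			rq->nr_running);
}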
nr_running       5411 kernel/sched/fair.c 	unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
nr_running       5414 kernel/sched/fair.c 	if (nr_running)
nr_running       5415 kernel/sched/fair.c 		return load_avg / nr_running;
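Lines 5411-5415 belong to cpu_avg_load_per_task(), the average load one runnable CFS task contributes on a CPU. Reconstructed around the hits; the load read is an assumption, since the helper it calls (weighted_cpuload()/cpu_load()) changed names across versions:

static unsigned long cpu_avg_load_per_task(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
	unsigned long load_avg = cpu_load(rq); /* helper name varies by version */

	if (nr_running)
		return load_avg / nr_running;

	return 0;
}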
nr_running       5497 kernel/sched/fair.c 	if (sync && cpu_rq(this_cpu)->nr_running == 1)
nr_running       6603 kernel/sched/fair.c 	if (rq->nr_running)
nr_running       6697 kernel/sched/fair.c 	int scale = cfs_rq->nr_running >= sched_nr_latency;
nr_running       6823 kernel/sched/fair.c 				if (!cfs_rq->nr_running)
nr_running       6947 kernel/sched/fair.c 	if (unlikely(rq->nr_running == 1))
nr_running       7167 kernel/sched/fair.c 	if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
nr_running       7208 kernel/sched/fair.c 		if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
nr_running       7390 kernel/sched/fair.c 		if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
nr_running       8057 kernel/sched/fair.c 	int i, nr_running;
nr_running       8071 kernel/sched/fair.c 		nr_running = rq->nr_running;
nr_running       8072 kernel/sched/fair.c 		if (nr_running > 1)
nr_running       8085 kernel/sched/fair.c 		if (!nr_running && idle_cpu(i))
nr_running       8207 kernel/sched/fair.c 	if (rq->nr_running > rq->nr_numa_running)
nr_running       8209 kernel/sched/fair.c 	if (rq->nr_running > rq->nr_preferred_running)
nr_running       8695 kernel/sched/fair.c 		    rq->nr_running == 1)
nr_running       8705 kernel/sched/fair.c 		if (rq->nr_running == 1 && load > env->imbalance &&
nr_running       8882 kernel/sched/fair.c 	if (busiest->nr_running > 1) {
nr_running       8890 kernel/sched/fair.c 		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
nr_running       9162 kernel/sched/fair.c 	if (busiest_rq->nr_running <= 1)
nr_running       9431 kernel/sched/fair.c 	if (rq->nr_running >= 2) {
nr_running       9878 kernel/sched/fair.c 		if (pulled_task || this_rq->nr_running > 0)
nr_running       9902 kernel/sched/fair.c 	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
nr_running         83 kernel/sched/loadavg.c 	nr_active = this_rq->nr_running - adjust;
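The single loadavg.c hit sits in calc_load_fold_active(), which folds a runqueue's runnable plus uninterruptible tasks into the sampled load average and returns only the delta since the last fold. A sketch reconstructed around line 83, following the mainline shape:

long calc_load_fold_active(struct rq *this_rq, long adjust)
{
	long nr_active, delta = 0;

	nr_active = this_rq->nr_running - adjust;
	nr_active += (long)this_rq->nr_uninterruptible;

	if (nr_active != this_rq->calc_load_active) {
		delta = nr_active - this_rq->calc_load_active;
		this_rq->calc_load_active = nr_active;
	}

	return delta;
}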
nr_running       1005 kernel/sched/rt.c 	BUG_ON(!rq->nr_running);
nr_running        499 kernel/sched/sched.h 	unsigned int		nr_running;
nr_running        861 kernel/sched/sched.h 	unsigned int		nr_running;
nr_running       1827 kernel/sched/sched.h 	return rq->cfs.nr_running > 0;
nr_running       1930 kernel/sched/sched.h 	unsigned prev_nr = rq->nr_running;
nr_running       1932 kernel/sched/sched.h 	rq->nr_running = prev_nr + count;
nr_running       1935 kernel/sched/sched.h 	if (prev_nr < 2 && rq->nr_running >= 2) {
nr_running       1946 kernel/sched/sched.h 	rq->nr_running -= count;
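The sched.h hits at 1930-1946 are add_nr_running() and sub_nr_running(), the helpers every scheduling class goes through to adjust rq->nr_running. The interesting edge is prev_nr < 2 crossing to >= 2: the runqueue just became overloaded and is flagged for load balancing. The bodies below follow mainline around v5.x; treat the exact overload and tick handling as an assumption:

static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;

#ifdef CONFIG_SMP
	if (prev_nr < 2 && rq->nr_running >= 2) {
		/* went from <= 1 to >= 2 runnable: mark the root domain overloaded */
		if (!READ_ONCE(rq->rd->overload))
			WRITE_ONCE(rq->rd->overload, 1);
	}
#endif

	sched_update_tick_dependency(rq);
}

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
	/* a nohz_full CPU may be able to stop its tick now */
	sched_update_tick_dependency(rq);
}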
nr_running        184 kernel/workqueue.c 	atomic_t		nr_running ____cacheline_aligned_in_smp;
nr_running        770 kernel/workqueue.c 	return !atomic_read(&pool->nr_running);
nr_running        796 kernel/workqueue.c 		atomic_read(&pool->nr_running) <= 1;
nr_running        858 kernel/workqueue.c 		atomic_inc(&worker->pool->nr_running);
nr_running        901 kernel/workqueue.c 	if (atomic_dec_and_test(&pool->nr_running) &&
nr_running        960 kernel/workqueue.c 		atomic_dec(&pool->nr_running);
nr_running        992 kernel/workqueue.c 			atomic_inc(&pool->nr_running);
nr_running       1796 kernel/workqueue.c 		     atomic_read(&pool->nr_running));
nr_running       4903 kernel/workqueue.c 		atomic_set(&pool->nr_running, 0);
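In workqueue.c the name means something else: pool->nr_running counts workers that are awake and executing, and drives concurrency management (wake one more worker only when everyone else is blocked). Lines 770 and 796 are the bodies of these two predicates, shown here as in mainline before v5.15, when the counter was still an atomic_t:

/* wake another worker?  only if nothing in the pool is running */
static bool __need_more_worker(struct worker_pool *pool)
{
	return !atomic_read(&pool->nr_running);
}

/*
 * keep the current worker executing while work is queued and at most
 * one worker is burning CPU
 */
static bool keep_working(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) &&
		atomic_read(&pool->nr_running) <= 1;
}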
nr_running        247 tools/accounting/getdelays.c 		(unsigned long long)c->nr_running,
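The last hit is the userspace consumer: getdelays prints the nr_running field declared at cgroupstats.h:34 and accumulated at cgroup-v1.c:722. A sketch of that print path; the field names come from the uapi header, but the exact format string in the tool may differ:

#include <stdio.h>
#include <linux/cgroupstats.h>

static void print_cgroupstats(struct cgroupstats *c)
{
	printf("sleeping %llu, blocked %llu, running %llu, stopped %llu, uninterruptible %llu\n",
	       (unsigned long long)c->nr_sleeping,
	       (unsigned long long)c->nr_io_wait,
	       (unsigned long long)c->nr_running,
	       (unsigned long long)c->nr_stopped,
	       (unsigned long long)c->nr_uninterruptible);
}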