nr_running    101  fs/proc/proc_misc.c          nr_running(), nr_threads,
nr_running    592  fs/proc/proc_misc.c          nr_running(),
nr_running     32  include/linux/cgroupstats.h  __u64 nr_running; /* Number of tasks running */
nr_running    134  include/linux/sched.h        extern unsigned long nr_running(void);
nr_running   2062  kernel/cgroup.c              stats->nr_running++;
nr_running    384  kernel/sched.c               unsigned long nr_running;
nr_running    521  kernel/sched.c               unsigned long nr_running;
nr_running   1442  kernel/sched.c               if (rq->nr_running)
nr_running   1443  kernel/sched.c               rq->avg_load_per_task = rq->load.weight / rq->nr_running;
nr_running   1624  kernel/sched.c               rq->nr_running++;
nr_running   1629  kernel/sched.c               rq->nr_running--;
nr_running   2669  kernel/sched.c               sum += cpu_rq(i)->nr_running;
nr_running   2717  kernel/sched.c               running += cpu_rq(i)->nr_running;
nr_running   3029  kernel/sched.c               if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
nr_running   3146  kernel/sched.c               if (*sd_idle && rq->nr_running)
nr_running   3166  kernel/sched.c               sum_nr_running += rq->nr_running;
nr_running   3426  kernel/sched.c               if (rq->nr_running == 1 && wl > imbalance)
nr_running   3496  kernel/sched.c               if (busiest->nr_running > 1) {
nr_running   3653  kernel/sched.c               if (busiest->nr_running > 1) {
nr_running   3743  kernel/sched.c               if (busiest_rq->nr_running <= 1)
nr_running   4399  kernel/sched.c               if (likely(rq->nr_running == rq->cfs.nr_running)) {
nr_running   4465  kernel/sched.c               if (unlikely(!rq->nr_running))
nr_running   6277  kernel/sched.c               if (!rq->nr_running)
nr_running   6560  kernel/sched.c               BUG_ON(rq->nr_running != 0);
nr_running   8202  kernel/sched.c               rq->nr_running = 0;
nr_running    162  kernel/sched_debug.c         SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
nr_running    245  kernel/sched_debug.c         P(nr_running);
nr_running    377  kernel/sched_fair.c          if (unlikely(nr_running > nr_latency)) {
nr_running    379  kernel/sched_fair.c          period *= nr_running;
nr_running    393  kernel/sched_fair.c          return calc_delta_weight(__sched_period(cfs_rq->nr_running), se);
nr_running    403  kernel/sched_fair.c          unsigned long nr_running = cfs_rq->nr_running;
nr_running    406  kernel/sched_fair.c          nr_running++;
nr_running    408  kernel/sched_fair.c          return __sched_period(nr_running);
nr_running    535  kernel/sched_fair.c          cfs_rq->nr_running++;
nr_running    549  kernel/sched_fair.c          cfs_rq->nr_running--;
nr_running    814  kernel/sched_fair.c          if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
nr_running    830  kernel/sched_fair.c          if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
nr_running    915  kernel/sched_fair.c          if (unlikely(cfs_rq->nr_running == 1))
nr_running    971  kernel/sched_fair.c          if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1)
nr_running   1291  kernel/sched_fair.c          if (unlikely(!cfs_rq->nr_running))
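
The listing shows the pattern in which nr_running is used: each runqueue keeps its own counter, bumped on enqueue and dropped on dequeue (kernel/sched.c:1624 and 1629), and the global nr_running() declared in include/linux/sched.h:134 is derived by summing the per-CPU counters (kernel/sched.c:2669). Below is a minimal userspace sketch of that counting pattern, not the kernel's actual code: the fixed NR_CPUS, the simplified struct rq, and the helper names inc_nr_running()/dec_nr_running() are assumptions made for illustration only.

/*
 * Userspace sketch of the per-runqueue counting pattern seen in the
 * cross-reference above. All types and the fixed CPU count are assumed
 * for illustration; they are not the kernel definitions.
 */
#include <stdio.h>

#define NR_CPUS 4                      /* assumed fixed CPU count for the sketch */

struct rq {
	unsigned long nr_running;      /* tasks currently runnable on this "CPU" */
};

static struct rq runqueues[NR_CPUS];

static void inc_nr_running(struct rq *rq) { rq->nr_running++; }  /* cf. kernel/sched.c:1624 */
static void dec_nr_running(struct rq *rq) { rq->nr_running--; }  /* cf. kernel/sched.c:1629 */

/* Global count: sum the per-CPU counters, as nr_running() does over cpu_rq(i). */
static unsigned long nr_running(void)
{
	unsigned long i, sum = 0;

	for (i = 0; i < NR_CPUS; i++)
		sum += runqueues[i].nr_running;
	return sum;
}

int main(void)
{
	inc_nr_running(&runqueues[0]);
	inc_nr_running(&runqueues[0]);
	inc_nr_running(&runqueues[2]);
	dec_nr_running(&runqueues[0]);

	printf("nr_running = %lu\n", nr_running());   /* prints 2 */
	return 0;
}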