cpu_rq            634 kernel/sched.c 	for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
cpu_rq            638 kernel/sched.c #define task_rq(p)		cpu_rq(task_cpu(p))
cpu_rq            639 kernel/sched.c #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
cpu_rq            665 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu_rq           1088 kernel/sched.c 		hrtick_clear(cpu_rq(cpu));
cpu_rq           1179 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu_rq           1201 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu_rq           1440 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu_rq           1530 kernel/sched.c 		struct rq *rq = cpu_rq(i);
cpu_rq           1552 kernel/sched.c 		load = cpu_rq(cpu)->load.weight;
cpu_rq           1789 kernel/sched.c 	return cpu_rq(cpu)->load.weight;
cpu_rq           1823 kernel/sched.c 	struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
cpu_rq           2021 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu_rq           2036 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu_rq           2669 kernel/sched.c 		sum += cpu_rq(i)->nr_running;
cpu_rq           2679 kernel/sched.c 		sum += cpu_rq(i)->nr_uninterruptible;
cpu_rq           2697 kernel/sched.c 		sum += cpu_rq(i)->nr_switches;
cpu_rq           2707 kernel/sched.c 		sum += atomic_read(&cpu_rq(i)->nr_iowait);
cpu_rq           2717 kernel/sched.c 		running += cpu_rq(i)->nr_running;
cpu_rq           2718 kernel/sched.c 		uninterruptible += cpu_rq(i)->nr_uninterruptible;
cpu_rq           3144 kernel/sched.c 			rq = cpu_rq(i);
cpu_rq           3423 kernel/sched.c 		rq = cpu_rq(i);
cpu_rq           3746 kernel/sched.c 	target_rq = cpu_rq(target_cpu);
cpu_rq           3814 kernel/sched.c 		cpu_rq(cpu)->in_nohz_recently = 1;
cpu_rq           3864 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu_rq           3940 kernel/sched.c 	struct rq *this_rq = cpu_rq(this_cpu);
cpu_rq           3970 kernel/sched.c 			rq = cpu_rq(balance_cpu);
cpu_rq           4265 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu_rq           4431 kernel/sched.c 	rq = cpu_rq(cpu);
cpu_rq           4484 kernel/sched.c 		rq = cpu_rq(cpu);
cpu_rq           5075 kernel/sched.c 	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
cpu_rq           5084 kernel/sched.c 	return cpu_rq(cpu)->idle;
cpu_rq           5856 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu_rq           6011 kernel/sched.c 	rq_src = cpu_rq(src_cpu);
cpu_rq           6012 kernel/sched.c 	rq_dest = cpu_rq(dest_cpu);
cpu_rq           6048 kernel/sched.c 	rq = cpu_rq(cpu);
cpu_rq           6172 kernel/sched.c 	struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR));
cpu_rq           6209 kernel/sched.c 	struct rq *rq = cpu_rq(this_cpu);
cpu_rq           6248 kernel/sched.c 	struct rq *rq = cpu_rq(dead_cpu);
cpu_rq           6273 kernel/sched.c 	struct rq *rq = cpu_rq(dead_cpu);
cpu_rq           6511 kernel/sched.c 		cpu_rq(cpu)->migration_thread = p;
cpu_rq           6517 kernel/sched.c 		wake_up_process(cpu_rq(cpu)->migration_thread);
cpu_rq           6520 kernel/sched.c 		rq = cpu_rq(cpu);
cpu_rq           6533 kernel/sched.c 		if (!cpu_rq(cpu)->migration_thread)
cpu_rq           6536 kernel/sched.c 		kthread_bind(cpu_rq(cpu)->migration_thread,
cpu_rq           6538 kernel/sched.c 		kthread_stop(cpu_rq(cpu)->migration_thread);
cpu_rq           6539 kernel/sched.c 		cpu_rq(cpu)->migration_thread = NULL;
cpu_rq           6546 kernel/sched.c 		rq = cpu_rq(cpu);
cpu_rq           6582 kernel/sched.c 		rq = cpu_rq(cpu);
cpu_rq           6872 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu_rq           7954 kernel/sched.c 		disable_runtime(cpu_rq(cpu));
cpu_rq           7961 kernel/sched.c 		enable_runtime(cpu_rq(cpu));
cpu_rq           8063 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu_rq           8092 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu_rq           8200 kernel/sched.c 		rq = cpu_rq(i);
cpu_rq           8487 kernel/sched.c 		rq = cpu_rq(i);
cpu_rq           8512 kernel/sched.c 			&cpu_rq(cpu)->leaf_cfs_rq_list);
cpu_rq           8576 kernel/sched.c 		rq = cpu_rq(i);
cpu_rq           8601 kernel/sched.c 			&cpu_rq(cpu)->leaf_rt_rq_list);
cpu_rq           9043 kernel/sched.c 		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
cpu_rq           9297 kernel/sched.c 		spin_lock_irq(&cpu_rq(i)->lock);
cpu_rq           9299 kernel/sched.c 		spin_unlock_irq(&cpu_rq(i)->lock);
cpu_rq           9320 kernel/sched.c 		spin_lock_irq(&cpu_rq(i)->lock);
cpu_rq           9322 kernel/sched.c 		spin_unlock_irq(&cpu_rq(i)->lock);
cpu_rq            177 kernel/sched_fair.c 	return &cpu_rq(this_cpu)->cfs;
cpu_rq            971 kernel/sched_fair.c 	if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1)
cpu_rq           1167 kernel/sched_fair.c 	this_rq		= cpu_rq(this_cpu);
cpu_rq           1610 kernel/sched_fair.c 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
cpu_rq            144 kernel/sched_rt.c 	return cpu_rq(smp_processor_id())->rd->span;
cpu_rq            223 kernel/sched_rt.c 	return &cpu_rq(cpu)->rt;
cpu_rq            240 kernel/sched_rt.c 	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
cpu_rq           1047 kernel/sched_rt.c 		lowest_rq = cpu_rq(cpu);
cpu_rq           1184 kernel/sched_rt.c 		src_rq = cpu_rq(cpu);
cpu_rq           1541 kernel/sched_rt.c 	for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
cpu_rq             21 kernel/sched_stats.h 		struct rq *rq = cpu_rq(cpu);
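
All of the references above go through the same per-CPU runqueue accessor: cpu_rq(cpu) yields the struct rq owned by a given CPU, and the helpers at kernel/sched.c:638-639 (task_rq(), cpu_curr()) are thin wrappers over it. The sketch below is a minimal, standalone illustration of that pattern, not kernel code: a plain array stands in for the kernel's per-CPU area (in the kernel of this era cpu_rq() expands to &per_cpu(runqueues, cpu)), struct rq is reduced to the fields the listing touches, and NR_CPUS, the field subset, and main() are assumptions made purely for the example.

/*
 * Userspace sketch of the cpu_rq() accessor pattern.
 * A fixed array models the per-CPU runqueues; the summation and
 * idle-check helpers mirror the usage sites listed above
 * (e.g. kernel/sched.c:2669 and kernel/sched.c:5075).
 */
#include <stdio.h>

#define NR_CPUS 4				/* illustrative CPU count */

struct task_struct { int pid; };

struct rq {
	unsigned long nr_running;		/* runnable tasks on this CPU */
	unsigned long nr_switches;		/* context switches on this CPU */
	struct task_struct *curr;		/* task currently running */
	struct task_struct *idle;		/* this CPU's idle task */
};

static struct rq runqueues[NR_CPUS];

/* Accessors mirroring the macros around kernel/sched.c:638-639 */
#define cpu_rq(cpu)	(&runqueues[(cpu)])
#define cpu_curr(cpu)	(cpu_rq(cpu)->curr)

/* Pattern from nr_running(): sum one rq field over every CPU */
static unsigned long total_running(void)
{
	unsigned long sum = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++)
		sum += cpu_rq(i)->nr_running;
	return sum;
}

/* Pattern from idle_cpu(): a CPU is idle when its current task is its idle task */
static int idle_cpu(int cpu)
{
	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
}

int main(void)
{
	cpu_rq(0)->nr_running = 2;
	cpu_rq(1)->nr_running = 1;

	printf("total running: %lu, cpu 2 idle: %d\n",
	       total_running(), idle_cpu(2));
	return 0;
}

The point of the indirection is that every usage site, whether it is load balancing, migration-thread setup, or the /proc statistics loops, addresses another CPU's runqueue with the same expression, so locking and access rules stay uniform across the scheduler.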