task_rq           937 kernel/sched.c 		struct rq *rq = task_rq(p);
task_rq           939 kernel/sched.c 		if (likely(rq == task_rq(p)))
task_rq           957 kernel/sched.c 		rq = task_rq(p);
task_rq           959 kernel/sched.c 		if (likely(rq == task_rq(p)))
task_rq          1160 kernel/sched.c 	assert_spin_locked(&task_rq(p)->lock);
task_rq          1233 kernel/sched.c 	assert_spin_locked(&task_rq(p)->lock);
task_rq          1865 kernel/sched.c 	struct rq *rq = task_rq(p);
task_rq          1914 kernel/sched.c 		rq = task_rq(p);
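The kernel/sched.c hits all turn on one property: task_rq(p), defined in this era as cpu_rq(task_cpu(p)), is only a racy hint, because p can migrate the instant after the lookup. The pairs at 937/939 and 957/959 are the lock-and-revalidate loops of __task_rq_lock() and task_rq_lock(); the asserts at 1160 and 1233 mark callers that must already hold that lock; the unlocked reads at 1865 and 1914 are hint-only peeks that get re-verified later. A minimal sketch of the revalidation loop, reconstructed from the 2.6-era scheduler rather than copied from this tree:

	static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
		__acquires(rq->lock)
	{
		struct rq *rq;

		for (;;) {
			local_irq_save(*flags);
			rq = task_rq(p);		/* racy read: p may migrate now */
			spin_lock(&rq->lock);
			if (likely(rq == task_rq(p)))	/* still p's rq under the lock? */
				return rq;
			/* lost a migration race; drop the stale lock and retry */
			spin_unlock_irqrestore(&rq->lock, *flags);
		}
	}

__task_rq_lock() (the 937/939 pair) is the same loop without the IRQ save, for callers that already run with interrupts disabled.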
task_rq           158 kernel/sched_fair.c 	return &task_rq(p)->cfs;
task_rq           164 kernel/sched_fair.c 	struct rq *rq = task_rq(p);
task_rq           828 kernel/sched_fair.c 	WARN_ON(task_rq(p) != rq);
task_rq           977 kernel/sched_fair.c 			&& !task_hot(p, task_rq(p)->clock, sd))) {
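The sched_fair.c hits are mostly accessors: 158 and 164 reach the CFS runqueue embedded in the task's runqueue, 828 is a sanity check that a caller passed the runqueue the task actually lives on, and 977 reads task_rq(p)->clock to feed task_hot()'s cache-hotness test during load balancing. A sketch of the two accessors as they look in the non-group-scheduling (!CONFIG_FAIR_GROUP_SCHED) build, reconstructed rather than quoted:

	/* Each struct rq embeds one struct cfs_rq, so task_rq(p) is the
	 * only indirection needed to find p's CFS runqueue. */
	static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
	{
		return &task_rq(p)->cfs;
	}

	static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
	{
		struct task_struct *p = task_of(se);
		struct rq *rq = task_rq(p);

		return &rq->cfs;
	}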
task_rq           187 kernel/sched_rt.c 	struct rq *rq = task_rq(p);
task_rq           771 kernel/sched_rt.c 	struct rq *rq = task_rq(p);
task_rq           984 kernel/sched_rt.c 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
task_rq          1057 kernel/sched_rt.c 			if (unlikely(task_rq(task) != rq ||
task_rq          1320 kernel/sched_rt.c 		struct rq *rq = task_rq(p);
task_rq          1398 kernel/sched_rt.c 		    rq != task_rq(p))
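The sched_rt.c hits apply the same revalidation discipline to RT push/pull balancing: 984 consults the root domain's cpupri through task_rq(task)->rd, and 1057 re-verifies task_rq(task) after double_lock_balance(), which may drop the source runqueue lock in order to take both locks in a deadlock-safe order. A sketch of that recheck, reconstructed from the era's find_lock_lowest_rq() (the lines beyond the one shown at 1057 are from memory, not this listing):

	if (double_lock_balance(rq, lowest_rq)) {
		/*
		 * double_lock_balance() may have released rq->lock while
		 * ordering the two locks, so the task could have migrated,
		 * changed affinity, started running, or been dequeued in
		 * the meantime: re-verify everything before pushing.
		 */
		if (unlikely(task_rq(task) != rq ||
			     !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
			     task_running(rq, task) ||
			     !task->se.on_rq)) {
			spin_unlock(&lowest_rq->lock);
			lowest_rq = NULL;
			break;
		}
	}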
task_rq           168 kernel/sched_stats.h 	unsigned long long now = task_rq(t)->clock, delta = 0;
task_rq           176 kernel/sched_stats.h 	rq_sched_info_dequeued(task_rq(t), delta);
task_rq           186 kernel/sched_stats.h 	unsigned long long now = task_rq(t)->clock, delta = 0;
task_rq           195 kernel/sched_stats.h 	rq_sched_info_arrive(task_rq(t), delta);
task_rq           217 kernel/sched_stats.h 			t->sched_info.last_queued = task_rq(t)->clock;
task_rq           229 kernel/sched_stats.h 	unsigned long long delta = task_rq(t)->clock -
task_rq           233 kernel/sched_stats.h 	rq_sched_info_depart(task_rq(t), delta);
task_rq           247 kernel/sched_stats.h 	struct rq *rq = task_rq(prev);
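The sched_stats.h hits are schedstats delay accounting, and every one of them uses the clock of the task's current runqueue as its time base: 217 stamps last_queued when a task becomes runnable, 168/176 and 186/195 turn that stamp into a dequeue/arrival delta, 229/233 account the departure, and 247 grabs the runqueue once for the context-switch bookkeeping. A sketch of the arrival side, reconstructed from the 2.6-era sched_info_arrive():

	static void sched_info_arrive(struct task_struct *t)
	{
		unsigned long long now = task_rq(t)->clock, delta = 0;

		/* run_delay: time spent runnable but queued since last_queued */
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
		t->sched_info.last_queued = 0;
		t->sched_info.run_delay += delta;
		t->sched_info.last_arrival = now;
		t->sched_info.pcount++;

		rq_sched_info_arrive(task_rq(t), delta);
	}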