this_rq           909 include/linux/sched.h 	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
this_rq           914 include/linux/sched.h 	int (*move_one_task) (struct rq *this_rq, int this_cpu,
this_rq           917 include/linux/sched.h 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
this_rq           918 include/linux/sched.h 	void (*post_schedule) (struct rq *this_rq);
this_rq           919 include/linux/sched.h 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
this_rq           931 include/linux/sched.h 	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
this_rq           933 include/linux/sched.h 	void (*switched_to) (struct rq *this_rq, struct task_struct *task,
this_rq           935 include/linux/sched.h 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
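
The prototypes above are the per-class balancing and notification hooks of struct sched_class, which each scheduling class (fair, RT, idle) fills in. A minimal sketch of how the listed members sit inside the structure in this kernel era; the elided members and the exact placement of the #ifdef are assumptions, so treat this as a sketch rather than a quote of include/linux/sched.h:

struct rq;
struct task_struct;
struct sched_domain;

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

struct sched_class {
	const struct sched_class *next;

	/* ... enqueue/dequeue/pick_next_task etc. elided ... */

#ifdef CONFIG_SMP
	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
			struct rq *busiest, unsigned long max_load_move,
			struct sched_domain *sd, enum cpu_idle_type idle,
			int *all_pinned, int *this_best_prio);

	int (*move_one_task) (struct rq *this_rq, int this_cpu,
			      struct rq *busiest, struct sched_domain *sd,
			      enum cpu_idle_type idle);

	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
	void (*post_schedule) (struct rq *this_rq);
	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
#endif

	/* ... set_curr_task, task_tick, task_new elided ... */

	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
			       int running);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task,
			     int running);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio, int running);
};
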
this_rq           986 kernel/sched.c 	rq = this_rq();
this_rq          1068 kernel/sched.c 	if (rq == this_rq()) {
this_rq          2588 kernel/sched.c 	struct rq *rq = this_rq();
this_rq          2654 kernel/sched.c 	finish_task_switch(this_rq(), prev);
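
The call sites above (lines 986 through 2654) use the this_rq() accessor rather than taking a runqueue parameter. In kernel/sched.c of this era the runqueues are per-CPU data and this_rq() resolves to the current CPU's entry; a sketch of the accessors, quoted from memory, so verify against the tree this listing was generated from:

/* Per-CPU runqueues and their accessors (kernel/sched.c, 2.6.2x era). */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
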
this_rq          2733 kernel/sched.c 	unsigned long this_load = this_rq->load.weight;
this_rq          2736 kernel/sched.c 	this_rq->nr_load_updates++;
this_rq          2744 kernel/sched.c 		old_load = this_rq->cpu_load[i];
this_rq          2753 kernel/sched.c 		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
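
Lines 2733-2753 come from update_cpu_load(): rq->cpu_load[] holds a family of decaying averages of rq->load.weight, where a larger index reacts more slowly to load changes, and the average is rounded up while load is increasing so it does not get stuck just below the target. A standalone illustration of that arithmetic (ordinary userspace C; the load value is made up):

/* Illustration only, not kernel code: the cpu_load[i] update
 *	cpu_load[i] = (old_load * (scale - 1) + new_load) >> i
 * with scale = 1 << i, as used by update_cpu_load() above. */
#include <stdio.h>

#define CPU_LOAD_IDX_MAX 5

int main(void)
{
	unsigned long cpu_load[CPU_LOAD_IDX_MAX] = { 0 };
	unsigned long this_load = 3072;		/* pretend rq->load.weight */
	int i, scale;

	for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
		unsigned long old_load = cpu_load[i];
		unsigned long new_load = this_load;

		/* round up while load rises, so the average can reach it */
		if (new_load > old_load)
			new_load += scale - 1;
		cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
		printf("cpu_load[%d] = %lu\n", i, cpu_load[i]);
	}
	return 0;
}
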
this_rq          2815 kernel/sched.c 		spin_unlock(&this_rq->lock);
this_rq          2819 kernel/sched.c 		if (busiest < this_rq) {
this_rq          2820 kernel/sched.c 			spin_unlock(&this_rq->lock);
this_rq          2822 kernel/sched.c 			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
this_rq          2834 kernel/sched.c 	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
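
Lines 2815-2834 belong to the two-runqueue locking helpers. If the trylock on busiest->lock fails and busiest has the lower address, this_rq->lock is dropped so the locks can be taken in address order (avoiding ABBA deadlock), and the nonzero return value tells the caller to revalidate anything it computed under the old lock. A sketch, close to the kernel code of this era but reconstructed from memory:

static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	int ret = 0;

	if (unlikely(!spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			/* take the lower-addressed lock first */
			spin_unlock(&this_rq->lock);
			spin_lock(&busiest->lock);
			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
			ret = 1;	/* caller must recheck its state */
		} else
			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
	}
	return ret;
}

static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
{
	spin_unlock(&busiest->lock);
	/* undo the lockdep subclass the nested acquisition may have set */
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
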
this_rq          2893 kernel/sched.c 	activate_task(this_rq, p, 0);
this_rq          2898 kernel/sched.c 	check_preempt_curr(this_rq, p, 0);
this_rq          2979 kernel/sched.c 	pull_task(busiest, p, this_rq, this_cpu);
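
Lines 2893-2979 are pull_task() and its caller balance_tasks(): moving a task between runqueues is just dequeue on the source, retarget, enqueue on the destination, then a preemption check on the destination CPU. A sketch of pull_task(), close to the code of this era:

/* Both runqueue locks are held by the caller. */
static void pull_task(struct rq *src_rq, struct task_struct *p,
		      struct rq *this_rq, int this_cpu)
{
	deactivate_task(src_rq, p, 0);
	set_task_cpu(p, this_cpu);
	activate_task(this_rq, p, 0);
	/* the pulled task may preempt whatever this_rq is running */
	check_preempt_curr(this_rq, p, 0);
}
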
this_rq          3020 kernel/sched.c 	int this_best_prio = this_rq->curr->prio;
this_rq          3024 kernel/sched.c 			class->load_balance(this_rq, this_cpu, busiest,
this_rq          3029 kernel/sched.c 		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
this_rq          3047 kernel/sched.c 			pull_task(busiest, p, this_rq, this_cpu);
this_rq          3076 kernel/sched.c 		if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
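
Lines 3020-3076 are move_tasks() and move_one_task(): both walk the scheduling classes, highest priority first, and delegate the real work to the per-class load_balance/move_one_task hooks listed from include/linux/sched.h above. A condensed sketch of the move_tasks() loop, close to the code of this era:

static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
		      unsigned long max_load_move,
		      struct sched_domain *sd, enum cpu_idle_type idle,
		      int *all_pinned)
{
	const struct sched_class *class = sched_class_highest;
	unsigned long total_load_moved = 0;
	int this_best_prio = this_rq->curr->prio;

	do {
		total_load_moved +=
			class->load_balance(this_rq, this_cpu, busiest,
				max_load_move - total_load_moved,
				sd, idle, all_pinned, &this_best_prio);
		class = class->next;

		/* a newly idle CPU only needs one task to run */
		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
			break;
	} while (class && max_load_move > total_load_moved);

	return total_load_moved > 0;
}
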
this_rq          3491 kernel/sched.c 	BUG_ON(busiest == this_rq);
this_rq          3504 kernel/sched.c 		double_rq_lock(this_rq, busiest);
this_rq          3505 kernel/sched.c 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
this_rq          3507 kernel/sched.c 		double_rq_unlock(this_rq, busiest);
this_rq          3634 kernel/sched.c 	update_shares_locked(this_rq, sd);
this_rq          3648 kernel/sched.c 	BUG_ON(busiest == this_rq);
this_rq          3655 kernel/sched.c 		double_lock_balance(this_rq, busiest);
this_rq          3658 kernel/sched.c 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
this_rq          3661 kernel/sched.c 		double_unlock_balance(this_rq, busiest);
this_rq          3678 kernel/sched.c 	update_shares_locked(this_rq, sd);
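
Lines 3491-3507 and 3634-3678 are the two callers of move_tasks(): the periodic load_balance() path and the newly-idle load_balance_newidle() path. The difference visible here is the locking: load_balance() holds no runqueue lock on entry and takes both with double_rq_lock(), while load_balance_newidle() is entered with this_rq->lock already held and so pairs double_lock_balance() with double_unlock_balance(). A condensed sketch of the newly-idle side; the function name is made up and the find_busiest_group()/find_busiest_queue() selection that produces busiest and imbalance is elided:

/* Sketch: pull from an already-chosen busiest rq while this_rq->lock
 * is held, as load_balance_newidle() does around line 3648. */
static int newidle_pull_sketch(struct rq *this_rq, int this_cpu,
			       struct rq *busiest, unsigned long imbalance,
			       struct sched_domain *sd)
{
	int ld_moved = 0, all_pinned = 0;

	update_shares_locked(this_rq, sd);

	if (busiest && busiest->nr_running > 1) {
		BUG_ON(busiest == this_rq);

		double_lock_balance(this_rq, busiest);
		ld_moved = move_tasks(this_rq, this_cpu, busiest,
				      imbalance, sd, CPU_NEWLY_IDLE,
				      &all_pinned);
		double_unlock_balance(this_rq, busiest);
	}

	update_shares_locked(this_rq, sd);
	return ld_moved;
}
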
this_rq          3710 kernel/sched.c 			pulled_task = load_balance_newidle(this_cpu, this_rq,
this_rq          3719 kernel/sched.c 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
this_rq          3724 kernel/sched.c 		this_rq->next_balance = next_balance;
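
Lines 3710-3724 are idle_balance(), called when a CPU is about to go idle: it walks the CPU's sched domains, tries load_balance_newidle() on each, and resets this_rq->next_balance, since the old deadline was computed while the CPU was busy. A condensed sketch, close to the code of this era (the scratch cpumask argument some versions pass to load_balance_newidle() is dropped):

static void idle_balance(int this_cpu, struct rq *this_rq)
{
	struct sched_domain *sd;
	int pulled_task = 0;
	unsigned long next_balance = jiffies + HZ;

	for_each_domain(this_cpu, sd) {
		unsigned long interval;

		if (!(sd->flags & SD_LOAD_BALANCE))
			continue;

		if (sd->flags & SD_BALANCE_NEWIDLE)
			pulled_task = load_balance_newidle(this_cpu, this_rq,
							   sd);

		interval = msecs_to_jiffies(sd->balance_interval);
		if (time_after(next_balance, sd->last_balance + interval))
			next_balance = sd->last_balance + interval;
		if (pulled_task)
			break;		/* we have something to run */
	}
	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
		/* next_balance was set while busy; recompute it for idle */
		this_rq->next_balance = next_balance;
	}
}
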
this_rq          3940 kernel/sched.c 	struct rq *this_rq = cpu_rq(this_cpu);
this_rq          3941 kernel/sched.c 	enum cpu_idle_type idle = this_rq->idle_at_tick ?
this_rq          3952 kernel/sched.c 	if (this_rq->idle_at_tick &&
this_rq          3971 kernel/sched.c 			if (time_after(this_rq->next_balance, rq->next_balance))
this_rq          3972 kernel/sched.c 				this_rq->next_balance = rq->next_balance;
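
Lines 3940-3972 are run_rebalance_domains(), the SCHED_SOFTIRQ handler. When this CPU was idle at the tick and owns nohz idle balancing, it also rebalances on behalf of the other tickless-idle CPUs and adopts the earliest of their next_balance deadlines so it wakes up again in time. A condensed sketch; the cpumask handling is simplified compared with the original:

static void run_rebalance_domains(struct softirq_action *h)
{
	int this_cpu = smp_processor_id();
	struct rq *this_rq = cpu_rq(this_cpu);
	enum cpu_idle_type idle = this_rq->idle_at_tick ?
						CPU_IDLE : CPU_NOT_IDLE;

	rebalance_domains(this_cpu, idle);

#ifdef CONFIG_NO_HZ
	if (this_rq->idle_at_tick &&
	    atomic_read(&nohz.load_balancer) == this_cpu) {
		int balance_cpu;

		for_each_cpu_mask_nr(balance_cpu, nohz.cpu_mask) {
			struct rq *rq = cpu_rq(balance_cpu);

			if (balance_cpu == this_cpu)
				continue;
			/* stop if this CPU got real work to do */
			if (need_resched())
				break;

			rebalance_domains(balance_cpu, CPU_IDLE);

			if (time_after(this_rq->next_balance, rq->next_balance))
				this_rq->next_balance = rq->next_balance;
		}
	}
#endif
}
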
this_rq          4138 kernel/sched.c 	struct rq *rq = this_rq();
this_rq          4184 kernel/sched.c 	struct rq *rq = this_rq();
this_rq          4377 kernel/sched.c 	schedstat_inc(this_rq(), sched_count);
this_rq          4380 kernel/sched.c 		schedstat_inc(this_rq(), bkl_count);
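
Lines 4377-4380 bump scheduler statistics on the local runqueue from inside schedule(); schedstat_inc() compiles away entirely without CONFIG_SCHEDSTATS. A sketch of the helpers as they appear in kernel/sched_stats.h of this era (from memory):

#ifdef CONFIG_SCHEDSTATS
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
#else
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
#endif
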
this_rq          1096 kernel/sched_fair.c 	struct task_struct *curr = this_rq->curr;
this_rq          1161 kernel/sched_fair.c 	struct rq *this_rq;
this_rq          1167 kernel/sched_fair.c 	this_rq		= cpu_rq(this_cpu);
this_rq          1199 kernel/sched_fair.c 	if (wake_affine(this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
this_rq          1373 kernel/sched_fair.c 	return balance_tasks(this_rq, this_cpu, busiest,
this_rq          1407 kernel/sched_fair.c 		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
this_rq          1432 kernel/sched_fair.c 	return __load_balance_fair(this_rq, this_cpu, busiest,
this_rq          1454 kernel/sched_fair.c 		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
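
Lines 1373-1454 are the fair class's side of the hooks: load_balance_fair() and move_one_task_fair() wrap each leaf cfs_rq of the busiest runqueue in an rq_iterator and hand it to the generic balance_tasks()/iter_move_one_task() helpers in kernel/sched.c. A condensed sketch of the move_one_task side, close to the code of this era:

static int
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct cfs_rq *busy_cfs_rq;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
		/* the iterator walks the tasks of this particular cfs_rq */
		cfs_rq_iterator.arg = busy_cfs_rq;
		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
				       &cfs_rq_iterator))
			return 1;
	}

	return 0;
}
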
this_rq          1171 kernel/sched_rt.c 	int this_cpu = this_rq->cpu, ret = 0, cpu;
this_rq          1175 kernel/sched_rt.c 	if (likely(!rt_overloaded(this_rq)))
this_rq          1178 kernel/sched_rt.c 	next = pick_next_task_rt(this_rq);
this_rq          1180 kernel/sched_rt.c 	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
this_rq          1192 kernel/sched_rt.c 		if (double_lock_balance(this_rq, src_rq)) {
this_rq          1195 kernel/sched_rt.c 			next = pick_next_task_rt(this_rq);
this_rq          1234 kernel/sched_rt.c 			activate_task(this_rq, p, 0);
this_rq          1249 kernel/sched_rt.c 		double_unlock_balance(this_rq, src_rq);
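
Lines 1171-1249 are pull_rt_task(): a runqueue about to schedule a lower-priority task scans the rt-overloaded CPUs in its root domain and pulls a higher-priority RT task over. Note the recheck after double_lock_balance(), which may have dropped this_rq->lock. A condensed sketch; the priority-versus-current checks and some bookkeeping of the original are trimmed:

static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p, *next;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	next = pick_next_task_rt(this_rq);

	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);
		if (double_lock_balance(this_rq, src_rq)) {
			/* this_rq->lock was dropped: our next may have changed */
			next = pick_next_task_rt(this_rq);
		}

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/* pull p only if it beats what we would otherwise run */
		if (p && (!next || p->prio < next->prio)) {
			ret = 1;
			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			next = p;
		}

		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}
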