cfs_rq            140 include/linux/sched.h struct cfs_rq;
cfs_rq           1008 include/linux/sched.h 	struct cfs_rq		*cfs_rq;
cfs_rq           1010 include/linux/sched.h 	struct cfs_rq		*my_q;
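
The two sched_entity fields above are easy to conflate: cfs_rq is the queue this entity is queued on, while my_q is the queue the entity itself owns, which only a group-scheduling entity has (for a plain task it stays NULL, which is how the descent loop at sched_fair.c:1296-1297 below detects a task). A minimal type sketch of that relationship, not kernel source:

struct cfs_rq;

struct sched_entity {
	/* the runqueue this entity is queued on (sched.h:1008) */
	struct cfs_rq		*cfs_rq;
	/* the runqueue this entity owns; NULL for plain tasks,
	 * non-NULL only for group entities (sched.h:1010) */
	struct cfs_rq		*my_q;
};

The wiring at sched.c:8076-8080 below follows the same split: a group entity's cfs_rq points at its parent's my_q (or the root rq->cfs), and its my_q points at the queue created for the group.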
cfs_rq            253 kernel/sched.c struct cfs_rq;
cfs_rq            267 kernel/sched.c 	struct cfs_rq **cfs_rq;
cfs_rq            361 kernel/sched.c 	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
cfs_rq            534 kernel/sched.c 	struct cfs_rq cfs;
cfs_rq           1466 kernel/sched.c 	rq_weight = tg->cfs_rq[cpu]->load.weight;
cfs_rq           1492 kernel/sched.c 	tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
cfs_rq           1493 kernel/sched.c 	tg->cfs_rq[cpu]->rq_weight = rq_weight;
cfs_rq           1516 kernel/sched.c 		rq_weight += tg->cfs_rq[i]->load.weight;
cfs_rq           1517 kernel/sched.c 		shares += tg->cfs_rq[i]->shares;
cfs_rq           1554 kernel/sched.c 		load = tg->parent->cfs_rq[cpu]->h_load;
cfs_rq           1555 kernel/sched.c 		load *= tg->cfs_rq[cpu]->shares;
cfs_rq           1556 kernel/sched.c 		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
cfs_rq           1559 kernel/sched.c 	tg->cfs_rq[cpu]->h_load = load;
cfs_rq           1605 kernel/sched.c 	cfs_rq->shares = shares;
cfs_rq           1824 kernel/sched.c 	struct cfs_rq *old_cfsrq = task_cfs_rq(p),
cfs_rq           8018 kernel/sched.c 	cfs_rq->tasks_timeline = RB_ROOT;
cfs_rq           8019 kernel/sched.c 	INIT_LIST_HEAD(&cfs_rq->tasks);
cfs_rq           8021 kernel/sched.c 	cfs_rq->rq = rq;
cfs_rq           8023 kernel/sched.c 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
cfs_rq           8064 kernel/sched.c 	tg->cfs_rq[cpu] = cfs_rq;
cfs_rq           8065 kernel/sched.c 	init_cfs_rq(cfs_rq, rq);
cfs_rq           8066 kernel/sched.c 	cfs_rq->tg = tg;
cfs_rq           8068 kernel/sched.c 		list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
cfs_rq           8076 kernel/sched.c 		se->cfs_rq = &rq->cfs;
cfs_rq           8078 kernel/sched.c 		se->cfs_rq = parent->my_q;
cfs_rq           8080 kernel/sched.c 	se->my_q = cfs_rq;
cfs_rq           8142 kernel/sched.c 		init_task_group.cfs_rq = (struct cfs_rq **)ptr;
cfs_rq           8149 kernel/sched.c 		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
cfs_rq           8459 kernel/sched.c 		if (tg->cfs_rq)
cfs_rq           8460 kernel/sched.c 			kfree(tg->cfs_rq[i]);
cfs_rq           8465 kernel/sched.c 	kfree(tg->cfs_rq);
cfs_rq           8472 kernel/sched.c 	struct cfs_rq *cfs_rq;
cfs_rq           8477 kernel/sched.c 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
cfs_rq           8478 kernel/sched.c 	if (!tg->cfs_rq)
cfs_rq           8489 kernel/sched.c 		cfs_rq = kmalloc_node(sizeof(struct cfs_rq),
cfs_rq           8491 kernel/sched.c 		if (!cfs_rq)
cfs_rq           8500 kernel/sched.c 		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se);
cfs_rq           8511 kernel/sched.c 	list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list,
cfs_rq           8517 kernel/sched.c 	list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
cfs_rq           8742 kernel/sched.c 	struct cfs_rq *cfs_rq = se->cfs_rq;
cfs_rq           8747 kernel/sched.c 		dequeue_entity(cfs_rq, se, 0);
cfs_rq           8753 kernel/sched.c 		enqueue_entity(cfs_rq, se, 0);
cfs_rq           8758 kernel/sched.c 	struct cfs_rq *cfs_rq = se->cfs_rq;
cfs_rq           8759 kernel/sched.c 	struct rq *rq = cfs_rq->rq;
cfs_rq           8807 kernel/sched.c 		cfs_rq_set_shares(tg->cfs_rq[i], 0);
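
The h_load step quoted at sched.c:1554-1559 is the core of hierarchical load propagation: walking down the group tree, each level takes the parent's hierarchical load and scales it by this group's share of the parent's weight, with the +1 guarding the division against an empty parent. A self-contained arithmetic sketch of that formula, with invented numbers:

#include <stdio.h>

/*
 * Toy rerun of the per-cpu h_load computation quoted above:
 *   h_load = parent_h_load * shares / (parent_weight + 1)
 * All values below are made up for illustration.
 */
int main(void)
{
	unsigned long parent_h_load = 1024;	/* parent's hierarchical load */
	unsigned long shares = 512;		/* this group's shares on the cpu */
	unsigned long parent_weight = 2048;	/* parent cfs_rq->load.weight */
	unsigned long h_load;

	h_load = parent_h_load * shares / (parent_weight + 1);
	/* 512/2049 of the parent's 1024 -> prints 255, i.e. roughly
	 * a quarter of the parent's hierarchical load */
	printf("h_load = %lu\n", h_load);
	return 0;
}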
cfs_rq            125 kernel/sched_debug.c 	struct task_group *tg = cfs_rq->tg;
cfs_rq            139 kernel/sched_debug.c 			SPLIT_NS(cfs_rq->exec_clock));
cfs_rq            142 kernel/sched_debug.c 	if (cfs_rq->rb_leftmost)
cfs_rq            143 kernel/sched_debug.c 		MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
cfs_rq            144 kernel/sched_debug.c 	last = __pick_last_entity(cfs_rq);
cfs_rq            162 kernel/sched_debug.c 	SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
cfs_rq            163 kernel/sched_debug.c 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
cfs_rq            184 kernel/sched_debug.c 			cfs_rq->nr_spread_over);
cfs_rq            187 kernel/sched_debug.c 	SEQ_printf(m, "  .%-30s: %lu\n", "shares", cfs_rq->shares);
cfs_rq             90 kernel/sched_fair.c 	return cfs_rq->rq;
cfs_rq            100 kernel/sched_fair.c static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
cfs_rq            102 kernel/sched_fair.c 	return p->se.cfs_rq;
cfs_rq            106 kernel/sched_fair.c static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
cfs_rq            108 kernel/sched_fair.c 	return se->cfs_rq;
cfs_rq            112 kernel/sched_fair.c static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
cfs_rq            120 kernel/sched_fair.c static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
cfs_rq            122 kernel/sched_fair.c 	return cfs_rq->tg->cfs_rq[this_cpu];
cfs_rq            127 kernel/sched_fair.c 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
cfs_rq            133 kernel/sched_fair.c 	if (se->cfs_rq == pse->cfs_rq)
cfs_rq            148 kernel/sched_fair.c 	return container_of(cfs_rq, struct rq, cfs);
cfs_rq            156 kernel/sched_fair.c static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
cfs_rq            161 kernel/sched_fair.c static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
cfs_rq            170 kernel/sched_fair.c static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
cfs_rq            175 kernel/sched_fair.c static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
cfs_rq            181 kernel/sched_fair.c 		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
cfs_rq            221 kernel/sched_fair.c 	return se->vruntime - cfs_rq->min_vruntime;
cfs_rq            229 kernel/sched_fair.c 	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
cfs_rq            232 kernel/sched_fair.c 	s64 key = entity_key(cfs_rq, se);
cfs_rq            245 kernel/sched_fair.c 		if (key < entity_key(cfs_rq, entry)) {
cfs_rq            258 kernel/sched_fair.c 		cfs_rq->rb_leftmost = &se->run_node;
cfs_rq            263 kernel/sched_fair.c 		cfs_rq->min_vruntime =
cfs_rq            264 kernel/sched_fair.c 			max_vruntime(cfs_rq->min_vruntime, se->vruntime);
cfs_rq            268 kernel/sched_fair.c 	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
cfs_rq            273 kernel/sched_fair.c 	if (cfs_rq->rb_leftmost == &se->run_node) {
cfs_rq            278 kernel/sched_fair.c 		cfs_rq->rb_leftmost = next_node;
cfs_rq            283 kernel/sched_fair.c 			cfs_rq->min_vruntime =
cfs_rq            284 kernel/sched_fair.c 				max_vruntime(cfs_rq->min_vruntime,
cfs_rq            289 kernel/sched_fair.c 	if (cfs_rq->next == se)
cfs_rq            290 kernel/sched_fair.c 		cfs_rq->next = NULL;
cfs_rq            292 kernel/sched_fair.c 	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
cfs_rq            297 kernel/sched_fair.c 	return cfs_rq->rb_leftmost;
cfs_rq            302 kernel/sched_fair.c 	return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
cfs_rq            307 kernel/sched_fair.c 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
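
Note that the rbtree bookkeeping above orders entities not by raw vruntime but by the signed key vruntime - min_vruntime (sched_fair.c:221, 232), and min_vruntime itself only ever moves forward via max_vruntime(). The signed difference is what keeps the ordering correct across u64 wraparound; the init value (u64)(-(1LL << 20)) at sched.c:8023 appears deliberately chosen to start the clock just below the wrap point so wraparound bugs would surface early. A standalone sketch of the comparison only, not the kernel's rbtree code:

#include <stdint.h>
#include <stdio.h>

/* Same idea as entity_key(): compare vruntimes by their signed
 * difference from min_vruntime so the order survives u64 overflow. */
static int64_t entity_key(uint64_t min_vruntime, uint64_t vruntime)
{
	return (int64_t)(vruntime - min_vruntime);
}

int main(void)
{
	uint64_t min_vruntime = UINT64_MAX - 100;	/* clock about to wrap */
	uint64_t a = UINT64_MAX - 50;			/* not yet wrapped */
	uint64_t b = 10;				/* already wrapped */

	/* As plain u64, b < a; as keys, b correctly sorts after a. */
	printf("key(a) = %lld\n", (long long)entity_key(min_vruntime, a)); /* 50 */
	printf("key(b) = %lld\n", (long long)entity_key(min_vruntime, b)); /* 111 */
	return 0;
}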
cfs_rq            393 kernel/sched_fair.c 	return calc_delta_weight(__sched_period(cfs_rq->nr_running), se);
cfs_rq            403 kernel/sched_fair.c 	unsigned long nr_running = cfs_rq->nr_running;
cfs_rq            424 kernel/sched_fair.c 	schedstat_add(cfs_rq, exec_clock, delta_exec);
cfs_rq            431 kernel/sched_fair.c 	struct sched_entity *curr = cfs_rq->curr;
cfs_rq            432 kernel/sched_fair.c 	u64 now = rq_of(cfs_rq)->clock;
cfs_rq            445 kernel/sched_fair.c 	__update_curr(cfs_rq, curr, delta_exec);
cfs_rq            458 kernel/sched_fair.c 	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
cfs_rq            470 kernel/sched_fair.c 	if (se != cfs_rq->curr)
cfs_rq            471 kernel/sched_fair.c 		update_stats_wait_start(cfs_rq, se);
cfs_rq            478 kernel/sched_fair.c 			rq_of(cfs_rq)->clock - se->wait_start));
cfs_rq            481 kernel/sched_fair.c 			rq_of(cfs_rq)->clock - se->wait_start);
cfs_rq            492 kernel/sched_fair.c 	if (se != cfs_rq->curr)
cfs_rq            493 kernel/sched_fair.c 		update_stats_wait_end(cfs_rq, se);
cfs_rq            505 kernel/sched_fair.c 	se->exec_start = rq_of(cfs_rq)->clock;
cfs_rq            516 kernel/sched_fair.c 	cfs_rq->task_weight += weight;
cfs_rq            528 kernel/sched_fair.c 	update_load_add(&cfs_rq->load, se->load.weight);
cfs_rq            530 kernel/sched_fair.c 		inc_cpu_load(rq_of(cfs_rq), se->load.weight);
cfs_rq            532 kernel/sched_fair.c 		add_cfs_task_weight(cfs_rq, se->load.weight);
cfs_rq            533 kernel/sched_fair.c 		list_add(&se->group_node, &cfs_rq->tasks);
cfs_rq            535 kernel/sched_fair.c 	cfs_rq->nr_running++;
cfs_rq            542 kernel/sched_fair.c 	update_load_sub(&cfs_rq->load, se->load.weight);
cfs_rq            544 kernel/sched_fair.c 		dec_cpu_load(rq_of(cfs_rq), se->load.weight);
cfs_rq            546 kernel/sched_fair.c 		add_cfs_task_weight(cfs_rq, -se->load.weight);
cfs_rq            549 kernel/sched_fair.c 	cfs_rq->nr_running--;
cfs_rq            557 kernel/sched_fair.c 		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
cfs_rq            572 kernel/sched_fair.c 		u64 delta = rq_of(cfs_rq)->clock - se->block_start;
cfs_rq            602 kernel/sched_fair.c 	s64 d = se->vruntime - cfs_rq->min_vruntime;
cfs_rq            608 kernel/sched_fair.c 		schedstat_inc(cfs_rq, nr_spread_over);
cfs_rq            617 kernel/sched_fair.c 	if (first_fair(cfs_rq)) {
cfs_rq            618 kernel/sched_fair.c 		vruntime = min_vruntime(cfs_rq->min_vruntime,
cfs_rq            619 kernel/sched_fair.c 				__pick_next_entity(cfs_rq)->vruntime);
cfs_rq            621 kernel/sched_fair.c 		vruntime = cfs_rq->min_vruntime;
cfs_rq            630 kernel/sched_fair.c 		vruntime += sched_vslice_add(cfs_rq, se);
cfs_rq            659 kernel/sched_fair.c 	update_curr(cfs_rq);
cfs_rq            660 kernel/sched_fair.c 	account_entity_enqueue(cfs_rq, se);
cfs_rq            663 kernel/sched_fair.c 		place_entity(cfs_rq, se, 0);
cfs_rq            664 kernel/sched_fair.c 		enqueue_sleeper(cfs_rq, se);
cfs_rq            667 kernel/sched_fair.c 	update_stats_enqueue(cfs_rq, se);
cfs_rq            668 kernel/sched_fair.c 	check_spread(cfs_rq, se);
cfs_rq            669 kernel/sched_fair.c 	if (se != cfs_rq->curr)
cfs_rq            670 kernel/sched_fair.c 		__enqueue_entity(cfs_rq, se);
cfs_rq            679 kernel/sched_fair.c 	update_curr(cfs_rq);
cfs_rq            681 kernel/sched_fair.c 	update_stats_dequeue(cfs_rq, se);
cfs_rq            688 kernel/sched_fair.c 				se->sleep_start = rq_of(cfs_rq)->clock;
cfs_rq            690 kernel/sched_fair.c 				se->block_start = rq_of(cfs_rq)->clock;
cfs_rq            695 kernel/sched_fair.c 	if (se != cfs_rq->curr)
cfs_rq            696 kernel/sched_fair.c 		__dequeue_entity(cfs_rq, se);
cfs_rq            697 kernel/sched_fair.c 	account_entity_dequeue(cfs_rq, se);
cfs_rq            708 kernel/sched_fair.c 	ideal_runtime = sched_slice(cfs_rq, curr);
cfs_rq            711 kernel/sched_fair.c 		resched_task(rq_of(cfs_rq)->curr);
cfs_rq            724 kernel/sched_fair.c 		update_stats_wait_end(cfs_rq, se);
cfs_rq            725 kernel/sched_fair.c 		__dequeue_entity(cfs_rq, se);
cfs_rq            728 kernel/sched_fair.c 	update_stats_curr_start(cfs_rq, se);
cfs_rq            729 kernel/sched_fair.c 	cfs_rq->curr = se;
cfs_rq            736 kernel/sched_fair.c 	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
cfs_rq            747 kernel/sched_fair.c 	struct rq *rq = rq_of(cfs_rq);
cfs_rq            748 kernel/sched_fair.c 	u64 pair_slice = rq->clock - cfs_rq->pair_start;
cfs_rq            750 kernel/sched_fair.c 	if (!cfs_rq->next || pair_slice > sched_slice(cfs_rq, cfs_rq->next)) {
cfs_rq            751 kernel/sched_fair.c 		cfs_rq->pair_start = rq->clock;
cfs_rq            755 kernel/sched_fair.c 	return cfs_rq->next;
cfs_rq            762 kernel/sched_fair.c 	if (first_fair(cfs_rq)) {
cfs_rq            763 kernel/sched_fair.c 		se = __pick_next_entity(cfs_rq);
cfs_rq            764 kernel/sched_fair.c 		se = pick_next(cfs_rq, se);
cfs_rq            765 kernel/sched_fair.c 		set_next_entity(cfs_rq, se);
cfs_rq            778 kernel/sched_fair.c 		update_curr(cfs_rq);
cfs_rq            780 kernel/sched_fair.c 	check_spread(cfs_rq, prev);
cfs_rq            782 kernel/sched_fair.c 		update_stats_wait_start(cfs_rq, prev);
cfs_rq            784 kernel/sched_fair.c 		__enqueue_entity(cfs_rq, prev);
cfs_rq            786 kernel/sched_fair.c 	cfs_rq->curr = NULL;
cfs_rq            795 kernel/sched_fair.c 	update_curr(cfs_rq);
cfs_rq            803 kernel/sched_fair.c 		resched_task(rq_of(cfs_rq)->curr);
cfs_rq            810 kernel/sched_fair.c 			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
cfs_rq            814 kernel/sched_fair.c 	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
cfs_rq            815 kernel/sched_fair.c 		check_preempt_tick(cfs_rq, curr);
cfs_rq            826 kernel/sched_fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq            830 kernel/sched_fair.c 	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
cfs_rq            831 kernel/sched_fair.c 		u64 slice = sched_slice(cfs_rq, se);
cfs_rq            865 kernel/sched_fair.c 	struct cfs_rq *cfs_rq;
cfs_rq            871 kernel/sched_fair.c 		cfs_rq = cfs_rq_of(se);
cfs_rq            872 kernel/sched_fair.c 		enqueue_entity(cfs_rq, se, wakeup);
cfs_rq            886 kernel/sched_fair.c 	struct cfs_rq *cfs_rq;
cfs_rq            890 kernel/sched_fair.c 		cfs_rq = cfs_rq_of(se);
cfs_rq            891 kernel/sched_fair.c 		dequeue_entity(cfs_rq, se, sleep);
cfs_rq            893 kernel/sched_fair.c 		if (cfs_rq->load.weight)
cfs_rq            909 kernel/sched_fair.c 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
cfs_rq            915 kernel/sched_fair.c 	if (unlikely(cfs_rq->nr_running == 1))
cfs_rq            923 kernel/sched_fair.c 		update_curr(cfs_rq);
cfs_rq            930 kernel/sched_fair.c 	rightmost = __pick_last_entity(cfs_rq);
cfs_rq           1240 kernel/sched_fair.c 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
cfs_rq           1246 kernel/sched_fair.c 		update_curr(cfs_rq);
cfs_rq           1288 kernel/sched_fair.c 	struct cfs_rq *cfs_rq = &rq->cfs;
cfs_rq           1291 kernel/sched_fair.c 	if (unlikely(!cfs_rq->nr_running))
cfs_rq           1295 kernel/sched_fair.c 		se = pick_next_entity(cfs_rq);
cfs_rq           1296 kernel/sched_fair.c 		cfs_rq = group_cfs_rq(se);
cfs_rq           1297 kernel/sched_fair.c 	} while (cfs_rq);
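
The do/while quoted at sched_fair.c:1295-1297 is the whole group-scheduling descent: pick the best entity at this level; if it is a group entity, step into the queue it owns (group_cfs_rq() returns se->my_q), and stop once the picked entity owns no queue, i.e. is a task. A simplified standalone model of that loop, with stand-in types:

#include <stddef.h>

/* Simplified types; the real pick_next_entity() walks the rbtree. */
struct cfs_rq;
struct sched_entity {
	struct cfs_rq *my_q;		/* owned group queue, NULL for tasks */
};
struct cfs_rq {
	struct sched_entity *best;	/* stand-in for the rbtree-leftmost pick */
	unsigned long nr_running;
};

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	return cfs_rq->best;
}

/* Mirrors the descent loop in pick_next_task_fair() quoted above. */
static struct sched_entity *pick_task(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se;

	if (!cfs_rq->nr_running)
		return NULL;
	do {
		se = pick_next_entity(cfs_rq);
		cfs_rq = se->my_q;	/* descend if se is a group entity */
	} while (cfs_rq);
	return se;			/* my_q == NULL: a task */
}

int main(void)
{
	struct sched_entity task = { .my_q = NULL };
	struct cfs_rq group_q = { .best = &task, .nr_running = 1 };
	struct sched_entity group = { .my_q = &group_q };
	struct cfs_rq root = { .best = &group, .nr_running = 1 };

	/* Descends root -> group_q and lands on the task entity. */
	return pick_task(&root) == &task ? 0 : 1;
}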
cfs_rq           1311 kernel/sched_fair.c 	struct cfs_rq *cfs_rq;
cfs_rq           1314 kernel/sched_fair.c 		cfs_rq = cfs_rq_of(se);
cfs_rq           1315 kernel/sched_fair.c 		put_prev_entity(cfs_rq, se);
cfs_rq           1337 kernel/sched_fair.c 	if (next == &cfs_rq->tasks)
cfs_rq           1342 kernel/sched_fair.c 	cfs_rq->balance_iterator = next->next;
cfs_rq           1349 kernel/sched_fair.c 	struct cfs_rq *cfs_rq = arg;
cfs_rq           1351 kernel/sched_fair.c 	return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
cfs_rq           1356 kernel/sched_fair.c 	struct cfs_rq *cfs_rq = arg;
cfs_rq           1358 kernel/sched_fair.c 	return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
cfs_rq           1371 kernel/sched_fair.c 	cfs_rq_iterator.arg = cfs_rq;
cfs_rq           1393 kernel/sched_fair.c 		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
cfs_rq           1409 kernel/sched_fair.c 				tg->cfs_rq[busiest_cpu]);
cfs_rq           1442 kernel/sched_fair.c 	struct cfs_rq *busy_cfs_rq;
cfs_rq           1468 kernel/sched_fair.c 	struct cfs_rq *cfs_rq;
cfs_rq           1472 kernel/sched_fair.c 		cfs_rq = cfs_rq_of(se);
cfs_rq           1473 kernel/sched_fair.c 		entity_tick(cfs_rq, se, queued);
cfs_rq           1488 kernel/sched_fair.c 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
cfs_rq           1489 kernel/sched_fair.c 	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
cfs_rq           1494 kernel/sched_fair.c 	update_curr(cfs_rq);
cfs_rq           1495 kernel/sched_fair.c 	place_entity(cfs_rq, se, 1);
cfs_rq           1563 kernel/sched_fair.c 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
cfs_rq           1565 kernel/sched_fair.c 	update_curr(cfs_rq);
cfs_rq           1566 kernel/sched_fair.c 	place_entity(cfs_rq, &p->se, 1);
cfs_rq           1607 kernel/sched_fair.c 	struct cfs_rq *cfs_rq;
cfs_rq           1610 kernel/sched_fair.c 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
cfs_rq           1611 kernel/sched_fair.c 		print_cfs_rq(m, cpu, cfs_rq);