rt_rq            1024 include/linux/sched.h 	struct rt_rq		*rt_rq;
rt_rq            1026 include/linux/sched.h 	struct rt_rq		*my_q;
rt_rq             273 kernel/sched.c 	struct rt_rq **rt_rq;
rt_rq             366 kernel/sched.c 	p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
rt_rq             535 kernel/sched.c 	struct rt_rq rt;
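Taken together, the hits above in include/linux/sched.h and the struct declarations near the top of kernel/sched.c show how struct rt_rq is wired into the scheduler: every per-CPU struct rq embeds one rt_rq directly, every struct sched_rt_entity points at the rt_rq it is queued on (and, if it is a group entity, owns a child rt_rq via my_q), and every task_group keeps a per-CPU array of rt_rq pointers. A minimal sketch of just those relationships; surrounding fields and the CONFIG_RT_GROUP_SCHED guards are omitted:

/* Sketch only: field subset inferred from the hits above, everything else elided. */
struct sched_rt_entity {
	/* ... */
	struct rt_rq		*rt_rq;		/* rq this entity is (to be) queued on */
	struct rt_rq		*my_q;		/* rq "owned" by this group entity */
};

struct task_group {
	/* ... */
	struct rt_rq		**rt_rq;	/* one rt_rq pointer per CPU */
};

struct rq {
	/* ... */
	struct rt_rq		rt;		/* the CPU's top-level RT runqueue */
};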
rt_rq            8031 kernel/sched.c 	array = &rt_rq->active;
rt_rq            8040 kernel/sched.c 	rt_rq->highest_prio = MAX_RT_PRIO;
rt_rq            8043 kernel/sched.c 	rt_rq->rt_nr_migratory = 0;
rt_rq            8044 kernel/sched.c 	rt_rq->overloaded = 0;
rt_rq            8047 kernel/sched.c 	rt_rq->rt_time = 0;
rt_rq            8048 kernel/sched.c 	rt_rq->rt_throttled = 0;
rt_rq            8049 kernel/sched.c 	rt_rq->rt_runtime = 0;
rt_rq            8050 kernel/sched.c 	spin_lock_init(&rt_rq->rt_runtime_lock);
rt_rq            8053 kernel/sched.c 	rt_rq->rt_nr_boosted = 0;
rt_rq            8054 kernel/sched.c 	rt_rq->rq = rq;
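The hits at kernel/sched.c lines 8031-8054 are the body of init_rt_rq(), which puts an rt_rq into a known-empty state. A sketch of the whole function assembled around those lines; the priority-array loop and the CONFIG_SMP/CONFIG_RT_GROUP_SCHED guards are assumptions about code the search does not show:

static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	/* Assumed: empty every per-priority list and clear its bitmap bit. */
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* Assumed: delimiter bit so the bitmap search terminates. */
	__set_bit(MAX_RT_PRIO, array->bitmap);

	rt_rq->highest_prio = MAX_RT_PRIO;	/* nothing queued yet */
	rt_rq->rt_nr_migratory = 0;		/* SMP push/pull bookkeeping */
	rt_rq->overloaded = 0;

	rt_rq->rt_time = 0;			/* runtime consumed this period */
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;			/* budget; filled in later by the group code */
	spin_lock_init(&rt_rq->rt_runtime_lock);

	rt_rq->rt_nr_boosted = 0;		/* PI-boosted entities (group scheduling) */
	rt_rq->rq = rq;				/* back-pointer to the owning CPU runqueue */
}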
rt_rq            8094 kernel/sched.c 	tg->rt_rq[cpu] = rt_rq;
rt_rq            8095 kernel/sched.c 	init_rt_rq(rt_rq, rq);
rt_rq            8096 kernel/sched.c 	rt_rq->tg = tg;
rt_rq            8097 kernel/sched.c 	rt_rq->rt_se = rt_se;
rt_rq            8098 kernel/sched.c 	rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
rt_rq            8100 kernel/sched.c 		list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
rt_rq            8107 kernel/sched.c 		rt_se->rt_rq = &rq->rt;
rt_rq            8109 kernel/sched.c 		rt_se->rt_rq = parent->my_q;
rt_rq            8111 kernel/sched.c 	rt_se->my_q = rt_rq;
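Lines 8094-8111 of kernel/sched.c belong to init_tg_rt_entry(), which plugs a freshly initialised rt_rq and its scheduling entity into a task group on one CPU. A sketch, assuming the signature matches the call at line 8589 (tg, rt_rq, rt_se, cpu, add, parent); the tg->rt_se and run_list handling are assumptions:

static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu, int add,
		struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	tg->rt_rq[cpu] = rt_rq;
	init_rt_rq(rt_rq, rq);
	rt_rq->tg = tg;
	rt_rq->rt_se = rt_se;
	rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
	if (add)
		list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);

	tg->rt_se[cpu] = rt_se;			/* assumed: mirrors the per-CPU rt_rq array */
	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;		/* root group queues on the CPU's own rt_rq */
	else
		rt_se->rt_rq = parent->my_q;	/* child group queues on the parent's rt_rq */

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}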
rt_rq            8157 kernel/sched.c 		init_task_group.rt_rq = (struct rt_rq **)ptr;
rt_rq            8164 kernel/sched.c 		root_task_group.rt_rq = (struct rt_rq **)ptr;
rt_rq            8547 kernel/sched.c 		if (tg->rt_rq)
rt_rq            8548 kernel/sched.c 			kfree(tg->rt_rq[i]);
rt_rq            8553 kernel/sched.c 	kfree(tg->rt_rq);
rt_rq            8560 kernel/sched.c 	struct rt_rq *rt_rq;
rt_rq            8565 kernel/sched.c 	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
rt_rq            8566 kernel/sched.c 	if (!tg->rt_rq)
rt_rq            8578 kernel/sched.c 		rt_rq = kmalloc_node(sizeof(struct rt_rq),
rt_rq            8580 kernel/sched.c 		if (!rt_rq)
rt_rq            8589 kernel/sched.c 		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se);
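The hits at 8547-8589 are the allocation/free pair for a group's RT runqueues: free_rt_sched_group() kfree()s each per-CPU rt_rq and then the pointer array, while alloc_rt_sched_group() builds them. A sketch of the allocation side; the rt_se allocation, the parent_se lookup and the error-path layout are assumptions beyond what the hits show:

static int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct sched_rt_entity *rt_se, *parent_se;
	struct rt_rq *rt_rq;
	int i;

	/* One pointer slot per possible CPU (sizeof(rt_rq) is a pointer size here). */
	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;

	for_each_possible_cpu(i) {
		/* NUMA-local allocation of this CPU's runqueue. */
		rt_rq = kmalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL | __GFP_ZERO, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		/* Assumed: the per-CPU scheduling entity is allocated the same way. */
		rt_se = kmalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL | __GFP_ZERO, cpu_to_node(i));
		if (!rt_se)
			goto err;

		parent_se = parent ? parent->rt_se[i] : NULL;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se);
	}

	return 1;
err:
	return 0;	/* assumed: the caller frees any partial allocations */
}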
rt_rq            8600 kernel/sched.c 	list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list,
rt_rq            8606 kernel/sched.c 	list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
rt_rq            8947 kernel/sched.c 		struct rt_rq *rt_rq = tg->rt_rq[i];
rt_rq            8949 kernel/sched.c 		spin_lock(&rt_rq->rt_runtime_lock);
rt_rq            8950 kernel/sched.c 		rt_rq->rt_runtime = rt_runtime;
rt_rq            8951 kernel/sched.c 		spin_unlock(&rt_rq->rt_runtime_lock);
rt_rq            9043 kernel/sched.c 		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
rt_rq            9045 kernel/sched.c 		spin_lock(&rt_rq->rt_runtime_lock);
rt_rq            9046 kernel/sched.c 		rt_rq->rt_runtime = global_rt_runtime();
rt_rq            9047 kernel/sched.c 		spin_unlock(&rt_rq->rt_runtime_lock);
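The two clusters at 8947-8951 and 9043-9047 show the same propagation pattern from two callers: a group-bandwidth update path pushes the new rt_runtime into that group's rt_rq on every CPU, and a global (non-group) path does the same for each CPU's root rq->rt using global_rt_runtime(). A sketch of the two per-CPU loops; the loop-variable declarations and the outer locking around them are omitted here:

	/* Group path: push the group's new budget down to every CPU's rt_rq. */
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = tg->rt_rq[i];

		spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_runtime;
		spin_unlock(&rt_rq->rt_runtime_lock);
	}

	/* Global path: same pattern on each CPU's root RT runqueue. */
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = &cpu_rq(i)->rt;

		spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = global_rt_runtime();
		spin_unlock(&rt_rq->rt_runtime_lock);
	}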
rt_rq             197 kernel/sched_debug.c 	struct task_group *tg = rt_rq->tg;
rt_rq             212 kernel/sched_debug.c 	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
rt_rq             214 kernel/sched_debug.c 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
rt_rq              68 kernel/sched_rt.c 	if (!rt_rq->tg)
rt_rq              71 kernel/sched_rt.c 	return rt_rq->rt_runtime;
rt_rq              76 kernel/sched_rt.c 	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
rt_rq              80 kernel/sched_rt.c 	list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
rt_rq              84 kernel/sched_rt.c 	return rt_rq->rq;
rt_rq              87 kernel/sched_rt.c static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
rt_rq              89 kernel/sched_rt.c 	return rt_se->rt_rq;
rt_rq              95 kernel/sched_rt.c static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
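The hits from kernel/sched_rt.c lines 68-95 are the CONFIG_RT_GROUP_SCHED flavour of the small accessors the rest of the file builds on. A sketch of that block; bodies not visible in the hits (group_rt_rq(), the RUNTIME_INF return) are assumptions:

/* CONFIG_RT_GROUP_SCHED side of the accessor block. */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;	/* assumed: no group, no throttling here */

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

/* Walk every rt_rq currently linked on this CPU's leaf list. */
#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;		/* group rt_rqs carry an explicit back-pointer */
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;		/* assumed body; NULL for a plain task's entity */
}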
rt_rq             105 kernel/sched_rt.c 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
rt_rq             106 kernel/sched_rt.c 	struct sched_rt_entity *rt_se = rt_rq->rt_se;
rt_rq             108 kernel/sched_rt.c 	if (rt_rq->rt_nr_running) {
rt_rq             111 kernel/sched_rt.c 		if (rt_rq->highest_prio < curr->prio)
rt_rq             118 kernel/sched_rt.c 	struct sched_rt_entity *rt_se = rt_rq->rt_se;
rt_rq             126 kernel/sched_rt.c 	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
rt_rq             131 kernel/sched_rt.c 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
rt_rq             134 kernel/sched_rt.c 	if (rt_rq)
rt_rq             135 kernel/sched_rt.c 		return !!rt_rq->rt_nr_boosted;
rt_rq             154 kernel/sched_rt.c struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
rt_rq             156 kernel/sched_rt.c 	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
rt_rq             161 kernel/sched_rt.c 	return &rt_rq->tg->rt_bandwidth;
rt_rq             168 kernel/sched_rt.c 	return rt_rq->rt_runtime;
rt_rq             177 kernel/sched_rt.c 	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
rt_rq             181 kernel/sched_rt.c 	return container_of(rt_rq, struct rq, rt);
rt_rq             184 kernel/sched_rt.c static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
rt_rq             195 kernel/sched_rt.c static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
rt_rq             202 kernel/sched_rt.c 	if (rt_rq->rt_nr_running)
rt_rq             203 kernel/sched_rt.c 		resched_task(rq_of_rt_rq(rt_rq)->curr);
rt_rq             212 kernel/sched_rt.c 	return rt_rq->rt_throttled;
rt_rq             221 kernel/sched_rt.c struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
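The hits between lines 177 and 221 are the !CONFIG_RT_GROUP_SCHED counterparts (the #else branch of the same block): with no groups there is exactly one rt_rq per CPU, embedded in struct rq, so the accessors collapse to trivial forms. A sketch; the rt_rq_of_se() body is an assumption:

/* !CONFIG_RT_GROUP_SCHED side of the accessor block. */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

/* One iteration only: the single per-CPU rt_rq embedded in struct rq. */
#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	/* The rt_rq is embedded in struct rq, so container_of() recovers it. */
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	/* Assumed body: map the entity back to its task, then to that task's rq. */
	struct task_struct *p = container_of(rt_se, struct task_struct, rt);

	return &task_rq(p)->rt;
}

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;			/* tasks never own a child rt_rq here */
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_running)
		resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}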
rt_rq             239 kernel/sched_rt.c 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
rt_rq             249 kernel/sched_rt.c 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
rt_rq             252 kernel/sched_rt.c 		if (iter == rt_rq)
rt_rq             271 kernel/sched_rt.c 			if (rt_rq->rt_runtime + diff > rt_period)
rt_rq             272 kernel/sched_rt.c 				diff = rt_period - rt_rq->rt_runtime;
rt_rq             274 kernel/sched_rt.c 			rt_rq->rt_runtime += diff;
rt_rq             276 kernel/sched_rt.c 			if (rt_rq->rt_runtime == rt_period) {
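Lines 239-284 are do_balance_runtime(): when an rt_rq has exhausted its budget it tries to borrow unused rt_runtime from the other CPUs sharing the same rt_bandwidth, capped at one full period. A sketch of the borrowing loop; the root-domain iteration, the surplus split and the early-exit details are assumptions beyond what the hits show:

static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	int i, weight, more = 0;
	u64 rt_period;

	/* Assumed: weight = number of CPUs this bandwidth is shared across. */
	weight = cpus_weight(cpu_rq(smp_processor_id())->rd->span);

	spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu_mask(i, cpu_rq(smp_processor_id())->rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;		/* don't borrow from ourselves */

		spin_lock(&iter->rt_runtime_lock);
		diff = iter->rt_runtime - iter->rt_time;	/* the donor's unused budget */
		if (iter->rt_runtime != RUNTIME_INF && diff > 0) {
			diff /= weight;		/* take only a fair share of the surplus */
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				spin_unlock(&iter->rt_runtime_lock);
				break;		/* can't hold more than a full period */
			}
		}
		spin_unlock(&iter->rt_runtime_lock);
	}
	spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}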
rt_rq             295 kernel/sched_rt.c 	struct rt_rq *rt_rq;
rt_rq             300 kernel/sched_rt.c 	for_each_leaf_rt_rq(rt_rq, rq) {
rt_rq             301 kernel/sched_rt.c 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
rt_rq             306 kernel/sched_rt.c 		spin_lock(&rt_rq->rt_runtime_lock);
rt_rq             312 kernel/sched_rt.c 		if (rt_rq->rt_runtime == RUNTIME_INF ||
rt_rq             313 kernel/sched_rt.c 				rt_rq->rt_runtime == rt_b->rt_runtime)
rt_rq             315 kernel/sched_rt.c 		spin_unlock(&rt_rq->rt_runtime_lock);
rt_rq             322 kernel/sched_rt.c 		want = rt_b->rt_runtime - rt_rq->rt_runtime;
rt_rq             328 kernel/sched_rt.c 			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
rt_rq             334 kernel/sched_rt.c 			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
rt_rq             352 kernel/sched_rt.c 		spin_lock(&rt_rq->rt_runtime_lock);
rt_rq             363 kernel/sched_rt.c 		rt_rq->rt_runtime = RUNTIME_INF;
rt_rq             364 kernel/sched_rt.c 		spin_unlock(&rt_rq->rt_runtime_lock);
rt_rq             380 kernel/sched_rt.c 	struct rt_rq *rt_rq;
rt_rq             388 kernel/sched_rt.c 	for_each_leaf_rt_rq(rt_rq, rq) {
rt_rq             389 kernel/sched_rt.c 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
rt_rq             392 kernel/sched_rt.c 		spin_lock(&rt_rq->rt_runtime_lock);
rt_rq             393 kernel/sched_rt.c 		rt_rq->rt_runtime = rt_b->rt_runtime;
rt_rq             394 kernel/sched_rt.c 		rt_rq->rt_time = 0;
rt_rq             395 kernel/sched_rt.c 		rt_rq->rt_throttled = 0;
rt_rq             396 kernel/sched_rt.c 		spin_unlock(&rt_rq->rt_runtime_lock);
rt_rq             414 kernel/sched_rt.c 	if (rt_rq->rt_time > rt_rq->rt_runtime) {
rt_rq             415 kernel/sched_rt.c 		spin_unlock(&rt_rq->rt_runtime_lock);
rt_rq             416 kernel/sched_rt.c 		more = do_balance_runtime(rt_rq);
rt_rq             417 kernel/sched_rt.c 		spin_lock(&rt_rq->rt_runtime_lock);
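Lines 414-417 are the core of balance_runtime(), the small wrapper used by the accounting paths: only when rt_time has overrun rt_runtime does it drop this rt_rq's rt_runtime_lock and call do_balance_runtime(), which needs to take other rt_rqs' locks. A sketch; the wrapper's name and return plumbing are inferred from the call sites at lines 449 and 492:

static int balance_runtime(struct rt_rq *rt_rq)
{
	int more = 0;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		/* Drop our lock: do_balance_runtime() locks other CPUs' rt_rqs. */
		spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		spin_lock(&rt_rq->rt_runtime_lock);
	}

	return more;
}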
rt_rq             440 kernel/sched_rt.c 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
rt_rq             441 kernel/sched_rt.c 		struct rq *rq = rq_of_rt_rq(rt_rq);
rt_rq             444 kernel/sched_rt.c 		if (rt_rq->rt_time) {
rt_rq             447 kernel/sched_rt.c 			spin_lock(&rt_rq->rt_runtime_lock);
rt_rq             448 kernel/sched_rt.c 			if (rt_rq->rt_throttled)
rt_rq             449 kernel/sched_rt.c 				balance_runtime(rt_rq);
rt_rq             450 kernel/sched_rt.c 			runtime = rt_rq->rt_runtime;
rt_rq             451 kernel/sched_rt.c 			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
rt_rq             452 kernel/sched_rt.c 			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
rt_rq             453 kernel/sched_rt.c 				rt_rq->rt_throttled = 0;
rt_rq             456 kernel/sched_rt.c 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
rt_rq             458 kernel/sched_rt.c 			spin_unlock(&rt_rq->rt_runtime_lock);
rt_rq             459 kernel/sched_rt.c 		} else if (rt_rq->rt_nr_running)
rt_rq             463 kernel/sched_rt.c 			sched_rt_rq_enqueue(rt_rq);
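Lines 440-463 sit inside do_sched_rt_period_timer(), run from the bandwidth period timer: for each CPU's rt_rq it decays the consumed time, unthrottles runqueues that fit their budget again, and reports whether the timer can go idle. A sketch of the per-CPU loop; the surrounding function signature, the CPU mask and the rq->lock handling are assumptions:

	for_each_cpu_mask(i, span) {		/* span: CPUs covered by this rt_bandwidth (assumed) */
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);	/* try to top the budget up first */
			runtime = rt_rq->rt_runtime;
			/* Forget up to 'overrun' periods worth of consumed time. */
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;		/* fits again: put it back on its parent */
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;		/* keep the period timer running */
			spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running)
			idle = 0;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		spin_unlock(&rq->lock);
	}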
rt_rq             473 kernel/sched_rt.c 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
rt_rq             475 kernel/sched_rt.c 	if (rt_rq)
rt_rq             476 kernel/sched_rt.c 		return rt_rq->highest_prio;
rt_rq             484 kernel/sched_rt.c 	u64 runtime = sched_rt_runtime(rt_rq);
rt_rq             486 kernel/sched_rt.c 	if (rt_rq->rt_throttled)
rt_rq             487 kernel/sched_rt.c 		return rt_rq_throttled(rt_rq);
rt_rq             489 kernel/sched_rt.c 	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
rt_rq             492 kernel/sched_rt.c 	balance_runtime(rt_rq);
rt_rq             493 kernel/sched_rt.c 	runtime = sched_rt_runtime(rt_rq);
rt_rq             497 kernel/sched_rt.c 	if (rt_rq->rt_time > runtime) {
rt_rq             498 kernel/sched_rt.c 		rt_rq->rt_throttled = 1;
rt_rq             499 kernel/sched_rt.c 		if (rt_rq_throttled(rt_rq)) {
rt_rq             500 kernel/sched_rt.c 			sched_rt_rq_dequeue(rt_rq);
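Lines 484-505 are sched_rt_runtime_exceeded(), called from the accounting path to decide whether an rt_rq must be throttled. A sketch; the RUNTIME_INF early returns are assumptions, the rest follows the hits:

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (runtime == RUNTIME_INF)
		return 0;			/* assumed: unlimited budget, never throttle */

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);	/* already throttled (unless PI-boosted) */

	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
		return 0;			/* budget covers the whole period */

	balance_runtime(rt_rq);			/* try to borrow runtime from other CPUs */
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);	/* pull the group off its parent */
			return 1;
		}
	}

	return 0;
}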
rt_rq             516 kernel/sched_rt.c 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
rt_rq             536 kernel/sched_rt.c 		rt_rq = rt_rq_of_se(rt_se);
rt_rq             538 kernel/sched_rt.c 		spin_lock(&rt_rq->rt_runtime_lock);
rt_rq             539 kernel/sched_rt.c 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
rt_rq             540 kernel/sched_rt.c 			rt_rq->rt_time += delta_exec;
rt_rq             541 kernel/sched_rt.c 			if (sched_rt_runtime_exceeded(rt_rq))
rt_rq             544 kernel/sched_rt.c 		spin_unlock(&rt_rq->rt_runtime_lock);
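Lines 516-544 are inside update_curr_rt(): after charging the current task's execution time, it walks the entity hierarchy with for_each_sched_rt_entity() and adds delta_exec to every ancestor rt_rq's rt_time, rescheduling if any level just exceeded its budget. A sketch of that accounting loop; the delta_exec computation and the stats updates earlier in the function are omitted:

	/* delta_exec: CPU time the current RT task just consumed (computed above). */
	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		spin_lock(&rt_rq->rt_runtime_lock);
		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			rt_rq->rt_time += delta_exec;		/* charge this level of the hierarchy */
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);		/* budget blown: force a reschedule */
		}
		spin_unlock(&rt_rq->rt_runtime_lock);
	}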
rt_rq             552 kernel/sched_rt.c 	rt_rq->rt_nr_running++;
rt_rq             554 kernel/sched_rt.c 	if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
rt_rq             556 kernel/sched_rt.c 		struct rq *rq = rq_of_rt_rq(rt_rq);
rt_rq             559 kernel/sched_rt.c 		rt_rq->highest_prio = rt_se_prio(rt_se);
rt_rq             569 kernel/sched_rt.c 		struct rq *rq = rq_of_rt_rq(rt_rq);
rt_rq             574 kernel/sched_rt.c 	update_rt_migration(rq_of_rt_rq(rt_rq));
rt_rq             578 kernel/sched_rt.c 		rt_rq->rt_nr_boosted++;
rt_rq             580 kernel/sched_rt.c 	if (rt_rq->tg)
rt_rq             581 kernel/sched_rt.c 		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
rt_rq             591 kernel/sched_rt.c 	int highest_prio = rt_rq->highest_prio;
rt_rq             595 kernel/sched_rt.c 	WARN_ON(!rt_rq->rt_nr_running);
rt_rq             596 kernel/sched_rt.c 	rt_rq->rt_nr_running--;
rt_rq             598 kernel/sched_rt.c 	if (rt_rq->rt_nr_running) {
rt_rq             601 kernel/sched_rt.c 		WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
rt_rq             602 kernel/sched_rt.c 		if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
rt_rq             604 kernel/sched_rt.c 			array = &rt_rq->active;
rt_rq             605 kernel/sched_rt.c 			rt_rq->highest_prio =
rt_rq             609 kernel/sched_rt.c 		rt_rq->highest_prio = MAX_RT_PRIO;
rt_rq             613 kernel/sched_rt.c 		struct rq *rq = rq_of_rt_rq(rt_rq);
rt_rq             617 kernel/sched_rt.c 	if (rt_rq->highest_prio != highest_prio) {
rt_rq             618 kernel/sched_rt.c 		struct rq *rq = rq_of_rt_rq(rt_rq);
rt_rq             622 kernel/sched_rt.c 				   rt_rq->highest_prio);
rt_rq             625 kernel/sched_rt.c 	update_rt_migration(rq_of_rt_rq(rt_rq));
rt_rq             629 kernel/sched_rt.c 		rt_rq->rt_nr_boosted--;
rt_rq             631 kernel/sched_rt.c 	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
rt_rq             637 kernel/sched_rt.c 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
rt_rq             638 kernel/sched_rt.c 	struct rt_prio_array *array = &rt_rq->active;
rt_rq             639 kernel/sched_rt.c 	struct rt_rq *group_rq = group_rt_rq(rt_se);
rt_rq             654 kernel/sched_rt.c 	inc_rt_tasks(rt_se, rt_rq);
rt_rq             659 kernel/sched_rt.c 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
rt_rq             660 kernel/sched_rt.c 	struct rt_prio_array *array = &rt_rq->active;
rt_rq             666 kernel/sched_rt.c 	dec_rt_tasks(rt_se, rt_rq);
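Lines 637-666 are the enqueue/dequeue pair for an RT entity on its rt_rq's priority array. A sketch of both; the throttled/empty check for group entities and the bitmap handling go beyond the hits and are assumptions:

static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);

	/* Assumed: don't queue a group entity whose own rt_rq is throttled or empty. */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
}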
rt_rq             700 kernel/sched_rt.c 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
rt_rq             702 kernel/sched_rt.c 		if (rt_rq && rt_rq->rt_nr_running)
rt_rq             740 kernel/sched_rt.c 		struct rt_prio_array *array = &rt_rq->active;
rt_rq             753 kernel/sched_rt.c 	struct rt_rq *rt_rq;
rt_rq             756 kernel/sched_rt.c 		rt_rq = rt_rq_of_se(rt_se);
rt_rq             757 kernel/sched_rt.c 		requeue_rt_entity(rt_rq, rt_se, head);
rt_rq             860 kernel/sched_rt.c 	struct rt_prio_array *array = &rt_rq->active;
rt_rq             878 kernel/sched_rt.c 	struct rt_rq *rt_rq;
rt_rq             880 kernel/sched_rt.c 	rt_rq = &rq->rt;
rt_rq             882 kernel/sched_rt.c 	if (unlikely(!rt_rq->rt_nr_running))
rt_rq             885 kernel/sched_rt.c 	if (rt_rq_throttled(rt_rq))
rt_rq             889 kernel/sched_rt.c 		rt_se = pick_next_rt_entity(rq, rt_rq);
rt_rq             891 kernel/sched_rt.c 		rt_rq = group_rt_rq(rt_se);
rt_rq             892 kernel/sched_rt.c 	} while (rt_rq);
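Lines 860-892 are pick_next_rt_entity() and the descent loop in pick_next_task_rt(): starting from the CPU's top-level rq->rt, pick the highest-priority entity, and if that entity is a group, descend into its my_q and repeat until a task-level entity is found. A sketch; the bitmap search in the helper and the final task mapping/exec_start bookkeeping are assumptions:

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct list_head *queue;
	int idx;

	/* Assumed: lowest set bit = numerically lowest prio = highest RT priority. */
	idx = sched_find_first_bit(array->bitmap);
	queue = array->queue + idx;

	return list_entry(queue->next, struct sched_rt_entity, run_list);
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

	rt_rq = &rq->rt;

	if (unlikely(!rt_rq->rt_nr_running))
		return NULL;			/* no RT work on this CPU */

	if (rt_rq_throttled(rt_rq))
		return NULL;			/* whole RT class is out of budget */

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		rt_rq = group_rt_rq(rt_se);	/* non-NULL means a group: descend into it */
	} while (rt_rq);

	p = container_of(rt_se, struct task_struct, rt);	/* assumed rt_task_of() equivalent */
	p->se.exec_start = rq->clock;

	return p;
}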
rt_rq             930 kernel/sched_rt.c 	struct rt_rq *rt_rq;
rt_rq             933 kernel/sched_rt.c 	for_each_leaf_rt_rq(rt_rq, rq) {
rt_rq             934 kernel/sched_rt.c 		array = &rt_rq->active;
rt_rq            1538 kernel/sched_rt.c 	struct rt_rq *rt_rq;
rt_rq            1541 kernel/sched_rt.c 	for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
rt_rq            1542 kernel/sched_rt.c 		print_rt_rq(m, cpu, rt_rq);