rcu_ctrlblk in kernel/rcuclassic.c — control-block definitions and users:

   61	static struct rcu_ctrlblk rcu_ctrlblk = {
   65		.lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
   68	static struct rcu_ctrlblk rcu_bh_ctrlblk = {
  163			force_quiescent_state(rdp, &rcu_ctrlblk);
  266		__call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
  305		return rcu_ctrlblk.completed;
  508		__rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
  594		__rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
  653		return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
  738		rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
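The classic implementation keeps one such control block per RCU flavor (rcu_ctrlblk for rcu and rcu_bh_ctrlblk for rcu_bh, lines 61 and 68). For orientation, a sketch of the structure behind these references follows; only .lock and .completed are directly evidenced in the listing above, so the remaining fields and comments are reconstructed from 2.6.26-era sources and should be checked against include/linux/rcuclassic.h rather than taken as verbatim.

	struct rcu_ctrlblk {
		long		cur;		/* Current batch number. */
		long		completed;	/* Number of the last completed batch. */
		int		next_pending;	/* Is the next batch already waiting? */
		int		signaled;	/* Has force_quiescent_state() been requested? */
		spinlock_t	lock;		/* Serializes updates to this structure. */
		cpumask_t	cpumask;	/* CPUs that still need to pass through a */
						/* quiescent state before the current    */
						/* batch can complete.                   */
	};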
rcu_ctrlblk in kernel/rcupreempt.c — control-block definition and grace-period counter reads:

  151	static struct rcu_ctrlblk rcu_ctrlblk = {
  152		.fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
  155		.schedlock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.schedlock),
  157		.sched_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rcu_ctrlblk.sched_wq),
  231		return rcu_ctrlblk.completed;
  275		idx = ACCESS_ONCE(rcu_ctrlblk.completed) & 0x1;
  382		if (rdp->completed != rcu_ctrlblk.completed) {
  411		rdp->completed = rcu_ctrlblk.completed;
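The preemptible implementation defines its own, richer rcu_ctrlblk. All six fields can be read off the references in this section: fliplock, schedlock, and sched_wq from the initializers at lines 152-157 (whose initializer macros also fix their types), and completed, rcu_try_flip_state, and sched_sleep from their uses. The comments in this sketch are reconstructed and may differ in wording from the file itself.

	struct rcu_ctrlblk {
		spinlock_t	fliplock;	/* Protects counter-flip state transitions. */
		long		completed;	/* Number of the last completed batch; its  */
						/* low bit selects the current of the two   */
						/* per-CPU counter sets (see line 275).     */
		int		rcu_try_flip_state;	/* Current state of the flip machine. */
		spinlock_t	schedlock;	/* Protects the rcu_sched sleep state. */
		enum rcu_sched_sleep_states sched_sleep;	/* rcu_sched kthread state. */
		wait_queue_head_t sched_wq;	/* Where the rcu_sched kthread sleeps. */
	};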
rcu_ctrlblk in kernel/rcupreempt.c — counter flip and grace-period state machine:

  739		rcu_ctrlblk.completed++;  /* stands in for rcu_try_flip_g2 */
  795		int lastidx = !(rcu_ctrlblk.completed & 0x1);
  862		if (unlikely(!spin_trylock_irqsave(&rcu_ctrlblk.fliplock, flags))) {
  872		switch (rcu_ctrlblk.rcu_try_flip_state) {
  875			rcu_ctrlblk.rcu_try_flip_state =
  880			rcu_ctrlblk.rcu_try_flip_state =
  885			rcu_ctrlblk.rcu_try_flip_state =
  890			rcu_ctrlblk.rcu_try_flip_state =
  893		spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
  936		if (rcu_ctrlblk.completed == rdp->completed)
  958		if (rcu_ctrlblk.completed == rdp->completed) {
  960			if (rcu_ctrlblk.completed == rdp->completed)
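Lines 862-893 are the heart of rcu_try_flip(), which advances the grace-period state machine by at most one step per call, under fliplock. The state and helper names below are the real ones from this file, but the body is an abridged reconstruction (tracing calls omitted) and should not be read as verbatim source.

	static void rcu_try_flip(void)
	{
		unsigned long flags;

		/* Someone else is already advancing the state machine: bail out. */
		if (unlikely(!spin_trylock_irqsave(&rcu_ctrlblk.fliplock, flags)))
			return;

		/* Each rcu_try_flip_*() helper returns nonzero once its phase is done. */
		switch (rcu_ctrlblk.rcu_try_flip_state) {
		case rcu_try_flip_idle_state:		/* start a new grace period */
			if (rcu_try_flip_idle())
				rcu_ctrlblk.rcu_try_flip_state = rcu_try_flip_waitack_state;
			break;
		case rcu_try_flip_waitack_state:	/* wait for CPUs to ack the flip */
			if (rcu_try_flip_waitack())
				rcu_ctrlblk.rcu_try_flip_state = rcu_try_flip_waitzero_state;
			break;
		case rcu_try_flip_waitzero_state:	/* wait for old counters to drain */
			if (rcu_try_flip_waitzero())
				rcu_ctrlblk.rcu_try_flip_state = rcu_try_flip_waitmb_state;
			break;
		case rcu_try_flip_waitmb_state:		/* wait for memory-barrier acks */
			if (rcu_try_flip_waitmb())
				rcu_ctrlblk.rcu_try_flip_state = rcu_try_flip_idle_state;
		}
		spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
	}

The completed++ at line 739 (inside rcu_try_flip_idle()) is the flip itself: it toggles the parity bit that readers sample at line 275 to pick a counter set.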
rcu_ctrlblk in kernel/rcupreempt.c — CPU hotplug paths, serialized by fliplock:

 1010		spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
 1027		spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
 1064		spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
 1066		spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
rcu_ctrlblk in kernel/rcupreempt.c — rcu_sched kthread sleep/wake handshake (schedlock, sched_sleep, sched_wq):

 1152		spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
 1153		if (rcu_ctrlblk.sched_sleep != rcu_sched_sleeping)
 1155		rcu_ctrlblk.sched_sleep = rcu_sched_not_sleeping;
 1156		spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
 1158			wake_up_interruptible(&rcu_ctrlblk.sched_wq);
 1209			spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
 1210			rcu_ctrlblk.sched_sleep = rcu_sched_sleep_prep;
 1211			spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
 1280		spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
 1281		if (rcu_ctrlblk.sched_sleep != rcu_sched_sleep_prep) {
 1287			spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
 1294		rcu_ctrlblk.sched_sleep = rcu_sched_sleeping;
 1295		spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
 1297		__wait_event_interruptible(rcu_ctrlblk.sched_wq,
 1298			rcu_ctrlblk.sched_sleep != rcu_sched_sleeping,
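These references form a three-state handshake between callers of call_rcu_sched() (lines 1152-1158, the wake side) and the rcu_sched grace-period kthread (lines 1209-1298, the sleep side). The three state names appear verbatim in the listing; the enum comments and the wrapper functions in this condensed sketch are hypothetical (in the file, the logic sits inline in call_rcu_sched() and rcu_sched_grace_period()).

	enum rcu_sched_sleep_states {
		rcu_sched_not_sleeping,	/* Awake; callbacks may need a grace period. */
		rcu_sched_sleep_prep,	/* Thinking about sleeping; rechecking for work. */
		rcu_sched_sleeping,	/* Asleep; wake it if a grace period is needed. */
	};

	/* Wake side, condensed from call_rcu_sched() (lines 1152-1158). */
	static void rcu_sched_wake_if_sleeping(void)	/* hypothetical wrapper */
	{
		unsigned long flags;
		int wake_gp = 1;

		spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
		if (rcu_ctrlblk.sched_sleep != rcu_sched_sleeping)
			wake_gp = 0;			/* kthread already awake */
		rcu_ctrlblk.sched_sleep = rcu_sched_not_sleeping;
		spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
		if (wake_gp)
			wake_up_interruptible(&rcu_ctrlblk.sched_wq);
	}

	/* Sleep side, condensed from the kthread loop (lines 1209-1298). */
	static void rcu_sched_try_to_sleep(void)	/* hypothetical wrapper */
	{
		unsigned long flags;
		int ret = 0;

		/* Announce the intention to sleep, then make one more pass. */
		spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
		rcu_ctrlblk.sched_sleep = rcu_sched_sleep_prep;
		spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);

		/* ... one more scan for pending work happens here ... */

		spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
		if (rcu_ctrlblk.sched_sleep != rcu_sched_sleep_prep) {
			/* A callback arrived meanwhile: stay awake. */
			rcu_ctrlblk.sched_sleep = rcu_sched_not_sleeping;
			spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
			return;
		}
		rcu_ctrlblk.sched_sleep = rcu_sched_sleeping;
		spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
		__wait_event_interruptible(rcu_ctrlblk.sched_wq,
			rcu_ctrlblk.sched_sleep != rcu_sched_sleeping, ret);
	}

The sleep_prep intermediate state is what closes the race: a callback posted between the two locked sections knocks the state back to not_sleeping, so the kthread notices and skips the wait.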
rcu_ctrlblk in kernel/rcupreempt.c — pending check and state-name lookup for tracing:

 1355		if (rdp->completed != rcu_ctrlblk.completed)
 1470		return rcu_try_flip_state_names[rcu_ctrlblk.rcu_try_flip_state];