cpu               254 arch/x86/boot/boot.h extern struct cpu_features cpu;
cpu                31 arch/x86/boot/cpucheck.c struct cpu_features cpu;
cpu               115 arch/x86/boot/cpucheck.c 		set_bit(X86_FEATURE_FPU, cpu.flags);
cpu               129 arch/x86/boot/cpucheck.c 			      "=c" (cpu.flags[4]),
cpu               130 arch/x86/boot/cpucheck.c 			      "=d" (cpu.flags[0])
cpu               133 arch/x86/boot/cpucheck.c 			cpu.level = (tfms >> 8) & 15;
cpu               134 arch/x86/boot/cpucheck.c 			cpu.model = (tfms >> 4) & 15;
cpu               135 arch/x86/boot/cpucheck.c 			if (cpu.level >= 6)
cpu               136 arch/x86/boot/cpucheck.c 				cpu.model += ((tfms >> 16) & 0xf) << 4;
cpu               149 arch/x86/boot/cpucheck.c 			      "=c" (cpu.flags[6]),
cpu               150 arch/x86/boot/cpucheck.c 			      "=d" (cpu.flags[1])
cpu               164 arch/x86/boot/cpucheck.c 		err_flags[i] = req_flags[i] & ~cpu.flags[i];
cpu               184 arch/x86/boot/cpucheck.c 	memset(&cpu.flags, 0, sizeof cpu.flags);
cpu               185 arch/x86/boot/cpucheck.c 	cpu.level = 3;
cpu               188 arch/x86/boot/cpucheck.c 		cpu.level = 4;
cpu               193 arch/x86/boot/cpucheck.c 	if (test_bit(X86_FEATURE_LM, cpu.flags))
cpu               194 arch/x86/boot/cpucheck.c 		cpu.level = 64;
cpu               214 arch/x86/boot/cpucheck.c 		   is_centaur() && cpu.model >= 6) {
cpu               225 arch/x86/boot/cpucheck.c 		set_bit(X86_FEATURE_CX8, cpu.flags);
cpu               237 arch/x86/boot/cpucheck.c 		    : "+a" (level), "=d" (cpu.flags[0])
cpu               247 arch/x86/boot/cpucheck.c 		*cpu_level_ptr = cpu.level;
cpu               251 arch/x86/boot/cpucheck.c 	return (cpu.level < req_level || err) ? -1 : 0;
cpu                78 arch/x86/boot/main.c 	if (cpu.level < 6)
cpu               542 arch/x86/kernel/acpi/boot.c 	int cpu;
cpu               584 arch/x86/kernel/acpi/boot.c 	cpu = first_cpu(new_map);
cpu               586 arch/x86/kernel/acpi/boot.c 	*pcpu = cpu;
cpu               599 arch/x86/kernel/acpi/boot.c 	per_cpu(x86_cpu_to_apicid, cpu) = -1;
cpu               600 arch/x86/kernel/acpi/boot.c 	cpu_clear(cpu, cpu_present_map);
cpu                30 arch/x86/kernel/acpi/cstate.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu                73 arch/x86/kernel/acpi/cstate.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu                88 arch/x86/kernel/acpi/cstate.c 	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
cpu                94 arch/x86/kernel/acpi/cstate.c 	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
cpu               138 arch/x86/kernel/acpi/cstate.c 	unsigned int cpu = smp_processor_id();
cpu               141 arch/x86/kernel/acpi/cstate.c 	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
cpu               665 arch/x86/kernel/apic_32.c 	int cpu = smp_processor_id();
cpu               666 arch/x86/kernel/apic_32.c 	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
cpu               681 arch/x86/kernel/apic_32.c 		       "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
cpu               693 arch/x86/kernel/apic_32.c 	per_cpu(irq_stat, cpu).apic_timer_irqs++;
cpu              1492 arch/x86/kernel/apic_32.c 	int cpu;
cpu              1514 arch/x86/kernel/apic_32.c 	cpu = first_cpu(tmp_map);
cpu              1523 arch/x86/kernel/apic_32.c 		cpu = 0;
cpu              1556 arch/x86/kernel/apic_32.c 		cpu_to_apicid[cpu] = apicid;
cpu              1557 arch/x86/kernel/apic_32.c 		bios_cpu_apicid[cpu] = apicid;
cpu              1559 arch/x86/kernel/apic_32.c 		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
cpu              1560 arch/x86/kernel/apic_32.c 		per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
cpu              1564 arch/x86/kernel/apic_32.c 	cpu_set(cpu, cpu_possible_map);
cpu              1565 arch/x86/kernel/apic_32.c 	cpu_set(cpu, cpu_present_map);
cpu               548 arch/x86/kernel/apic_64.c 	int cpu = smp_processor_id();
cpu               549 arch/x86/kernel/apic_64.c 	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
cpu               564 arch/x86/kernel/apic_64.c 		       "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
cpu               576 arch/x86/kernel/apic_64.c 	per_cpu(irq_stat, cpu).apic_timer_irqs++;
cpu              1430 arch/x86/kernel/apic_64.c 	int cpu;
cpu              1452 arch/x86/kernel/apic_64.c 	cpu = first_cpu(tmp_map);
cpu              1461 arch/x86/kernel/apic_64.c 		cpu = 0;
cpu              1494 arch/x86/kernel/apic_64.c 		cpu_to_apicid[cpu] = apicid;
cpu              1495 arch/x86/kernel/apic_64.c 		bios_cpu_apicid[cpu] = apicid;
cpu              1497 arch/x86/kernel/apic_64.c 		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
cpu              1498 arch/x86/kernel/apic_64.c 		per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
cpu              1502 arch/x86/kernel/apic_64.c 	cpu_set(cpu, cpu_possible_map);
cpu              1503 arch/x86/kernel/apic_64.c 	cpu_set(cpu, cpu_present_map);
cpu               601 arch/x86/kernel/apm_32.c 	int			cpu;
cpu               607 arch/x86/kernel/apm_32.c 	cpu = get_cpu();
cpu               608 arch/x86/kernel/apm_32.c 	gdt = get_cpu_gdt_table(cpu);
cpu               644 arch/x86/kernel/apm_32.c 	int			cpu;
cpu               650 arch/x86/kernel/apm_32.c 	cpu = get_cpu();
cpu               651 arch/x86/kernel/apm_32.c 	gdt = get_cpu_gdt_table(cpu);
cpu                61 arch/x86/kernel/asm-offsets_32.c 	OFFSET(TI_cpu, thread_info, cpu);
cpu               223 arch/x86/kernel/cpu/amd.c 	int cpu = smp_processor_id();
cpu               250 arch/x86/kernel/cpu/amd.c 	numa_set_node(cpu, node);
cpu               252 arch/x86/kernel/cpu/amd.c 	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
cpu               864 arch/x86/kernel/cpu/common.c 	struct x8664_pda *pda = cpu_pda(cpu);
cpu               874 arch/x86/kernel/cpu/common.c 	pda->cpunumber = cpu;
cpu               881 arch/x86/kernel/cpu/common.c 	if (cpu == 0) {
cpu               892 arch/x86/kernel/cpu/common.c 				      cpu);
cpu               896 arch/x86/kernel/cpu/common.c 		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
cpu               897 arch/x86/kernel/cpu/common.c 			pda->nodenumber = cpu_to_node(cpu);
cpu               956 arch/x86/kernel/cpu/common.c 	int cpu = stack_smp_processor_id();
cpu               957 arch/x86/kernel/cpu/common.c 	struct tss_struct *t = &per_cpu(init_tss, cpu);
cpu               958 arch/x86/kernel/cpu/common.c 	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
cpu               965 arch/x86/kernel/cpu/common.c 	if (cpu != 0)
cpu               966 arch/x86/kernel/cpu/common.c 		pda_init(cpu);
cpu               972 arch/x86/kernel/cpu/common.c 	if (cpu_test_and_set(cpu, cpu_initialized))
cpu               973 arch/x86/kernel/cpu/common.c 		panic("CPU#%d already initialized!\n", cpu);
cpu               975 arch/x86/kernel/cpu/common.c 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
cpu               995 arch/x86/kernel/cpu/common.c 	if (cpu != 0 && x2apic)
cpu              1007 arch/x86/kernel/cpu/common.c 			if (cpu) {
cpu              1011 arch/x86/kernel/cpu/common.c 					      "stack %ld %d\n", v, cpu);
cpu              1034 arch/x86/kernel/cpu/common.c 	set_tss_desc(cpu, t);
cpu              1076 arch/x86/kernel/cpu/common.c 	int cpu = smp_processor_id();
cpu              1078 arch/x86/kernel/cpu/common.c 	struct tss_struct *t = &per_cpu(init_tss, cpu);
cpu              1081 arch/x86/kernel/cpu/common.c 	if (cpu_test_and_set(cpu, cpu_initialized)) {
cpu              1082 arch/x86/kernel/cpu/common.c 		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
cpu              1086 arch/x86/kernel/cpu/common.c 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
cpu              1104 arch/x86/kernel/cpu/common.c 	set_tss_desc(cpu, t);
cpu              1110 arch/x86/kernel/cpu/common.c 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
cpu                81 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
cpu                83 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	if (cpu->x86_vendor != X86_VENDOR_INTEL ||
cpu                84 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	    !cpu_has(cpu, X86_FEATURE_EST))
cpu               275 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
cpu               276 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	if (get_cpu() != cpu) {
cpu               330 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100;
cpu               335 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	dprintk("cpu %d: performance percent %d\n", cpu, perf_percent);
cpu               341 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
cpu               345 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	dprintk("get_cur_freq_on_cpu (%d)\n", cpu);
cpu               353 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
cpu               385 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
cpu               395 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
cpu               451 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 		cpu_set(policy->cpu, cmd.mask);
cpu               456 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 		freqs.cpu = i;
cpu               465 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 				policy->cpu);
cpu               471 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 		freqs.cpu = i;
cpu               481 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
cpu               572 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	unsigned int cpu = policy->cpu;
cpu               575 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
cpu               584 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
cpu               585 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	per_cpu(drv_data, cpu) = data;
cpu               590 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	result = acpi_processor_register_performance(data->acpi_data, cpu);
cpu               611 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 		policy->cpus = per_cpu(cpu_core_map, cpu);
cpu               634 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 		if (!check_est_cpu(cpu)) {
cpu               685 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
cpu               689 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 		policy->cur = get_cur_freq_on_cpu(cpu);
cpu               706 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	dprintk("CPU%u - ACPI performance management activated.\n", cpu);
cpu               714 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
cpu               727 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	acpi_processor_unregister_performance(perf, cpu);
cpu               730 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	per_cpu(drv_data, cpu) = NULL;
cpu               737 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
cpu               742 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 		cpufreq_frequency_table_put_attr(policy->cpu);
cpu               743 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 		per_cpu(drv_data, policy->cpu) = NULL;
cpu               745 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 						      policy->cpu);
cpu               754 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
cpu               238 arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c 	if (cpu)
cpu               263 arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c 	freqs.old = nforce2_get(policy->cpu);
cpu               265 arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c 	freqs.cpu = 0;		/* Only one CPU on nForce2 platforms */
cpu               318 arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c 	if (policy->cpu != 0)
cpu               365 arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c 	policy->cur = nforce2_get(policy->cpu);
cpu                41 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 	if (cpu)
cpu                43 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 	centaur = eps_cpu[cpu];
cpu                61 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 	freqs.old = eps_get(cpu);
cpu                63 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 	freqs.cpu = cpu;
cpu               121 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 	unsigned int cpu = policy->cpu;
cpu               125 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 	if (unlikely(eps_cpu[cpu] == NULL))
cpu               127 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 	centaur = eps_cpu[cpu];
cpu               130 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 			&eps_cpu[cpu]->freq_table[0],
cpu               139 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 	ret = eps_set_state(centaur, cpu, dest_state);
cpu               148 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 			&eps_cpu[policy->cpu]->freq_table[0]);
cpu               168 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 	if (policy->cpu != 0)
cpu               296 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 	cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu);
cpu               302 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 	unsigned int cpu = policy->cpu;
cpu               306 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 	if (eps_cpu[cpu] == NULL)
cpu               308 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 	centaur = eps_cpu[cpu];
cpu               313 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 	eps_set_state(centaur, cpu, hi & 0xffff);
cpu               315 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 	cpufreq_frequency_table_put_attr(policy->cpu);
cpu               316 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 	kfree(eps_cpu[cpu]);
cpu               317 arch/x86/kernel/cpu/cpufreq/e_powersaver.c 	eps_cpu[cpu] = NULL;
cpu               126 arch/x86/kernel/cpu/cpufreq/elanfreq.c 	freqs.cpu = 0; /* elanfreq.c is UP only driver */
cpu               229 arch/x86/kernel/cpu/cpufreq/elanfreq.c 	cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
cpu               236 arch/x86/kernel/cpu/cpufreq/elanfreq.c 	cpufreq_frequency_table_put_attr(policy->cpu);
cpu               259 arch/x86/kernel/cpu/cpufreq/gx-suspmod.c 	freqs.cpu = 0;
cpu               336 arch/x86/kernel/cpu/cpufreq/gx-suspmod.c 	policy->cpu = 0;
cpu               376 arch/x86/kernel/cpu/cpufreq/gx-suspmod.c 	policy->cpu = 0;
cpu               397 arch/x86/kernel/cpu/cpufreq/gx-suspmod.c 	if (!policy || policy->cpu != 0)
cpu               415 arch/x86/kernel/cpu/cpufreq/gx-suspmod.c 	policy->cpu = 0;
cpu               273 arch/x86/kernel/cpu/cpufreq/longhaul.c 	freqs.cpu = 0; /* longhaul.c is UP only driver */
cpu               683 arch/x86/kernel/cpu/cpufreq/longhaul.c 	if (cpu)
cpu               932 arch/x86/kernel/cpu/cpufreq/longhaul.c 	cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
cpu               939 arch/x86/kernel/cpu/cpufreq/longhaul.c 	cpufreq_frequency_table_put_attr(policy->cpu);
cpu                63 arch/x86/kernel/cpu/cpufreq/longrun.c 	policy->cpu = 0;
cpu               133 arch/x86/kernel/cpu/cpufreq/longrun.c 	policy->cpu = 0;
cpu               149 arch/x86/kernel/cpu/cpufreq/longrun.c 	if (cpu)
cpu               265 arch/x86/kernel/cpu/cpufreq/longrun.c 	if (policy->cpu != 0)
cpu                61 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 	if (!cpu_online(cpu) || (newstate > DC_DISABLE) || (newstate == DC_RESV))
cpu                64 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 	rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
cpu                67 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 		dprintk("CPU#%d currently thermal throttled\n", cpu);
cpu                69 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 	if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT))
cpu                72 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 	rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
cpu                74 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 		dprintk("CPU#%d disabling modulation\n", cpu);
cpu                75 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 		wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
cpu                78 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 			cpu, ((125 * newstate) / 10));
cpu                86 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 		wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
cpu               118 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 	freqs.old = cpufreq_p4_get(policy->cpu);
cpu               126 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 		freqs.cpu = i;
cpu               138 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 		freqs.cpu = i;
cpu               198 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
cpu               203 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
cpu               213 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 		has_N44_O17_errata[policy->cpu] = 1;
cpu               224 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 		if ((i<2) && (has_N44_O17_errata[policy->cpu]))
cpu               229 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 	cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
cpu               241 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 	cpufreq_frequency_table_put_attr(policy->cpu);
cpu               249 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 	rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
cpu                82 arch/x86/kernel/cpu/cpufreq/powernow-k6.c 	freqs.cpu = 0; /* powernow-k6.c is UP only driver */
cpu               146 arch/x86/kernel/cpu/cpufreq/powernow-k6.c 	if (policy->cpu != 0)
cpu               169 arch/x86/kernel/cpu/cpufreq/powernow-k6.c 	cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu);
cpu               182 arch/x86/kernel/cpu/cpufreq/powernow-k6.c 	cpufreq_frequency_table_put_attr(policy->cpu);
cpu               258 arch/x86/kernel/cpu/cpufreq/powernow-k7.c 	freqs.cpu = 0;
cpu               559 arch/x86/kernel/cpu/cpufreq/powernow-k7.c 	if (cpu)
cpu               598 arch/x86/kernel/cpu/cpufreq/powernow-k7.c 	if (policy->cpu != 0)
cpu               644 arch/x86/kernel/cpu/cpufreq/powernow-k7.c 	cpufreq_frequency_table_get_attr(powernow_table, policy->cpu);
cpu               650 arch/x86/kernel/cpu/cpufreq/powernow-k7.c 	cpufreq_frequency_table_put_attr(policy->cpu);
cpu               483 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
cpu               485 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	if (smp_processor_id() != cpu) {
cpu               486 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
cpu               642 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
cpu               756 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
cpu               796 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
cpu               809 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
cpu               916 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 		acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
cpu               967 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 		freqs.cpu = i;
cpu               975 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 		freqs.cpu = i;
cpu               998 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 		freqs.cpu = i;
cpu              1006 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 		freqs.cpu = i;
cpu              1016 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
cpu              1030 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
cpu              1032 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	if (smp_processor_id() != pol->cpu) {
cpu              1033 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
cpu              1043 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 		pol->cpu, targfreq, pol->min, pol->max, relation);
cpu              1092 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
cpu              1107 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	if (!cpu_online(pol->cpu))
cpu              1110 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	if (!check_supported_cpu(pol->cpu))
cpu              1119 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	data->cpu = pol->cpu;
cpu              1141 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 		if (pol->cpu != 0) {
cpu              1156 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
cpu              1158 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	if (smp_processor_id() != pol->cpu) {
cpu              1159 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
cpu              1178 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 		pol->cpus = cpumask_of_cpu(pol->cpu);
cpu              1180 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 		pol->cpus = per_cpu(cpu_core_map, pol->cpu);
cpu              1203 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
cpu              1211 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	per_cpu(powernow_data, pol->cpu) = data;
cpu              1225 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
cpu              1232 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	cpufreq_frequency_table_put_attr(pol->cpu);
cpu              1247 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	first = first_cpu(per_cpu(cpu_core_map, cpu));
cpu              1253 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
cpu              1254 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 	if (smp_processor_id() != cpu) {
cpu              1256 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 			"limiting to CPU %d failed in powernowk8_get\n", cpu);
cpu                 9 arch/x86/kernel/cpu/cpufreq/powernow-k8.h 	unsigned int cpu;
cpu                62 arch/x86/kernel/cpu/cpufreq/sc520_freq.c 	freqs.cpu = 0; /* AMD Elan is UP */
cpu               121 arch/x86/kernel/cpu/cpufreq/sc520_freq.c 	cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu);
cpu               129 arch/x86/kernel/cpu/cpufreq/sc520_freq.c 	cpufreq_frequency_table_put_attr(policy->cpu);
cpu               235 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
cpu               239 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 		if (centrino_verify_cpu_id(cpu, model->cpu_id) &&
cpu               241 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 		     strcmp(cpu->x86_model_id, model->model_name) == 0))
cpu               248 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 		       cpu->x86_model_id);
cpu               255 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 		       cpu->x86_model_id);
cpu               260 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	per_cpu(centrino_model, policy->cpu) = model;
cpu               295 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
cpu               296 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	    (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
cpu               297 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	    (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
cpu               302 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	if ((!per_cpu(centrino_model, cpu)) ||
cpu               303 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	    (!per_cpu(centrino_model, cpu)->op_points))
cpu               308 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 		per_cpu(centrino_model, cpu)->op_points[i].frequency
cpu               311 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 		if (msr == per_cpu(centrino_model, cpu)->op_points[i].index)
cpu               312 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 			return per_cpu(centrino_model, cpu)->
cpu               316 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 		return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;
cpu               329 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
cpu               330 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	if (smp_processor_id() != cpu)
cpu               334 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	clock_freq = extract_clock(l, cpu, 0);
cpu               344 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 		clock_freq = extract_clock(l, cpu, 1);
cpu               354 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
cpu               361 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	if (cpu->x86_vendor != X86_VENDOR_INTEL ||
cpu               362 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	    !cpu_has(cpu, X86_FEATURE_EST))
cpu               365 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
cpu               368 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	if (policy->cpu != 0)
cpu               372 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 		if (centrino_verify_cpu_id(cpu, &cpu_ids[i]))
cpu               376 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 		per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];
cpu               378 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	if (!per_cpu(centrino_cpu, policy->cpu)) {
cpu               407 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	freq = get_cur_freq(policy->cpu);
cpu               415 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 		per_cpu(centrino_model, policy->cpu)->op_points);
cpu               420 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 		per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
cpu               427 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	unsigned int cpu = policy->cpu;
cpu               429 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	if (!per_cpu(centrino_model, cpu))
cpu               432 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	cpufreq_frequency_table_put_attr(cpu);
cpu               434 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	per_cpu(centrino_model, cpu) = NULL;
cpu               449 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 			per_cpu(centrino_model, policy->cpu)->op_points);
cpu               473 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	unsigned int	msr, oldmsr = 0, h = 0, cpu = policy->cpu;
cpu               486 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
cpu               492 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 			per_cpu(centrino_model, cpu)->op_points,
cpu               534 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 		msr = per_cpu(centrino_model, cpu)->op_points[newstate].index;
cpu               545 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 			freqs.old = extract_clock(oldmsr, cpu, 0);
cpu               546 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 			freqs.new = extract_clock(msr, cpu, 0);
cpu               552 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 				freqs.cpu = k;
cpu               575 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 		freqs.cpu = k;
cpu               598 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 			freqs.cpu = j;
cpu               649 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	struct cpuinfo_x86 *cpu = &cpu_data(0);
cpu               651 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 	if (!cpu_has(cpu, X86_FEATURE_EST))
cpu               247 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c 	return _speedstep_get(&cpumask_of_cpu(cpu));
cpu               272 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c 	freqs.cpu = policy->cpu;
cpu               283 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c 		freqs.cpu = i;
cpu               296 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c 		freqs.cpu = i;
cpu               325 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c 	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
cpu               357 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c         cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
cpu               365 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c 	cpufreq_frequency_table_put_attr(policy->cpu);
cpu               233 arch/x86/kernel/cpu/cpufreq/speedstep-smi.c 	freqs.cpu = 0; /* speedstep.c is UP only driver */
cpu               265 arch/x86/kernel/cpu/cpufreq/speedstep-smi.c 	if (policy->cpu != 0)
cpu               309 arch/x86/kernel/cpu/cpufreq/speedstep-smi.c 	cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
cpu               316 arch/x86/kernel/cpu/cpufreq/speedstep-smi.c 	cpufreq_frequency_table_put_attr(policy->cpu);
cpu               322 arch/x86/kernel/cpu/cpufreq/speedstep-smi.c 	if (cpu)
cpu               167 arch/x86/kernel/cpu/intel.c 	int cpu = smp_processor_id();
cpu               175 arch/x86/kernel/cpu/intel.c 	numa_set_node(cpu, node);
cpu               177 arch/x86/kernel/cpu/intel.c 	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
cpu               324 arch/x86/kernel/cpu/intel_cacheinfo.c 	unsigned int cpu = c->cpu_index;
cpu               446 arch/x86/kernel/cpu/intel_cacheinfo.c 		per_cpu(cpu_llc_id, cpu) = l2_id;
cpu               453 arch/x86/kernel/cpu/intel_cacheinfo.c 		per_cpu(cpu_llc_id, cpu) = l3_id;
cpu               488 arch/x86/kernel/cpu/intel_cacheinfo.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               490 arch/x86/kernel/cpu/intel_cacheinfo.c 	this_leaf = CPUID4_INFO_IDX(cpu, index);
cpu               494 arch/x86/kernel/cpu/intel_cacheinfo.c 		cpu_set(cpu, this_leaf->shared_cpu_map);
cpu               502 arch/x86/kernel/cpu/intel_cacheinfo.c 				if (i != cpu && per_cpu(cpuid4_info, i))  {
cpu               504 arch/x86/kernel/cpu/intel_cacheinfo.c 					cpu_set(cpu, sibling_leaf->shared_cpu_map);
cpu               515 arch/x86/kernel/cpu/intel_cacheinfo.c 	this_leaf = CPUID4_INFO_IDX(cpu, index);
cpu               518 arch/x86/kernel/cpu/intel_cacheinfo.c 		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
cpu               531 arch/x86/kernel/cpu/intel_cacheinfo.c 		cache_remove_shared_cpu_map(cpu, i);
cpu               533 arch/x86/kernel/cpu/intel_cacheinfo.c 	kfree(per_cpu(cpuid4_info, cpu));
cpu               534 arch/x86/kernel/cpu/intel_cacheinfo.c 	per_cpu(cpuid4_info, cpu) = NULL;
cpu               547 arch/x86/kernel/cpu/intel_cacheinfo.c 	per_cpu(cpuid4_info, cpu) = kzalloc(
cpu               549 arch/x86/kernel/cpu/intel_cacheinfo.c 	if (per_cpu(cpuid4_info, cpu) == NULL)
cpu               553 arch/x86/kernel/cpu/intel_cacheinfo.c 	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
cpu               559 arch/x86/kernel/cpu/intel_cacheinfo.c 		this_leaf = CPUID4_INFO_IDX(cpu, j);
cpu               565 arch/x86/kernel/cpu/intel_cacheinfo.c 				cache_remove_shared_cpu_map(cpu, i);
cpu               568 arch/x86/kernel/cpu/intel_cacheinfo.c 		cache_shared_cpu_map_setup(cpu, j);
cpu               574 arch/x86/kernel/cpu/intel_cacheinfo.c 		kfree(per_cpu(cpuid4_info, cpu));
cpu               575 arch/x86/kernel/cpu/intel_cacheinfo.c 		per_cpu(cpuid4_info, cpu) = NULL;
cpu               593 arch/x86/kernel/cpu/intel_cacheinfo.c 	unsigned int cpu;
cpu               800 arch/x86/kernel/cpu/intel_cacheinfo.c 		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
cpu               814 arch/x86/kernel/cpu/intel_cacheinfo.c 		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
cpu               836 arch/x86/kernel/cpu/intel_cacheinfo.c 	kfree(per_cpu(cache_kobject, cpu));
cpu               837 arch/x86/kernel/cpu/intel_cacheinfo.c 	kfree(per_cpu(index_kobject, cpu));
cpu               838 arch/x86/kernel/cpu/intel_cacheinfo.c 	per_cpu(cache_kobject, cpu) = NULL;
cpu               839 arch/x86/kernel/cpu/intel_cacheinfo.c 	per_cpu(index_kobject, cpu) = NULL;
cpu               840 arch/x86/kernel/cpu/intel_cacheinfo.c 	free_cache_attributes(cpu);
cpu               850 arch/x86/kernel/cpu/intel_cacheinfo.c 	err = detect_cache_attributes(cpu);
cpu               855 arch/x86/kernel/cpu/intel_cacheinfo.c 	per_cpu(cache_kobject, cpu) =
cpu               857 arch/x86/kernel/cpu/intel_cacheinfo.c 	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
cpu               860 arch/x86/kernel/cpu/intel_cacheinfo.c 	per_cpu(index_kobject, cpu) = kzalloc(
cpu               862 arch/x86/kernel/cpu/intel_cacheinfo.c 	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
cpu               868 arch/x86/kernel/cpu/intel_cacheinfo.c 	cpuid4_cache_sysfs_exit(cpu);
cpu               877 arch/x86/kernel/cpu/intel_cacheinfo.c 	unsigned int cpu = sys_dev->id;
cpu               882 arch/x86/kernel/cpu/intel_cacheinfo.c 	retval = cpuid4_cache_sysfs_init(cpu);
cpu               886 arch/x86/kernel/cpu/intel_cacheinfo.c 	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
cpu               890 arch/x86/kernel/cpu/intel_cacheinfo.c 		cpuid4_cache_sysfs_exit(cpu);
cpu               895 arch/x86/kernel/cpu/intel_cacheinfo.c 		this_object = INDEX_KOBJECT_PTR(cpu,i);
cpu               896 arch/x86/kernel/cpu/intel_cacheinfo.c 		this_object->cpu = cpu;
cpu               900 arch/x86/kernel/cpu/intel_cacheinfo.c 					      per_cpu(cache_kobject, cpu),
cpu               904 arch/x86/kernel/cpu/intel_cacheinfo.c 				kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
cpu               906 arch/x86/kernel/cpu/intel_cacheinfo.c 			kobject_put(per_cpu(cache_kobject, cpu));
cpu               907 arch/x86/kernel/cpu/intel_cacheinfo.c 			cpuid4_cache_sysfs_exit(cpu);
cpu               912 arch/x86/kernel/cpu/intel_cacheinfo.c 	cpu_set(cpu, cache_dev_map);
cpu               914 arch/x86/kernel/cpu/intel_cacheinfo.c 	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
cpu               920 arch/x86/kernel/cpu/intel_cacheinfo.c 	unsigned int cpu = sys_dev->id;
cpu               923 arch/x86/kernel/cpu/intel_cacheinfo.c 	if (per_cpu(cpuid4_info, cpu) == NULL)
cpu               925 arch/x86/kernel/cpu/intel_cacheinfo.c 	if (!cpu_isset(cpu, cache_dev_map))
cpu               927 arch/x86/kernel/cpu/intel_cacheinfo.c 	cpu_clear(cpu, cache_dev_map);
cpu               930 arch/x86/kernel/cpu/intel_cacheinfo.c 		kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
cpu               931 arch/x86/kernel/cpu/intel_cacheinfo.c 	kobject_put(per_cpu(cache_kobject, cpu));
cpu               932 arch/x86/kernel/cpu/intel_cacheinfo.c 	cpuid4_cache_sysfs_exit(cpu);
cpu               938 arch/x86/kernel/cpu/intel_cacheinfo.c 	unsigned int cpu = (unsigned long)hcpu;
cpu               941 arch/x86/kernel/cpu/intel_cacheinfo.c 	sys_dev = get_cpu_sysdev(cpu);
cpu               113 arch/x86/kernel/cpu/mcheck/mce_64.c 	       m->cpu, m->mcgstatus, m->bank, m->status);
cpu               203 arch/x86/kernel/cpu/mcheck/mce_64.c 	m.cpu = smp_processor_id();
cpu               340 arch/x86/kernel/cpu/mcheck/mce_64.c 	m.cpu = cpu;
cpu               627 arch/x86/kernel/cpu/mcheck/mce_64.c 		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
cpu               762 arch/x86/kernel/cpu/mcheck/mce_64.c void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinitdata;
cpu               836 arch/x86/kernel/cpu/mcheck/mce_64.c 	memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
cpu               837 arch/x86/kernel/cpu/mcheck/mce_64.c 	per_cpu(device_mce,cpu).id = cpu;
cpu               838 arch/x86/kernel/cpu/mcheck/mce_64.c 	per_cpu(device_mce,cpu).cls = &mce_sysclass;
cpu               840 arch/x86/kernel/cpu/mcheck/mce_64.c 	err = sysdev_register(&per_cpu(device_mce,cpu));
cpu               845 arch/x86/kernel/cpu/mcheck/mce_64.c 		err = sysdev_create_file(&per_cpu(device_mce,cpu),
cpu               850 arch/x86/kernel/cpu/mcheck/mce_64.c 	cpu_set(cpu, mce_device_initialized);
cpu               855 arch/x86/kernel/cpu/mcheck/mce_64.c 		sysdev_remove_file(&per_cpu(device_mce,cpu),
cpu               858 arch/x86/kernel/cpu/mcheck/mce_64.c 	sysdev_unregister(&per_cpu(device_mce,cpu));
cpu               867 arch/x86/kernel/cpu/mcheck/mce_64.c 	if (!cpu_isset(cpu, mce_device_initialized))
cpu               871 arch/x86/kernel/cpu/mcheck/mce_64.c 		sysdev_remove_file(&per_cpu(device_mce,cpu),
cpu               873 arch/x86/kernel/cpu/mcheck/mce_64.c 	sysdev_unregister(&per_cpu(device_mce,cpu));
cpu               874 arch/x86/kernel/cpu/mcheck/mce_64.c 	cpu_clear(cpu, mce_device_initialized);
cpu               881 arch/x86/kernel/cpu/mcheck/mce_64.c 	unsigned int cpu = (unsigned long)hcpu;
cpu               886 arch/x86/kernel/cpu/mcheck/mce_64.c 		mce_create_device(cpu);
cpu               888 arch/x86/kernel/cpu/mcheck/mce_64.c 			threshold_cpu_callback(action, cpu);
cpu               893 arch/x86/kernel/cpu/mcheck/mce_64.c 			threshold_cpu_callback(action, cpu);
cpu               894 arch/x86/kernel/cpu/mcheck/mce_64.c 		mce_remove_device(cpu);
cpu                53 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	unsigned int cpu;
cpu               120 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	unsigned int cpu = smp_processor_id();
cpu               152 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 				per_cpu(bank_map, cpu) |= (1 << bank);
cpu               191 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	m.cpu = smp_processor_id();
cpu               195 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 		if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
cpu               259 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	cpu_set(cpu, *newmask);
cpu               286 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	affinity_set(b->cpu, &oldmask, &newmask);
cpu               309 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	affinity_set(b->cpu, &oldmask, &newmask);
cpu               320 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	affinity_set(b->cpu, &oldmask, &newmask);
cpu               331 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	affinity_set(b->cpu, &oldmask, &newmask);
cpu               422 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	b->cpu = cpu;
cpu               429 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	if (per_cpu(threshold_banks, cpu)[bank]->blocks)
cpu               431 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
cpu               433 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
cpu               436 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 				   per_cpu(threshold_banks, cpu)[bank]->kobj,
cpu               449 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	err = allocate_threshold_blocks(cpu, bank, ++block, address);
cpu               477 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
cpu               478 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 		i = first_cpu(per_cpu(cpu_core_map, cpu));
cpu               485 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 		if (per_cpu(threshold_banks, cpu)[bank])
cpu               493 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 		err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj,
cpu               498 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 		b->cpus = per_cpu(cpu_core_map, cpu);
cpu               499 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 		per_cpu(threshold_banks, cpu)[bank] = b;
cpu               510 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
cpu               517 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	b->cpus = per_cpu(cpu_core_map, cpu);
cpu               520 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	per_cpu(threshold_banks, cpu)[bank] = b;
cpu               522 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	affinity_set(cpu, &oldmask, &newmask);
cpu               523 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	err = allocate_threshold_blocks(cpu, bank, 0,
cpu               531 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 		if (i == cpu)
cpu               545 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	per_cpu(threshold_banks, cpu)[bank] = NULL;
cpu               558 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
cpu               560 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 		err = threshold_create_bank(cpu, bank);
cpu               579 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];
cpu               590 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
cpu               591 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
cpu               600 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	b = per_cpu(threshold_banks, cpu)[bank];
cpu               612 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	if (shared_bank[bank] && b->blocks->cpu != cpu) {
cpu               613 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 		sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
cpu               614 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 		per_cpu(threshold_banks, cpu)[bank] = NULL;
cpu               621 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 		if (i == cpu)
cpu               628 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	deallocate_threshold_block(cpu, bank);
cpu               634 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	per_cpu(threshold_banks, cpu)[bank] = NULL;
cpu               642 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
cpu               644 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 		threshold_remove_bank(cpu, bank);
cpu               652 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 	if (cpu >= NR_CPUS)
cpu               658 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 		threshold_create_device(cpu);
cpu               662 arch/x86/kernel/cpu/mcheck/mce_amd_64.c 		threshold_remove_device(cpu);
cpu                37 arch/x86/kernel/cpu/mcheck/mce_intel_64.c 	unsigned int cpu = smp_processor_id();
cpu                53 arch/x86/kernel/cpu/mcheck/mce_intel_64.c 		       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
cpu                63 arch/x86/kernel/cpu/mcheck/mce_intel_64.c 		       "installed\n", cpu, (h & APIC_VECTOR_MASK));
cpu                80 arch/x86/kernel/cpu/mcheck/mce_intel_64.c 		cpu, tm2 ? "TM2" : "TM1");
cpu                72 arch/x86/kernel/cpu/mcheck/p4.c 	unsigned int cpu = smp_processor_id();
cpu                90 arch/x86/kernel/cpu/mcheck/p4.c 				cpu);
cpu                98 arch/x86/kernel/cpu/mcheck/p4.c 			cpu, (h & APIC_VECTOR_MASK));
cpu               118 arch/x86/kernel/cpu/mcheck/p4.c 	printk(KERN_INFO "CPU%d: Thermal monitoring enabled\n", cpu);
cpu                41 arch/x86/kernel/cpu/mcheck/therm_throt.c 	unsigned int cpu = dev->id;                                          \
cpu                45 arch/x86/kernel/cpu/mcheck/therm_throt.c 	if (cpu_online(cpu))                                                 \
cpu                47 arch/x86/kernel/cpu/mcheck/therm_throt.c 			      per_cpu(thermal_throttle_##name, cpu));        \
cpu                87 arch/x86/kernel/cpu/mcheck/therm_throt.c 	unsigned int cpu = smp_processor_id();
cpu               101 arch/x86/kernel/cpu/mcheck/therm_throt.c 		       "cpu clock throttled (total events = %lu)\n", cpu,
cpu               106 arch/x86/kernel/cpu/mcheck/therm_throt.c 		printk(KERN_CRIT "CPU%d: Temperature/speed normal\n", cpu);
cpu               132 arch/x86/kernel/cpu/mcheck/therm_throt.c 	unsigned int cpu = (unsigned long)hcpu;
cpu               136 arch/x86/kernel/cpu/mcheck/therm_throt.c 	sys_dev = get_cpu_sysdev(cpu);
cpu               164 arch/x86/kernel/cpu/mcheck/therm_throt.c 	unsigned int cpu = 0;
cpu               176 arch/x86/kernel/cpu/mcheck/therm_throt.c 	for_each_online_cpu(cpu) {
cpu               177 arch/x86/kernel/cpu/mcheck/therm_throt.c 		err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
cpu                18 arch/x86/kernel/cpu/proc.c 			   cpus_weight(per_cpu(cpu_core_map, cpu)));
cpu                60 arch/x86/kernel/cpu/proc.c 			   cpus_weight(per_cpu(cpu_core_map, cpu)));
cpu                83 arch/x86/kernel/cpu/proc.c 	unsigned int cpu = 0;
cpu                87 arch/x86/kernel/cpu/proc.c 	cpu = c->cpu_index;
cpu                94 arch/x86/kernel/cpu/proc.c 		   cpu,
cpu               106 arch/x86/kernel/cpu/proc.c 		unsigned int freq = cpufreq_quick_get(cpu);
cpu               118 arch/x86/kernel/cpu/proc.c 	show_cpuinfo_core(m, c, cpu);
cpu                89 arch/x86/kernel/cpuid.c 	int cpu = iminor(file->f_path.dentry->d_inode);
cpu               100 arch/x86/kernel/cpuid.c 		err = smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
cpu               117 arch/x86/kernel/cpuid.c 	unsigned int cpu;
cpu               123 arch/x86/kernel/cpuid.c 	cpu = iminor(file->f_path.dentry->d_inode);
cpu               124 arch/x86/kernel/cpuid.c 	if (cpu >= NR_CPUS || !cpu_online(cpu)) {
cpu               128 arch/x86/kernel/cpuid.c 	c = &cpu_data(cpu);
cpu               150 arch/x86/kernel/cpuid.c 	dev = device_create_drvdata(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu),
cpu               151 arch/x86/kernel/cpuid.c 				    NULL, "cpu%d", cpu);
cpu               157 arch/x86/kernel/cpuid.c 	device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
cpu               164 arch/x86/kernel/cpuid.c 	unsigned int cpu = (unsigned long)hcpu;
cpu               169 arch/x86/kernel/cpuid.c 		err = cpuid_device_create(cpu);
cpu               174 arch/x86/kernel/cpuid.c 		cpuid_device_destroy(cpu);
cpu               225 arch/x86/kernel/cpuid.c 	int cpu = 0;
cpu               227 arch/x86/kernel/cpuid.c 	for_each_online_cpu(cpu)
cpu               228 arch/x86/kernel/cpuid.c 		cpuid_device_destroy(cpu);
cpu                45 arch/x86/kernel/crash.c 	int cpu;
cpu                51 arch/x86/kernel/crash.c 	cpu = raw_smp_processor_id();
cpu                57 arch/x86/kernel/crash.c 	if (cpu == crashing_cpu)
cpu                67 arch/x86/kernel/crash.c 	crash_save_cpu(regs, cpu);
cpu                54 arch/x86/kernel/dumpstack_64.c 		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
cpu               164 arch/x86/kernel/dumpstack_64.c 	const unsigned cpu = get_cpu();
cpu               165 arch/x86/kernel/dumpstack_64.c 	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
cpu               200 arch/x86/kernel/dumpstack_64.c 		estack_end = in_exception_stack(cpu, (unsigned long)stack,
cpu               306 arch/x86/kernel/dumpstack_64.c 	const int cpu = smp_processor_id();
cpu               308 arch/x86/kernel/dumpstack_64.c 		(unsigned long *) (cpu_pda(cpu)->irqstackptr);
cpu               310 arch/x86/kernel/dumpstack_64.c 		(unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
cpu               375 arch/x86/kernel/dumpstack_64.c 	const int cpu = smp_processor_id();
cpu               376 arch/x86/kernel/dumpstack_64.c 	struct task_struct *cur = cpu_pda(cpu)->pcurrent;
cpu               379 arch/x86/kernel/dumpstack_64.c 	printk("CPU %d ", cpu);
cpu               437 arch/x86/kernel/dumpstack_64.c 	int cpu;
cpu               444 arch/x86/kernel/dumpstack_64.c 	cpu = smp_processor_id();
cpu               446 arch/x86/kernel/dumpstack_64.c 		if (cpu == die_owner)
cpu               452 arch/x86/kernel/dumpstack_64.c 	die_owner = cpu;
cpu               336 arch/x86/kernel/es7000_32.c 	psaival = (0x1000000 | vect | cpu);
cpu               196 arch/x86/kernel/genapic_flat_64.c 	return cpumask_of_cpu(cpu);
cpu               219 arch/x86/kernel/genapic_flat_64.c 	int cpu;
cpu               225 arch/x86/kernel/genapic_flat_64.c 	cpu = first_cpu(cpumask);
cpu               226 arch/x86/kernel/genapic_flat_64.c 	if ((unsigned)cpu < nr_cpu_ids)
cpu               227 arch/x86/kernel/genapic_flat_64.c 		return per_cpu(x86_cpu_to_apicid, cpu);
cpu                36 arch/x86/kernel/genx2apic_cluster.c 	cpu_set(cpu, domain);
cpu                94 arch/x86/kernel/genx2apic_cluster.c 	int cpu;
cpu               100 arch/x86/kernel/genx2apic_cluster.c 	cpu = first_cpu(cpumask);
cpu               101 arch/x86/kernel/genx2apic_cluster.c 	if ((unsigned)cpu < NR_CPUS)
cpu               102 arch/x86/kernel/genx2apic_cluster.c 		return per_cpu(x86_cpu_to_logical_apicid, cpu);
cpu               135 arch/x86/kernel/genx2apic_cluster.c 	int cpu = smp_processor_id();
cpu               137 arch/x86/kernel/genx2apic_cluster.c 	per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
cpu                40 arch/x86/kernel/genx2apic_phys.c 	cpu_set(cpu, domain);
cpu                92 arch/x86/kernel/genx2apic_phys.c 	int cpu;
cpu                98 arch/x86/kernel/genx2apic_phys.c 	cpu = first_cpu(cpumask);
cpu                99 arch/x86/kernel/genx2apic_phys.c 	if ((unsigned)cpu < NR_CPUS)
cpu               100 arch/x86/kernel/genx2apic_phys.c 		return per_cpu(x86_cpu_to_apicid, cpu);
cpu                87 arch/x86/kernel/genx2apic_uv_x.c 	cpu_set(cpu, domain);
cpu               117 arch/x86/kernel/genx2apic_uv_x.c 	apicid = per_cpu(x86_cpu_to_apicid, cpu);
cpu               129 arch/x86/kernel/genx2apic_uv_x.c 	unsigned int cpu;
cpu               131 arch/x86/kernel/genx2apic_uv_x.c 	for_each_possible_cpu(cpu)
cpu               132 arch/x86/kernel/genx2apic_uv_x.c 		if (cpu_isset(cpu, mask))
cpu               133 arch/x86/kernel/genx2apic_uv_x.c 			uv_send_IPI_one(cpu, vector);
cpu               162 arch/x86/kernel/genx2apic_uv_x.c 	int cpu;
cpu               168 arch/x86/kernel/genx2apic_uv_x.c 	cpu = first_cpu(cpumask);
cpu               169 arch/x86/kernel/genx2apic_uv_x.c 	if ((unsigned)cpu < nr_cpu_ids)
cpu               170 arch/x86/kernel/genx2apic_uv_x.c 		return per_cpu(x86_cpu_to_apicid, cpu);
cpu               366 arch/x86/kernel/genx2apic_uv_x.c 	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
cpu               417 arch/x86/kernel/genx2apic_uv_x.c 	for_each_present_cpu(cpu) {
cpu               418 arch/x86/kernel/genx2apic_uv_x.c 		nid = cpu_to_node(cpu);
cpu               419 arch/x86/kernel/genx2apic_uv_x.c 		pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
cpu               424 arch/x86/kernel/genx2apic_uv_x.c 		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
cpu               425 arch/x86/kernel/genx2apic_uv_x.c 		uv_cpu_hub_info(cpu)->lowmem_remap_top =
cpu               427 arch/x86/kernel/genx2apic_uv_x.c 		uv_cpu_hub_info(cpu)->m_val = m_val;
cpu               428 arch/x86/kernel/genx2apic_uv_x.c 		uv_cpu_hub_info(cpu)->n_val = m_val;
cpu               429 arch/x86/kernel/genx2apic_uv_x.c 		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
cpu               430 arch/x86/kernel/genx2apic_uv_x.c 		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
cpu               431 arch/x86/kernel/genx2apic_uv_x.c 		uv_cpu_hub_info(cpu)->pnode = pnode;
cpu               432 arch/x86/kernel/genx2apic_uv_x.c 		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
cpu               433 arch/x86/kernel/genx2apic_uv_x.c 		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
cpu               434 arch/x86/kernel/genx2apic_uv_x.c 		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
cpu               435 arch/x86/kernel/genx2apic_uv_x.c 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
cpu               436 arch/x86/kernel/genx2apic_uv_x.c 		uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */
cpu               438 arch/x86/kernel/genx2apic_uv_x.c 		uv_cpu_to_blade[cpu] = blade;
cpu               443 arch/x86/kernel/genx2apic_uv_x.c 			cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
cpu               393 arch/x86/kernel/io_apic_32.c #define CPU_IRQ(cpu)		(irq_cpu_data[cpu].irq)
cpu               394 arch/x86/kernel/io_apic_32.c #define LAST_CPU_IRQ(cpu, irq)   (irq_cpu_data[cpu].last_irq[irq])
cpu               395 arch/x86/kernel/io_apic_32.c #define IRQ_DELTA(cpu, irq) 	(irq_cpu_data[cpu].irq_delta[irq])
cpu               398 arch/x86/kernel/io_apic_32.c 	(idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
cpu               400 arch/x86/kernel/io_apic_32.c #define IRQ_ALLOWED(cpu, allowed_mask)	cpu_isset(cpu, allowed_mask)
cpu               417 arch/x86/kernel/io_apic_32.c 	int cpu = curr_cpu;
cpu               422 arch/x86/kernel/io_apic_32.c 		if (unlikely(cpu == curr_cpu))
cpu               426 arch/x86/kernel/io_apic_32.c 			cpu++;
cpu               427 arch/x86/kernel/io_apic_32.c 			if (cpu >= NR_CPUS)
cpu               428 arch/x86/kernel/io_apic_32.c 				cpu = 0;
cpu               430 arch/x86/kernel/io_apic_32.c 			cpu--;
cpu               431 arch/x86/kernel/io_apic_32.c 			if (cpu == -1)
cpu               432 arch/x86/kernel/io_apic_32.c 				cpu = NR_CPUS-1;
cpu               434 arch/x86/kernel/io_apic_32.c 	} while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu, allowed_mask) ||
cpu               435 arch/x86/kernel/io_apic_32.c 			(search_idle && !IDLE_ENOUGH(cpu, now)));
cpu               437 arch/x86/kernel/io_apic_32.c 	return cpu;
cpu               450 arch/x86/kernel/io_apic_32.c 	new_cpu = move(cpu, allowed_mask, now, 1);
cpu               451 arch/x86/kernel/io_apic_32.c 	if (cpu != new_cpu)
cpu               801 arch/x86/kernel/io_apic_64.c 	int cpu;
cpu               821 arch/x86/kernel/io_apic_64.c 	for_each_cpu_mask_nr(cpu, mask) {
cpu               826 arch/x86/kernel/io_apic_64.c 		domain = vector_allocation_domain(cpu);
cpu               876 arch/x86/kernel/io_apic_64.c 	int cpu, vector;
cpu               884 arch/x86/kernel/io_apic_64.c 	for_each_cpu_mask_nr(cpu, mask)
cpu               885 arch/x86/kernel/io_apic_64.c 		per_cpu(vector_irq, cpu)[vector] = -1;
cpu               899 arch/x86/kernel/io_apic_64.c 		if (!cpu_isset(cpu, irq_cfg[irq].domain))
cpu               902 arch/x86/kernel/io_apic_64.c 		per_cpu(vector_irq, cpu)[vector] = irq;
cpu               906 arch/x86/kernel/io_apic_64.c 		irq = per_cpu(vector_irq, cpu)[vector];
cpu               909 arch/x86/kernel/io_apic_64.c 		if (!cpu_isset(cpu, irq_cfg[irq].domain))
cpu               910 arch/x86/kernel/io_apic_64.c 			per_cpu(vector_irq, cpu)[vector] = -1;
cpu               151 arch/x86/kernel/irq_32.c 	if (hardirq_ctx[cpu])
cpu               154 arch/x86/kernel/irq_32.c 	irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
cpu               157 arch/x86/kernel/irq_32.c 	irqctx->tinfo.cpu		= cpu;
cpu               161 arch/x86/kernel/irq_32.c 	hardirq_ctx[cpu] = irqctx;
cpu               163 arch/x86/kernel/irq_32.c 	irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
cpu               166 arch/x86/kernel/irq_32.c 	irqctx->tinfo.cpu		= cpu;
cpu               170 arch/x86/kernel/irq_32.c 	softirq_ctx[cpu] = irqctx;
cpu               173 arch/x86/kernel/irq_32.c 	       cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
cpu               178 arch/x86/kernel/irq_32.c 	hardirq_ctx[cpu] = NULL;
cpu               362 arch/x86/kernel/irq_32.c 	u64 sum = nmi_count(cpu);
cpu               365 arch/x86/kernel/irq_32.c 	sum += per_cpu(irq_stat, cpu).apic_timer_irqs;
cpu               368 arch/x86/kernel/irq_32.c 	sum += per_cpu(irq_stat, cpu).irq_resched_count;
cpu               369 arch/x86/kernel/irq_32.c 	sum += per_cpu(irq_stat, cpu).irq_call_count;
cpu               370 arch/x86/kernel/irq_32.c 	sum += per_cpu(irq_stat, cpu).irq_tlb_count;
cpu               373 arch/x86/kernel/irq_32.c 	sum += per_cpu(irq_stat, cpu).irq_thermal_count;
cpu               376 arch/x86/kernel/irq_32.c 	sum += per_cpu(irq_stat, cpu).irq_spurious_count;
cpu               162 arch/x86/kernel/irq_64.c 	u64 sum = cpu_pda(cpu)->__nmi_count;
cpu               164 arch/x86/kernel/irq_64.c 	sum += cpu_pda(cpu)->apic_timer_irqs;
cpu               166 arch/x86/kernel/irq_64.c 	sum += cpu_pda(cpu)->irq_resched_count;
cpu               167 arch/x86/kernel/irq_64.c 	sum += cpu_pda(cpu)->irq_call_count;
cpu               168 arch/x86/kernel/irq_64.c 	sum += cpu_pda(cpu)->irq_tlb_count;
cpu               171 arch/x86/kernel/irq_64.c 	sum += cpu_pda(cpu)->irq_thermal_count;
cpu               172 arch/x86/kernel/irq_64.c 	sum += cpu_pda(cpu)->irq_threshold_count;
cpu               174 arch/x86/kernel/irq_64.c 	sum += cpu_pda(cpu)->irq_spurious_count;
cpu                93 arch/x86/kernel/kvmclock.c 	int cpu = smp_processor_id();
cpu                95 arch/x86/kernel/kvmclock.c 	low = (int)__pa(&per_cpu(hv_clock, cpu)) | 1;
cpu                96 arch/x86/kernel/kvmclock.c 	high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32);
cpu                98 arch/x86/kernel/kvmclock.c 	       cpu, high, low, txt);
cpu                95 arch/x86/kernel/microcode_amd.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               101 arch/x86/kernel/microcode_amd.c 		       cpu);
cpu               136 arch/x86/kernel/microcode_amd.c 		       "not found in equivalent cpu table \n", cpu);
cpu               144 arch/x86/kernel/microcode_amd.c 			cpu, mc_header->processor_rev_id[0],
cpu               152 arch/x86/kernel/microcode_amd.c 			cpu, mc_header->processor_rev_id[1],
cpu               165 arch/x86/kernel/microcode_amd.c 			printk(KERN_ERR "microcode: CPU%d NB mismatch \n", cpu);
cpu               179 arch/x86/kernel/microcode_amd.c 			printk(KERN_ERR "microcode: CPU%d SB mismatch \n", cpu);
cpu               203 arch/x86/kernel/microcode_amd.c 	BUG_ON(cpu_num != cpu);
cpu               327 arch/x86/kernel/microcode_amd.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               351 arch/x86/kernel/microcode_amd.c 		if (get_matching_microcode(cpu, mc, new_rev)) {
cpu               370 arch/x86/kernel/microcode_amd.c 				cpu, new_rev, uci->cpu_sig.rev);
cpu               393 arch/x86/kernel/microcode_amd.c 	BUG_ON(cpu != raw_smp_processor_id());
cpu               401 arch/x86/kernel/microcode_amd.c 	ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size,
cpu               418 arch/x86/kernel/microcode_amd.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               115 arch/x86/kernel/microcode_core.c 	int cpu;
cpu               119 arch/x86/kernel/microcode_core.c 	for_each_online_cpu(cpu) {
cpu               120 arch/x86/kernel/microcode_core.c 		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               125 arch/x86/kernel/microcode_core.c 		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
cpu               126 arch/x86/kernel/microcode_core.c 		error = microcode_ops->request_microcode_user(cpu, buf, size);
cpu               130 arch/x86/kernel/microcode_core.c 			microcode_ops->apply_microcode(cpu);
cpu               216 arch/x86/kernel/microcode_core.c 	int cpu = dev->id;
cpu               224 arch/x86/kernel/microcode_core.c 		if (cpu_online(cpu)) {
cpu               225 arch/x86/kernel/microcode_core.c 			set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
cpu               228 arch/x86/kernel/microcode_core.c 				err = microcode_ops->request_microcode_fw(cpu,
cpu               231 arch/x86/kernel/microcode_core.c 					microcode_ops->apply_microcode(cpu);
cpu               277 arch/x86/kernel/microcode_core.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               280 arch/x86/kernel/microcode_core.c 	microcode_ops->microcode_fini_cpu(cpu);
cpu               287 arch/x86/kernel/microcode_core.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               290 arch/x86/kernel/microcode_core.c 	if (!microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig))
cpu               296 arch/x86/kernel/microcode_core.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               299 arch/x86/kernel/microcode_core.c 	pr_debug("microcode: CPU%d resumed\n", cpu);
cpu               308 arch/x86/kernel/microcode_core.c 	if (microcode_ops->collect_cpu_info(cpu, &nsig)) {
cpu               309 arch/x86/kernel/microcode_core.c 		microcode_fini_cpu(cpu);
cpu               314 arch/x86/kernel/microcode_core.c 		microcode_fini_cpu(cpu);
cpu               324 arch/x86/kernel/microcode_core.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               332 arch/x86/kernel/microcode_core.c 		err = microcode_resume_cpu(cpu);
cpu               334 arch/x86/kernel/microcode_core.c 		collect_cpu_info(cpu);
cpu               336 arch/x86/kernel/microcode_core.c 			err = microcode_ops->request_microcode_fw(cpu,
cpu               340 arch/x86/kernel/microcode_core.c 		microcode_ops->apply_microcode(cpu);
cpu               347 arch/x86/kernel/microcode_core.c 	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
cpu               349 arch/x86/kernel/microcode_core.c 	BUG_ON(raw_smp_processor_id() != cpu);
cpu               352 arch/x86/kernel/microcode_core.c 	microcode_update_cpu(cpu);
cpu               360 arch/x86/kernel/microcode_core.c 	int err, cpu = sys_dev->id;
cpu               361 arch/x86/kernel/microcode_core.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               363 arch/x86/kernel/microcode_core.c 	if (!cpu_online(cpu))
cpu               366 arch/x86/kernel/microcode_core.c 	pr_debug("microcode: CPU%d added\n", cpu);
cpu               373 arch/x86/kernel/microcode_core.c 	microcode_init_cpu(cpu);
cpu               379 arch/x86/kernel/microcode_core.c 	int cpu = sys_dev->id;
cpu               381 arch/x86/kernel/microcode_core.c 	if (!cpu_online(cpu))
cpu               384 arch/x86/kernel/microcode_core.c 	pr_debug("microcode: CPU%d removed\n", cpu);
cpu               385 arch/x86/kernel/microcode_core.c 	microcode_fini_cpu(cpu);
cpu               392 arch/x86/kernel/microcode_core.c 	int cpu = dev->id;
cpu               394 arch/x86/kernel/microcode_core.c 	if (!cpu_online(cpu))
cpu               411 arch/x86/kernel/microcode_core.c 	unsigned int cpu = (unsigned long)hcpu;
cpu               414 arch/x86/kernel/microcode_core.c 	sys_dev = get_cpu_sysdev(cpu);
cpu               418 arch/x86/kernel/microcode_core.c 		microcode_init_cpu(cpu);
cpu               421 arch/x86/kernel/microcode_core.c 		pr_debug("microcode: CPU%d added\n", cpu);
cpu               424 arch/x86/kernel/microcode_core.c 				"group for CPU%d\n", cpu);
cpu               430 arch/x86/kernel/microcode_core.c 		pr_debug("microcode: CPU%d removed\n", cpu);
cpu               435 arch/x86/kernel/microcode_core.c 		microcode_fini_cpu(cpu);
cpu               318 arch/x86/kernel/microcode_intel.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               322 arch/x86/kernel/microcode_intel.c 	BUG_ON(cpu_num != cpu);
cpu               360 arch/x86/kernel/microcode_intel.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               408 arch/x86/kernel/microcode_intel.c 				cpu, new_rev, uci->cpu_sig.rev);
cpu               425 arch/x86/kernel/microcode_intel.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               430 arch/x86/kernel/microcode_intel.c 	BUG_ON(cpu != raw_smp_processor_id());
cpu               439 arch/x86/kernel/microcode_intel.c 	ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size,
cpu               455 arch/x86/kernel/microcode_intel.c 	BUG_ON(cpu != raw_smp_processor_id());
cpu               457 arch/x86/kernel/microcode_intel.c 	return generic_load_microcode(cpu, (void*)buf, size, &get_ucode_user);
cpu               462 arch/x86/kernel/microcode_intel.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu                74 arch/x86/kernel/msr.c 	int cpu = iminor(file->f_path.dentry->d_inode);
cpu                82 arch/x86/kernel/msr.c 		err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]);
cpu               105 arch/x86/kernel/msr.c 	int cpu = iminor(file->f_path.dentry->d_inode);
cpu               117 arch/x86/kernel/msr.c 		err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]);
cpu               132 arch/x86/kernel/msr.c 	unsigned int cpu = iminor(file->f_path.dentry->d_inode);
cpu               133 arch/x86/kernel/msr.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               137 arch/x86/kernel/msr.c 	cpu = iminor(file->f_path.dentry->d_inode);
cpu               139 arch/x86/kernel/msr.c 	if (cpu >= NR_CPUS || !cpu_online(cpu)) {
cpu               143 arch/x86/kernel/msr.c 	c = &cpu_data(cpu);
cpu               166 arch/x86/kernel/msr.c 	dev = device_create_drvdata(msr_class, NULL, MKDEV(MSR_MAJOR, cpu),
cpu               167 arch/x86/kernel/msr.c 				    NULL, "msr%d", cpu);
cpu               173 arch/x86/kernel/msr.c 	device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
cpu               179 arch/x86/kernel/msr.c 	unsigned int cpu = (unsigned long)hcpu;
cpu               184 arch/x86/kernel/msr.c 		err = msr_device_create(cpu);
cpu               189 arch/x86/kernel/msr.c 		msr_device_destroy(cpu);
cpu               238 arch/x86/kernel/msr.c 	int cpu = 0;
cpu               239 arch/x86/kernel/msr.c 	for_each_online_cpu(cpu)
cpu               240 arch/x86/kernel/msr.c 		msr_device_destroy(cpu);
cpu                67 arch/x86/kernel/nmi.c 	return cpu_pda(cpu)->__nmi_count;
cpu                69 arch/x86/kernel/nmi.c 	return nmi_count(cpu);
cpu                90 arch/x86/kernel/nmi.c 	return per_cpu(irq_stat, cpu).apic_timer_irqs +
cpu                91 arch/x86/kernel/nmi.c 		per_cpu(irq_stat, cpu).irq0_irqs;
cpu               123 arch/x86/kernel/nmi.c 			cpu, prev_nmi_count[cpu], get_nmi_count(cpu));
cpu               130 arch/x86/kernel/nmi.c 	per_cpu(wd_enabled, cpu) = 0;
cpu               137 arch/x86/kernel/nmi.c 	int cpu;
cpu               153 arch/x86/kernel/nmi.c 	for_each_possible_cpu(cpu)
cpu               154 arch/x86/kernel/nmi.c 		prev_nmi_count[cpu] = get_nmi_count(cpu);
cpu               158 arch/x86/kernel/nmi.c 	for_each_online_cpu(cpu) {
cpu               159 arch/x86/kernel/nmi.c 		if (!per_cpu(wd_enabled, cpu))
cpu               161 arch/x86/kernel/nmi.c 		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
cpu               162 arch/x86/kernel/nmi.c 			report_broken_nmi(cpu, prev_nmi_count);
cpu               368 arch/x86/kernel/nmi.c 		unsigned cpu;
cpu               375 arch/x86/kernel/nmi.c 		for_each_present_cpu(cpu) {
cpu               376 arch/x86/kernel/nmi.c 			if (per_cpu(nmi_touch, cpu) != 1)
cpu               377 arch/x86/kernel/nmi.c 				per_cpu(nmi_touch, cpu) = 1;
cpu               398 arch/x86/kernel/nmi.c 	int cpu = smp_processor_id();
cpu               408 arch/x86/kernel/nmi.c 	sum = get_timer_irqs(cpu);
cpu               415 arch/x86/kernel/nmi.c 	if (cpu_isset(cpu, backtrace_mask)) {
cpu               419 arch/x86/kernel/nmi.c 		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
cpu               422 arch/x86/kernel/nmi.c 		cpu_clear(cpu, backtrace_mask);
cpu               524 arch/x86/kernel/nmi.c 		return unknown_nmi_panic_callback(regs, cpu);
cpu               254 arch/x86/kernel/process.c 	cpu_clear(cpu, c1e_mask);
cpu               281 arch/x86/kernel/process.c 		int cpu = smp_processor_id();
cpu               283 arch/x86/kernel/process.c 		if (!cpu_isset(cpu, c1e_mask)) {
cpu               284 arch/x86/kernel/process.c 			cpu_set(cpu, c1e_mask);
cpu               292 arch/x86/kernel/process.c 					   &cpu);
cpu               294 arch/x86/kernel/process.c 			       cpu);
cpu               297 arch/x86/kernel/process.c 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
cpu               306 arch/x86/kernel/process.c 		 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
cpu                94 arch/x86/kernel/process_32.c 	int cpu = smp_processor_id();
cpu               106 arch/x86/kernel/process_32.c 			if (rcu_pending(cpu))
cpu               107 arch/x86/kernel/process_32.c 				rcu_check_callbacks(cpu, 0);
cpu               109 arch/x86/kernel/process_32.c 			if (cpu_is_offline(cpu))
cpu               237 arch/x86/kernel/process_32.c 		int cpu = get_cpu();
cpu               238 arch/x86/kernel/process_32.c 		struct tss_struct *tss = &per_cpu(init_tss, cpu);
cpu               555 arch/x86/kernel/process_32.c 	int cpu = smp_processor_id();
cpu               556 arch/x86/kernel/process_32.c 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
cpu               587 arch/x86/kernel/process_32.c 	load_TLS(next, cpu);
cpu               553 arch/x86/kernel/process_64.c 	int cpu = smp_processor_id();
cpu               554 arch/x86/kernel/process_64.c 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
cpu               587 arch/x86/kernel/process_64.c 	load_TLS(next, cpu);
cpu               761 arch/x86/kernel/process_64.c 	int cpu;
cpu               767 arch/x86/kernel/process_64.c 		cpu = get_cpu();
cpu               773 arch/x86/kernel/process_64.c 				load_TLS(&task->thread, cpu);
cpu               793 arch/x86/kernel/process_64.c 		cpu = get_cpu();
cpu               799 arch/x86/kernel/process_64.c 				load_TLS(&task->thread, cpu);
cpu                62 arch/x86/kernel/setup_percpu.c 	int cpu;
cpu                64 arch/x86/kernel/setup_percpu.c 	for_each_possible_cpu(cpu) {
cpu                65 arch/x86/kernel/setup_percpu.c 		per_cpu(x86_cpu_to_apicid, cpu) =
cpu                66 arch/x86/kernel/setup_percpu.c 				early_per_cpu_map(x86_cpu_to_apicid, cpu);
cpu                67 arch/x86/kernel/setup_percpu.c 		per_cpu(x86_bios_cpu_apicid, cpu) =
cpu                68 arch/x86/kernel/setup_percpu.c 				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
cpu                70 arch/x86/kernel/setup_percpu.c 		per_cpu(x86_cpu_to_node_map, cpu) =
cpu                71 arch/x86/kernel/setup_percpu.c 				early_per_cpu_map(x86_cpu_to_node_map, cpu);
cpu               105 arch/x86/kernel/setup_percpu.c 	int cpu;
cpu               120 arch/x86/kernel/setup_percpu.c 	for_each_possible_cpu(cpu) {
cpu               121 arch/x86/kernel/setup_percpu.c 		if (cpu == 0) {
cpu               126 arch/x86/kernel/setup_percpu.c 		new_cpu_pda[cpu] = (struct x8664_pda *)pda;
cpu               127 arch/x86/kernel/setup_percpu.c 		new_cpu_pda[cpu]->in_bootmem = 1;
cpu               145 arch/x86/kernel/setup_percpu.c 	int cpu;
cpu               155 arch/x86/kernel/setup_percpu.c 	for_each_possible_cpu(cpu) {
cpu               159 arch/x86/kernel/setup_percpu.c 		int node = early_cpu_to_node(cpu);
cpu               164 arch/x86/kernel/setup_percpu.c 				cpu, node);
cpu               167 arch/x86/kernel/setup_percpu.c 					 cpu, __pa(ptr));
cpu               173 arch/x86/kernel/setup_percpu.c 					 cpu, node, __pa(ptr));
cpu               176 arch/x86/kernel/setup_percpu.c 		per_cpu_offset(cpu) = ptr - __per_cpu_start;
cpu               227 arch/x86/kernel/setup_percpu.c 	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
cpu               228 arch/x86/kernel/setup_percpu.c 		cpu_pda(cpu)->nodenumber = node;
cpu               231 arch/x86/kernel/setup_percpu.c 		cpu_to_node_map[cpu] = node;
cpu               233 arch/x86/kernel/setup_percpu.c 	else if (per_cpu_offset(cpu))
cpu               234 arch/x86/kernel/setup_percpu.c 		per_cpu(x86_cpu_to_node_map, cpu) = node;
cpu               237 arch/x86/kernel/setup_percpu.c 		pr_debug("Setting node for non-present cpu %d\n", cpu);
cpu               242 arch/x86/kernel/setup_percpu.c 	numa_set_node(cpu, NUMA_NO_NODE);
cpu               249 arch/x86/kernel/setup_percpu.c 	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
cpu               254 arch/x86/kernel/setup_percpu.c 	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
cpu               264 arch/x86/kernel/setup_percpu.c 	int node = cpu_to_node(cpu);
cpu               276 arch/x86/kernel/setup_percpu.c 		cpu_set(cpu, *mask);
cpu               278 arch/x86/kernel/setup_percpu.c 		cpu_clear(cpu, *mask);
cpu               282 arch/x86/kernel/setup_percpu.c 		enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf);
cpu               287 arch/x86/kernel/setup_percpu.c 	numa_set_cpumask(cpu, 1);
cpu               292 arch/x86/kernel/setup_percpu.c 	numa_set_cpumask(cpu, 0);
cpu               299 arch/x86/kernel/setup_percpu.c 			"cpu_to_node(%d): usage too early!\n", cpu);
cpu               301 arch/x86/kernel/setup_percpu.c 		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
cpu               303 arch/x86/kernel/setup_percpu.c 	return per_cpu(x86_cpu_to_node_map, cpu);
cpu               314 arch/x86/kernel/setup_percpu.c 		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
cpu               316 arch/x86/kernel/setup_percpu.c 	if (!per_cpu_offset(cpu)) {
cpu               318 arch/x86/kernel/setup_percpu.c 			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
cpu               322 arch/x86/kernel/setup_percpu.c 	return per_cpu(x86_cpu_to_node_map, cpu);
cpu               117 arch/x86/kernel/smp.c 	if (unlikely(cpu_is_offline(cpu))) {
cpu               121 arch/x86/kernel/smp.c 	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
cpu               126 arch/x86/kernel/smp.c 	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
cpu               147 arch/x86/kernel/smpboot.c 	printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
cpu               148 arch/x86/kernel/smpboot.c 	cpu_set(cpu, node_to_cpumask_map[node]);
cpu               149 arch/x86/kernel/smpboot.c 	cpu_to_node_map[cpu] = node;
cpu               157 arch/x86/kernel/smpboot.c 	printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
cpu               159 arch/x86/kernel/smpboot.c 		cpu_clear(cpu, node_to_cpumask_map[node]);
cpu               160 arch/x86/kernel/smpboot.c 	cpu_to_node_map[cpu] = 0;
cpu               175 arch/x86/kernel/smpboot.c 	int cpu = smp_processor_id();
cpu               182 arch/x86/kernel/smpboot.c 	cpu_2_logical_apicid[cpu] = apicid;
cpu               183 arch/x86/kernel/smpboot.c 	map_cpu_to_node(cpu, node);
cpu               188 arch/x86/kernel/smpboot.c 	cpu_2_logical_apicid[cpu] = BAD_APICID;
cpu               189 arch/x86/kernel/smpboot.c 	unmap_cpu_to_node(cpu);
cpu               446 arch/x86/kernel/smpboot.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               448 arch/x86/kernel/smpboot.c 	cpu_set(cpu, cpu_sibling_setup_map);
cpu               454 arch/x86/kernel/smpboot.c 				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
cpu               455 arch/x86/kernel/smpboot.c 				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
cpu               456 arch/x86/kernel/smpboot.c 				cpu_set(i, per_cpu(cpu_core_map, cpu));
cpu               457 arch/x86/kernel/smpboot.c 				cpu_set(cpu, per_cpu(cpu_core_map, i));
cpu               459 arch/x86/kernel/smpboot.c 				cpu_set(cpu, cpu_data(i).llc_shared_map);
cpu               463 arch/x86/kernel/smpboot.c 		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
cpu               466 arch/x86/kernel/smpboot.c 	cpu_set(cpu, c->llc_shared_map);
cpu               469 arch/x86/kernel/smpboot.c 		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
cpu               475 arch/x86/kernel/smpboot.c 		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
cpu               476 arch/x86/kernel/smpboot.c 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
cpu               478 arch/x86/kernel/smpboot.c 			cpu_set(cpu, cpu_data(i).llc_shared_map);
cpu               481 arch/x86/kernel/smpboot.c 			cpu_set(i, per_cpu(cpu_core_map, cpu));
cpu               482 arch/x86/kernel/smpboot.c 			cpu_set(cpu, per_cpu(cpu_core_map, i));
cpu               486 arch/x86/kernel/smpboot.c 			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
cpu               497 arch/x86/kernel/smpboot.c 				if (i != cpu)
cpu               499 arch/x86/kernel/smpboot.c 			} else if (i != cpu && !c->booted_cores)
cpu               508 arch/x86/kernel/smpboot.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               514 arch/x86/kernel/smpboot.c 		return per_cpu(cpu_core_map, cpu);
cpu               521 arch/x86/kernel/smpboot.c 	int cpu;
cpu               527 arch/x86/kernel/smpboot.c 	for_each_possible_cpu(cpu)
cpu               528 arch/x86/kernel/smpboot.c 		if (cpu_isset(cpu, cpu_callout_map))
cpu               529 arch/x86/kernel/smpboot.c 			bogosum += cpu_data(cpu).loops_per_jiffy;
cpu               746 arch/x86/kernel/smpboot.c 	int cpu;
cpu               754 arch/x86/kernel/smpboot.c 	c_idle->idle = fork_idle(c_idle->cpu);
cpu               776 arch/x86/kernel/smpboot.c 	int node = cpu_to_node(cpu);
cpu               778 arch/x86/kernel/smpboot.c 	if (cpu_pda(cpu) && !cpu_pda(cpu)->in_bootmem)
cpu               781 arch/x86/kernel/smpboot.c 	oldpda = cpu_pda(cpu);
cpu               785 arch/x86/kernel/smpboot.c 			"for CPU %d on node %d\n", cpu, node);
cpu               799 arch/x86/kernel/smpboot.c 	cpu_pda(cpu) = newpda;
cpu               816 arch/x86/kernel/smpboot.c 		.cpu = cpu,
cpu               823 arch/x86/kernel/smpboot.c 	if (cpu > 0) {
cpu               824 arch/x86/kernel/smpboot.c 		boot_error = get_local_pda(cpu);
cpu               833 arch/x86/kernel/smpboot.c 	c_idle.idle = get_idle_for_cpu(cpu);
cpu               842 arch/x86/kernel/smpboot.c 		init_idle(c_idle.idle, cpu);
cpu               854 arch/x86/kernel/smpboot.c 		printk("failed fork for CPU %d\n", cpu);
cpu               858 arch/x86/kernel/smpboot.c 	set_idle_for_cpu(cpu, c_idle.idle);
cpu               861 arch/x86/kernel/smpboot.c 	per_cpu(current_task, cpu) = c_idle.idle;
cpu               862 arch/x86/kernel/smpboot.c 	init_gdt(cpu);
cpu               864 arch/x86/kernel/smpboot.c 	irq_ctx_init(cpu);
cpu               866 arch/x86/kernel/smpboot.c 	cpu_pda(cpu)->pcurrent = c_idle.idle;
cpu               869 arch/x86/kernel/smpboot.c 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
cpu               878 arch/x86/kernel/smpboot.c 			  cpu, apicid, start_ip);
cpu               910 arch/x86/kernel/smpboot.c 		pr_debug("Before Callout %d.\n", cpu);
cpu               911 arch/x86/kernel/smpboot.c 		cpu_set(cpu, cpu_callout_map);
cpu               912 arch/x86/kernel/smpboot.c 		pr_debug("After Callout %d.\n", cpu);
cpu               918 arch/x86/kernel/smpboot.c 			if (cpu_isset(cpu, cpu_callin_map))
cpu               923 arch/x86/kernel/smpboot.c 		if (cpu_isset(cpu, cpu_callin_map)) {
cpu               926 arch/x86/kernel/smpboot.c 			printk(KERN_INFO "CPU%d: ", cpu);
cpu               927 arch/x86/kernel/smpboot.c 			print_cpu_info(&cpu_data(cpu));
cpu               947 arch/x86/kernel/smpboot.c 		numa_remove_cpu(cpu); /* was set by numa_add_cpu */
cpu               948 arch/x86/kernel/smpboot.c 		cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */
cpu               949 arch/x86/kernel/smpboot.c 		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
cpu               950 arch/x86/kernel/smpboot.c 		cpu_clear(cpu, cpu_present_map);
cpu               951 arch/x86/kernel/smpboot.c 		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
cpu               967 arch/x86/kernel/smpboot.c 	int apicid = cpu_present_to_apicid(cpu);
cpu               973 arch/x86/kernel/smpboot.c 	pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);
cpu               977 arch/x86/kernel/smpboot.c 		printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
cpu               984 arch/x86/kernel/smpboot.c 	if (cpu_isset(cpu, cpu_callin_map)) {
cpu               985 arch/x86/kernel/smpboot.c 		pr_debug("do_boot_cpu %d Already started\n", cpu);
cpu               995 arch/x86/kernel/smpboot.c 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
cpu              1004 arch/x86/kernel/smpboot.c 	err = do_boot_cpu(apicid, cpu);
cpu              1009 arch/x86/kernel/smpboot.c 	err = do_boot_cpu(apicid, cpu);
cpu              1021 arch/x86/kernel/smpboot.c 	check_tsc_sync_source(cpu);
cpu              1024 arch/x86/kernel/smpboot.c 	while (!cpu_online(cpu)) {
cpu              1061 arch/x86/kernel/smpboot.c 		unsigned int cpu;
cpu              1069 arch/x86/kernel/smpboot.c 		for_each_present_cpu(cpu) {
cpu              1071 arch/x86/kernel/smpboot.c 				cpu_clear(cpu, cpu_present_map);
cpu              1076 arch/x86/kernel/smpboot.c 		for_each_possible_cpu(cpu) {
cpu              1078 arch/x86/kernel/smpboot.c 				cpu_clear(cpu, cpu_possible_map);
cpu              1181 arch/x86/kernel/smpboot.c 	current_thread_info()->cpu = 0;  /* needed? */
cpu              1307 arch/x86/kernel/smpboot.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu              1309 arch/x86/kernel/smpboot.c 	for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
cpu              1310 arch/x86/kernel/smpboot.c 		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
cpu              1314 arch/x86/kernel/smpboot.c 		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
cpu              1318 arch/x86/kernel/smpboot.c 	for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
cpu              1319 arch/x86/kernel/smpboot.c 		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
cpu              1320 arch/x86/kernel/smpboot.c 	cpus_clear(per_cpu(cpu_sibling_map, cpu));
cpu              1321 arch/x86/kernel/smpboot.c 	cpus_clear(per_cpu(cpu_core_map, cpu));
cpu              1324 arch/x86/kernel/smpboot.c 	cpu_clear(cpu, cpu_sibling_setup_map);
cpu              1329 arch/x86/kernel/smpboot.c 	cpu_clear(cpu, cpu_online_map);
cpu              1330 arch/x86/kernel/smpboot.c 	cpu_clear(cpu, cpu_callout_map);
cpu              1331 arch/x86/kernel/smpboot.c 	cpu_clear(cpu, cpu_callin_map);
cpu              1333 arch/x86/kernel/smpboot.c 	cpu_clear(cpu, cpu_initialized);
cpu              1334 arch/x86/kernel/smpboot.c 	numa_remove_cpu(cpu);
cpu              1339 arch/x86/kernel/smpboot.c 	int cpu = smp_processor_id();
cpu              1350 arch/x86/kernel/smpboot.c 	remove_siblinginfo(cpu);
cpu              1354 arch/x86/kernel/smpboot.c 	remove_cpu_from_maps(cpu);
cpu              1361 arch/x86/kernel/smpboot.c 	int cpu = smp_processor_id();
cpu              1371 arch/x86/kernel/smpboot.c 	if (cpu == 0)
cpu              1389 arch/x86/kernel/smpboot.c 		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
cpu              1390 arch/x86/kernel/smpboot.c 			printk(KERN_INFO "CPU %d is now offline\n", cpu);
cpu              1397 arch/x86/kernel/smpboot.c 	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
cpu                20 arch/x86/kernel/smpcommon.c 	pack_descriptor(&gdt, __per_cpu_offset[cpu], 0xFFFFF,
cpu                24 arch/x86/kernel/smpcommon.c 	write_gdt_entry(get_cpu_gdt_table(cpu),
cpu                27 arch/x86/kernel/smpcommon.c 	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
cpu                28 arch/x86/kernel/smpcommon.c 	per_cpu(cpu_number, cpu) = cpu;
cpu                37 arch/x86/kernel/tlb_32.c 	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
cpu                39 arch/x86/kernel/tlb_32.c 	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
cpu                92 arch/x86/kernel/tlb_32.c 	unsigned long cpu;
cpu                94 arch/x86/kernel/tlb_32.c 	cpu = get_cpu();
cpu                96 arch/x86/kernel/tlb_32.c 	if (!cpu_isset(cpu, flush_cpumask))
cpu               107 arch/x86/kernel/tlb_32.c 	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
cpu               108 arch/x86/kernel/tlb_32.c 		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
cpu               114 arch/x86/kernel/tlb_32.c 			leave_mm(cpu);
cpu               118 arch/x86/kernel/tlb_32.c 	cpu_clear(cpu, flush_cpumask);
cpu               232 arch/x86/kernel/tlb_32.c 	unsigned long cpu = smp_processor_id();
cpu               235 arch/x86/kernel/tlb_32.c 	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
cpu               236 arch/x86/kernel/tlb_32.c 		leave_mm(cpu);
cpu               246 arch/x86/kernel/tlb_32.c 	int cpu = raw_smp_processor_id();
cpu               248 arch/x86/kernel/tlb_32.c 	per_cpu(cpu_tlbstate, cpu).state = 0;
cpu               249 arch/x86/kernel/tlb_32.c 	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
cpu                67 arch/x86/kernel/tlb_64.c 	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
cpu               122 arch/x86/kernel/tlb_64.c 	int cpu;
cpu               126 arch/x86/kernel/tlb_64.c 	cpu = smp_processor_id();
cpu               134 arch/x86/kernel/tlb_64.c 	if (!cpu_isset(cpu, f->flush_cpumask))
cpu               152 arch/x86/kernel/tlb_64.c 			leave_mm(cpu);
cpu               156 arch/x86/kernel/tlb_64.c 	cpu_clear(cpu, f->flush_cpumask);
cpu               269 arch/x86/kernel/tlb_64.c 	unsigned long cpu = smp_processor_id();
cpu               273 arch/x86/kernel/tlb_64.c 		leave_mm(cpu);
cpu                66 arch/x86/kernel/tlb_uv.c 	int cpu;
cpu                69 arch/x86/kernel/tlb_uv.c 	cpu = uv_blade_processor_id();
cpu                72 arch/x86/kernel/tlb_uv.c 	this_cpu_mask = 1UL << cpu;
cpu               231 arch/x86/kernel/tlb_uv.c 	if (cpu < UV_CPUS_PER_ACT_STATUS) {
cpu               233 arch/x86/kernel/tlb_uv.c 		right_shift = cpu * UV_ACT_STATUS_SIZE;
cpu               237 arch/x86/kernel/tlb_uv.c 		    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
cpu               243 arch/x86/kernel/tlb_uv.c 			cpu;
cpu               305 arch/x86/kernel/tlb_uv.c 	int cpu;
cpu               310 arch/x86/kernel/tlb_uv.c 	cpu = uv_blade_processor_id();
cpu               313 arch/x86/kernel/tlb_uv.c 	bau_desc += UV_ITEMS_PER_DESCRIPTOR * cpu;
cpu               343 arch/x86/kernel/tlb_uv.c 	return uv_flush_send_and_wait(cpu, this_blade, bau_desc, cpumaskp);
cpu               458 arch/x86/kernel/tlb_uv.c 	int cpu;
cpu               460 arch/x86/kernel/tlb_uv.c 	cpu = *(loff_t *)data;
cpu               462 arch/x86/kernel/tlb_uv.c 	if (!cpu) {
cpu               468 arch/x86/kernel/tlb_uv.c 	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
cpu               469 arch/x86/kernel/tlb_uv.c 		stat = &per_cpu(ptcstats, cpu);
cpu               471 arch/x86/kernel/tlb_uv.c 			   cpu, stat->requestor,
cpu               476 arch/x86/kernel/tlb_uv.c 					(uv_cpu_to_blade_id(cpu)),
cpu                36 arch/x86/kernel/tls.c 	int cpu;
cpu                41 arch/x86/kernel/tls.c 	cpu = get_cpu();
cpu                53 arch/x86/kernel/tls.c 		load_TLS(t, cpu);
cpu                49 arch/x86/kernel/topology.c 		per_cpu(cpu_devices, num).cpu.hotpluggable = 1;
cpu                50 arch/x86/kernel/topology.c 	return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
cpu                56 arch/x86/kernel/topology.c 	unregister_cpu(&per_cpu(cpu_devices, num).cpu);
cpu                62 arch/x86/kernel/topology.c 	return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
cpu               132 arch/x86/kernel/traps.c 	int cpu;
cpu               134 arch/x86/kernel/traps.c 	cpu = get_cpu();
cpu               135 arch/x86/kernel/traps.c 	tss = &per_cpu(init_tss, cpu);
cpu               435 arch/x86/kernel/traps.c 	int cpu;
cpu               437 arch/x86/kernel/traps.c 	cpu = smp_processor_id();
cpu               440 arch/x86/kernel/traps.c 	if (!cpu)
cpu               454 arch/x86/kernel/traps.c 		if (!do_nmi_callback(regs, cpu))
cpu               485 arch/x86/kernel/traps.c 	{ int cpu; cpu = smp_processor_id(); ++nmi_count(cpu); }
cpu               566 arch/x86/kernel/tsc.c 	scale = &per_cpu(cyc2ns, cpu);
cpu               601 arch/x86/kernel/tsc.c 	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
cpu               607 arch/x86/kernel/tsc.c 		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
cpu               627 arch/x86/kernel/tsc.c 	set_cyc2ns_scale(tsc_khz, freq->cpu);
cpu               797 arch/x86/kernel/tsc.c 	int cpu;
cpu               830 arch/x86/kernel/tsc.c 	for_each_possible_cpu(cpu)
cpu               831 arch/x86/kernel/tsc.c 		set_cyc2ns_scale(cpu_khz, cpu);
cpu               112 arch/x86/kernel/tsc_sync.c 			  smp_processor_id(), cpu);
cpu               198 arch/x86/kernel/vmi_32.c 	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
cpu               206 arch/x86/kernel/vmi_32.c 	unsigned cpu = smp_processor_id();
cpu               212 arch/x86/kernel/vmi_32.c 	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, &desc, DESC_LDT);
cpu               214 arch/x86/kernel/vmiclock_32.c 	int cpu = smp_processor_id();
cpu               229 arch/x86/kernel/vmiclock_32.c 	evt->cpumask = cpumask_of_cpu(cpu);
cpu               213 arch/x86/kernel/vsyscall_64.c 	if (cpu)
cpu               214 arch/x86/kernel/vsyscall_64.c 		*cpu = p & 0xfff;
cpu               256 arch/x86/kernel/vsyscall_64.c 	node = cpu_to_node(cpu);
cpu               258 arch/x86/kernel/vsyscall_64.c 	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
cpu               259 arch/x86/kernel/vsyscall_64.c 		write_rdtscp_aux((node << 12) | cpu);
cpu               265 arch/x86/kernel/vsyscall_64.c 	d |= cpu;
cpu               268 arch/x86/kernel/vsyscall_64.c 	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
cpu               280 arch/x86/kernel/vsyscall_64.c 	long cpu = (long)arg;
cpu               282 arch/x86/kernel/vsyscall_64.c 		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
cpu                84 arch/x86/kvm/svm.c 	int cpu;
cpu                98 arch/x86/kvm/svm.c 	int cpu;
cpu               340 arch/x86/kvm/svm.c 	svm_data->cpu = cpu;
cpu               346 arch/x86/kvm/svm.c 	per_cpu(svm_data, cpu) = svm_data;
cpu               420 arch/x86/kvm/svm.c 	int cpu;
cpu               438 arch/x86/kvm/svm.c 	for_each_online_cpu(cpu) {
cpu               439 arch/x86/kvm/svm.c 		r = svm_cpu_init(cpu);
cpu               470 arch/x86/kvm/svm.c 	int cpu;
cpu               472 arch/x86/kvm/svm.c 	for_each_online_cpu(cpu)
cpu               473 arch/x86/kvm/svm.c 		svm_cpu_uninit(cpu);
cpu               694 arch/x86/kvm/svm.c 	if (unlikely(cpu != vcpu->cpu)) {
cpu               704 arch/x86/kvm/svm.c 		vcpu->cpu = cpu;
cpu               959 arch/x86/kvm/svm.c 	svm->vcpu.cpu = svm_data->cpu;
cpu              1515 arch/x86/kvm/svm.c 	int cpu = raw_smp_processor_id();
cpu              1517 arch/x86/kvm/svm.c 	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
cpu              1524 arch/x86/kvm/svm.c 	int cpu = raw_smp_processor_id();
cpu              1526 arch/x86/kvm/svm.c 	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
cpu              1529 arch/x86/kvm/svm.c 	if (svm->vcpu.cpu != cpu ||
cpu               335 arch/x86/kvm/vmx.c 	int cpu = raw_smp_processor_id();
cpu               337 arch/x86/kvm/vmx.c 	if (vmx->vcpu.cpu == cpu)
cpu               339 arch/x86/kvm/vmx.c 	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
cpu               340 arch/x86/kvm/vmx.c 		per_cpu(current_vmcs, cpu) = NULL;
cpu               343 arch/x86/kvm/vmx.c 	vmx->vcpu.cpu = -1;
cpu               349 arch/x86/kvm/vmx.c 	if (vmx->vcpu.cpu == -1)
cpu               351 arch/x86/kvm/vmx.c 	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
cpu               626 arch/x86/kvm/vmx.c 	if (vcpu->cpu != cpu) {
cpu               632 arch/x86/kvm/vmx.c 			 &per_cpu(vcpus_on_cpu, cpu));
cpu               636 arch/x86/kvm/vmx.c 	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
cpu               639 arch/x86/kvm/vmx.c 		per_cpu(current_vmcs, cpu) = vmx->vmcs;
cpu               648 arch/x86/kvm/vmx.c 	if (vcpu->cpu != cpu) {
cpu               652 arch/x86/kvm/vmx.c 		vcpu->cpu = cpu;
cpu              1042 arch/x86/kvm/vmx.c 	int cpu = raw_smp_processor_id();
cpu              1043 arch/x86/kvm/vmx.c 	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
cpu              1046 arch/x86/kvm/vmx.c 	INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
cpu              1064 arch/x86/kvm/vmx.c 	int cpu = raw_smp_processor_id();
cpu              1067 arch/x86/kvm/vmx.c 	list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
cpu              1207 arch/x86/kvm/vmx.c 	int node = cpu_to_node(cpu);
cpu              1232 arch/x86/kvm/vmx.c 	int cpu;
cpu              1234 arch/x86/kvm/vmx.c 	for_each_online_cpu(cpu)
cpu              1235 arch/x86/kvm/vmx.c 		free_vmcs(per_cpu(vmxarea, cpu));
cpu              1240 arch/x86/kvm/vmx.c 	int cpu;
cpu              1242 arch/x86/kvm/vmx.c 	for_each_online_cpu(cpu) {
cpu              1245 arch/x86/kvm/vmx.c 		vmcs = alloc_vmcs_cpu(cpu);
cpu              1251 arch/x86/kvm/vmx.c 		per_cpu(vmxarea, cpu) = vmcs;
cpu              3115 arch/x86/kvm/vmx.c 	int cpu;
cpu              3142 arch/x86/kvm/vmx.c 	cpu = get_cpu();
cpu              3143 arch/x86/kvm/vmx.c 	vmx_vcpu_load(&vmx->vcpu, cpu);
cpu               973 arch/x86/kvm/x86.c 	kvm_x86_ops->vcpu_load(vcpu, cpu);
cpu              4043 arch/x86/kvm/x86.c 	int ipi_pcpu = vcpu->cpu;
cpu              4044 arch/x86/kvm/x86.c 	int cpu = get_cpu();
cpu              4054 arch/x86/kvm/x86.c 	if (vcpu->guest_mode && vcpu->cpu != cpu)
cpu               282 arch/x86/lguest/boot.c 	lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0);
cpu                54 arch/x86/lib/delay.c 	int cpu;
cpu                57 arch/x86/lib/delay.c 	cpu = smp_processor_id();
cpu                78 arch/x86/lib/delay.c 		if (unlikely(cpu != smp_processor_id())) {
cpu                80 arch/x86/lib/delay.c 			cpu = smp_processor_id();
cpu                32 arch/x86/lib/msr-on-cpu.c 	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
cpu                47 arch/x86/lib/msr-on-cpu.c 	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
cpu                74 arch/x86/lib/msr-on-cpu.c 	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
cpu                89 arch/x86/lib/msr-on-cpu.c 	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
cpu               731 arch/x86/mach-voyager/voyager_cat.c 			__u8 cpu = i & 0x07;
cpu               735 arch/x86/mach-voyager/voyager_cat.c 			voyager_extended_vic_processors |= (1 << cpu);
cpu               736 arch/x86/mach-voyager/voyager_cat.c 			cpu += 4;
cpu               737 arch/x86/mach-voyager/voyager_cat.c 			voyager_extended_vic_processors |= (1 << cpu);
cpu               883 arch/x86/mach-voyager/voyager_cat.c 			__u8 cpu;
cpu               889 arch/x86/mach-voyager/voyager_cat.c 				cpu = (i & 0x07) + j * 8;
cpu               891 arch/x86/mach-voyager/voyager_cat.c 				cpu = (i & 0x03) + j * 4;
cpu               894 arch/x86/mach-voyager/voyager_cat.c 				voyager_extended_vic_processors |= (1 << cpu);
cpu               901 arch/x86/mach-voyager/voyager_cat.c 				voyager_extended_vic_processors |= (1 << cpu);
cpu               903 arch/x86/mach-voyager/voyager_cat.c 				    (~(1 << cpu));
cpu               906 arch/x86/mach-voyager/voyager_cat.c 			voyager_quad_processors |= (1 << cpu);
cpu               907 arch/x86/mach-voyager/voyager_cat.c 			voyager_quad_cpi_addr[cpu] = (struct voyager_qic_cpi *)
cpu               909 arch/x86/mach-voyager/voyager_cat.c 			CDEBUG(("CPU%d: CPI address 0x%lx\n", cpu,
cpu               910 arch/x86/mach-voyager/voyager_cat.c 				(unsigned long)voyager_quad_cpi_addr[cpu]));
cpu               100 arch/x86/mach-voyager/voyager_smp.c 	voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi =
cpu               106 arch/x86/mach-voyager/voyager_smp.c 	int cpu;
cpu               108 arch/x86/mach-voyager/voyager_smp.c 	for_each_online_cpu(cpu) {
cpu               109 arch/x86/mach-voyager/voyager_smp.c 		if (cpuset & (1 << cpu)) {
cpu               111 arch/x86/mach-voyager/voyager_smp.c 			if (!cpu_online(cpu))
cpu               114 arch/x86/mach-voyager/voyager_smp.c 					hard_smp_processor_id(), cpi, cpu));
cpu               116 arch/x86/mach-voyager/voyager_smp.c 			send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
cpu               130 arch/x86/mach-voyager/voyager_smp.c 	if (voyager_quad_processors & (1 << cpu))
cpu               131 arch/x86/mach-voyager/voyager_smp.c 		send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
cpu               133 arch/x86/mach-voyager/voyager_smp.c 		send_CPI(1 << cpu, cpi);
cpu               138 arch/x86/mach-voyager/voyager_smp.c 	__u8 cpu = smp_processor_id();
cpu               139 arch/x86/mach-voyager/voyager_smp.c 	__u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
cpu               151 arch/x86/mach-voyager/voyager_smp.c 	__u8 cpu = hard_smp_processor_id();
cpu               153 arch/x86/mach-voyager/voyager_smp.c 	return (voyager_extended_vic_processors & (1 << cpu));
cpu               158 arch/x86/mach-voyager/voyager_smp.c 	__u8 cpu = hard_smp_processor_id();
cpu               161 arch/x86/mach-voyager/voyager_smp.c 		& voyager_allowed_boot_processors & (1 << cpu));
cpu               404 arch/x86/mach-voyager/voyager_smp.c 	current_thread_info()->cpu = boot_cpu_id;
cpu               498 arch/x86/mach-voyager/voyager_smp.c 	int quad_boot = (1 << cpu) & voyager_quad_processors
cpu               525 arch/x86/mach-voyager/voyager_smp.c 	idle = fork_idle(cpu);
cpu               527 arch/x86/mach-voyager/voyager_smp.c 		panic("failed fork for CPU%d", cpu);
cpu               532 arch/x86/mach-voyager/voyager_smp.c 	init_gdt(cpu);
cpu               533 arch/x86/mach-voyager/voyager_smp.c 	per_cpu(current_task, cpu) = idle;
cpu               534 arch/x86/mach-voyager/voyager_smp.c 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
cpu               535 arch/x86/mach-voyager/voyager_smp.c 	irq_ctx_init(cpu);
cpu               538 arch/x86/mach-voyager/voyager_smp.c 	VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
cpu               548 arch/x86/mach-voyager/voyager_smp.c 		printk("CPU %d: non extended Quad boot\n", cpu);
cpu               554 arch/x86/mach-voyager/voyager_smp.c 		printk("CPU%d: extended VIC boot\n", cpu);
cpu               572 arch/x86/mach-voyager/voyager_smp.c 		send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI);
cpu               574 arch/x86/mach-voyager/voyager_smp.c 		outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
cpu               583 arch/x86/mach-voyager/voyager_smp.c 		send_CPI((1 << cpu), VIC_CPU_BOOT_CPI);
cpu               599 arch/x86/mach-voyager/voyager_smp.c 			cpu, smp_processor_id()));
cpu               601 arch/x86/mach-voyager/voyager_smp.c 		printk("CPU%d: ", cpu);
cpu               602 arch/x86/mach-voyager/voyager_smp.c 		print_cpu_info(&cpu_data(cpu));
cpu               604 arch/x86/mach-voyager/voyager_smp.c 		cpu_set(cpu, cpu_callout_map);
cpu               605 arch/x86/mach-voyager/voyager_smp.c 		cpu_set(cpu, cpu_present_map);
cpu               607 arch/x86/mach-voyager/voyager_smp.c 		printk("CPU%d FAILED TO BOOT: ", cpu);
cpu               783 arch/x86/mach-voyager/voyager_smp.c 	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
cpu               785 arch/x86/mach-voyager/voyager_smp.c 	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
cpu               794 arch/x86/mach-voyager/voyager_smp.c 	__u8 cpu = smp_processor_id();
cpu               796 arch/x86/mach-voyager/voyager_smp.c 	if (!test_bit(cpu, &smp_invalidate_needed))
cpu               804 arch/x86/mach-voyager/voyager_smp.c 	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
cpu               805 arch/x86/mach-voyager/voyager_smp.c 		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
cpu               811 arch/x86/mach-voyager/voyager_smp.c 			voyager_leave_mm(cpu);
cpu               814 arch/x86/mach-voyager/voyager_smp.c 	clear_bit(cpu, &smp_invalidate_needed);
cpu               927 arch/x86/mach-voyager/voyager_smp.c 	__u8 cpu = get_cpu();
cpu               929 arch/x86/mach-voyager/voyager_smp.c 	VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu,
cpu               930 arch/x86/mach-voyager/voyager_smp.c 		vic_irq_enable_mask[cpu]));
cpu               934 arch/x86/mach-voyager/voyager_smp.c 		if (vic_irq_enable_mask[cpu] & (1 << irq))
cpu               937 arch/x86/mach-voyager/voyager_smp.c 	vic_irq_enable_mask[cpu] = 0;
cpu              1038 arch/x86/mach-voyager/voyager_smp.c 	__u8 cpu = smp_processor_id();
cpu              1045 arch/x86/mach-voyager/voyager_smp.c 	if (test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
cpu              1047 arch/x86/mach-voyager/voyager_smp.c 	if (test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
cpu              1049 arch/x86/mach-voyager/voyager_smp.c 	if (test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
cpu              1051 arch/x86/mach-voyager/voyager_smp.c 	if (test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
cpu              1053 arch/x86/mach-voyager/voyager_smp.c 	if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
cpu              1055 arch/x86/mach-voyager/voyager_smp.c 	if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu]))
cpu              1062 arch/x86/mach-voyager/voyager_smp.c 	unsigned long cpu = smp_processor_id();
cpu              1065 arch/x86/mach-voyager/voyager_smp.c 	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
cpu              1066 arch/x86/mach-voyager/voyager_smp.c 		voyager_leave_mm(cpu);
cpu              1078 arch/x86/mach-voyager/voyager_smp.c 	send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
cpu              1125 arch/x86/mach-voyager/voyager_smp.c 	int cpu = smp_processor_id();
cpu              1129 arch/x86/mach-voyager/voyager_smp.c 	if (--per_cpu(prof_counter, cpu) <= 0) {
cpu              1138 arch/x86/mach-voyager/voyager_smp.c 		per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
cpu              1139 arch/x86/mach-voyager/voyager_smp.c 		if (per_cpu(prof_counter, cpu) !=
cpu              1140 arch/x86/mach-voyager/voyager_smp.c 		    per_cpu(prof_old_multiplier, cpu)) {
cpu              1142 arch/x86/mach-voyager/voyager_smp.c 			per_cpu(prof_old_multiplier, cpu) =
cpu              1143 arch/x86/mach-voyager/voyager_smp.c 			    per_cpu(prof_counter, cpu);
cpu              1149 arch/x86/mach-voyager/voyager_smp.c 	if (((1 << cpu) & voyager_extended_vic_processors) == 0)
cpu              1165 arch/x86/mach-voyager/voyager_smp.c 	if ((++vic_tick[cpu] & 0x7) != 0)
cpu              1194 arch/x86/mach-voyager/voyager_smp.c 	weight = (vic_intr_count[cpu] * voyager_extended_cpus
cpu              1205 arch/x86/mach-voyager/voyager_smp.c 	if ((vic_tick[cpu] & 0xFFF) == 0) {
cpu              1208 arch/x86/mach-voyager/voyager_smp.c 		       cpu, vic_tick[cpu], weight);
cpu              1282 arch/x86/mach-voyager/voyager_smp.c 	int cpu;
cpu              1297 arch/x86/mach-voyager/voyager_smp.c 	for_each_online_cpu(cpu) {
cpu              1298 arch/x86/mach-voyager/voyager_smp.c 		if (cpuset & (1 << cpu))
cpu              1299 arch/x86/mach-voyager/voyager_smp.c 			set_bit(cpi, &vic_cpi_mailbox[cpu]);
cpu              1313 arch/x86/mach-voyager/voyager_smp.c 	__u8 cpu = hard_smp_processor_id();
cpu              1318 arch/x86/mach-voyager/voyager_smp.c 	return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
cpu              1341 arch/x86/mach-voyager/voyager_smp.c 	__u8 cpu = smp_processor_id();
cpu              1346 arch/x86/mach-voyager/voyager_smp.c 		printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi);
cpu              1356 arch/x86/mach-voyager/voyager_smp.c 		printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi);
cpu              1364 arch/x86/mach-voyager/voyager_smp.c #define cached_21(cpu)	(__byte(0,vic_irq_mask[cpu]))
cpu              1365 arch/x86/mach-voyager/voyager_smp.c #define cached_A1(cpu)	(__byte(1,vic_irq_mask[cpu]))
cpu              1399 arch/x86/mach-voyager/voyager_smp.c 	int cpu = smp_processor_id(), real_cpu;
cpu              1405 arch/x86/mach-voyager/voyager_smp.c 		irq, cpu, cpu_irq_affinity[cpu]));
cpu              1414 arch/x86/mach-voyager/voyager_smp.c 		if (real_cpu == cpu) {
cpu              1433 arch/x86/mach-voyager/voyager_smp.c 	__u8 cpu = smp_processor_id();
cpu              1435 arch/x86/mach-voyager/voyager_smp.c 	__u16 old_mask = vic_irq_mask[cpu];
cpu              1437 arch/x86/mach-voyager/voyager_smp.c 	vic_irq_mask[cpu] &= mask;
cpu              1438 arch/x86/mach-voyager/voyager_smp.c 	if (vic_irq_mask[cpu] == old_mask)
cpu              1442 arch/x86/mach-voyager/voyager_smp.c 		irq, cpu));
cpu              1445 arch/x86/mach-voyager/voyager_smp.c 		outb_p(cached_A1(cpu), 0xA1);
cpu              1448 arch/x86/mach-voyager/voyager_smp.c 		outb_p(cached_21(cpu), 0x21);
cpu              1455 arch/x86/mach-voyager/voyager_smp.c 	__u8 cpu = smp_processor_id();
cpu              1457 arch/x86/mach-voyager/voyager_smp.c 	__u16 old_mask = vic_irq_mask[cpu];
cpu              1462 arch/x86/mach-voyager/voyager_smp.c 	vic_irq_mask[cpu] |= mask;
cpu              1463 arch/x86/mach-voyager/voyager_smp.c 	if (old_mask == vic_irq_mask[cpu])
cpu              1467 arch/x86/mach-voyager/voyager_smp.c 		irq, cpu));
cpu              1470 arch/x86/mach-voyager/voyager_smp.c 		outb_p(cached_A1(cpu), 0xA1);
cpu              1473 arch/x86/mach-voyager/voyager_smp.c 		outb_p(cached_21(cpu), 0x21);
cpu              1487 arch/x86/mach-voyager/voyager_smp.c 	__u8 cpu = smp_processor_id();
cpu              1491 arch/x86/mach-voyager/voyager_smp.c 	vic_intr_count[cpu]++;
cpu              1493 arch/x86/mach-voyager/voyager_smp.c 	if (!(cpu_irq_affinity[cpu] & (1 << irq))) {
cpu              1497 arch/x86/mach-voyager/voyager_smp.c 			"on cpu %d\n", irq, cpu));
cpu              1508 arch/x86/mach-voyager/voyager_smp.c 			irq, cpu));
cpu              1538 arch/x86/mach-voyager/voyager_smp.c 			__u8 cpu = smp_processor_id();
cpu              1543 arch/x86/mach-voyager/voyager_smp.c 			       cpu, irq);
cpu              1555 arch/x86/mach-voyager/voyager_smp.c 				outb(cpu, VIC_PROCESSOR_ID);
cpu              1604 arch/x86/mach-voyager/voyager_smp.c 	int cpu;
cpu              1628 arch/x86/mach-voyager/voyager_smp.c 	for_each_online_cpu(cpu) {
cpu              1629 arch/x86/mach-voyager/voyager_smp.c 		unsigned long cpu_mask = 1 << cpu;
cpu              1633 arch/x86/mach-voyager/voyager_smp.c 			cpu_irq_affinity[cpu] |= irq_mask;
cpu              1636 arch/x86/mach-voyager/voyager_smp.c 			cpu_irq_affinity[cpu] &= ~irq_mask;
cpu              1665 arch/x86/mach-voyager/voyager_smp.c 	__u8 cpu = smp_processor_id();
cpu              1668 arch/x86/mach-voyager/voyager_smp.c 	vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id];
cpu              1679 arch/x86/mach-voyager/voyager_smp.c 			cpu, QIC_CPI_ENABLE));
cpu              1683 arch/x86/mach-voyager/voyager_smp.c 		cpu, vic_irq_mask[cpu]));
cpu              1688 arch/x86/mach-voyager/voyager_smp.c 	int old_cpu = smp_processor_id(), cpu;
cpu              1691 arch/x86/mach-voyager/voyager_smp.c 	for_each_online_cpu(cpu) {
cpu              1696 arch/x86/mach-voyager/voyager_smp.c 		outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
cpu              1709 arch/x86/mach-voyager/voyager_smp.c 		       cpu, vic_irq_mask[cpu], imr, irr, isr);
cpu              1717 arch/x86/mach-voyager/voyager_smp.c 					       cpu, irq);
cpu              1719 arch/x86/mach-voyager/voyager_smp.c 					outb(VIC_CPU_MASQUERADE_ENABLE | cpu,
cpu              1759 arch/x86/mach-voyager/voyager_smp.c 	if (cpu_isset(cpu, smp_commenced_mask))
cpu              1763 arch/x86/mach-voyager/voyager_smp.c 	if (!cpu_isset(cpu, cpu_callin_map))
cpu              1766 arch/x86/mach-voyager/voyager_smp.c 	cpu_set(cpu, smp_commenced_mask);
cpu              1767 arch/x86/mach-voyager/voyager_smp.c 	while (!cpu_online(cpu))
cpu              1779 arch/x86/mach-voyager/voyager_smp.c 	current_thread_info()->cpu = hard_smp_processor_id();
cpu               411 arch/x86/mm/mmio-mod.c 	int cpu;
cpu               421 arch/x86/mm/mmio-mod.c 	for_each_cpu_mask(cpu, downed_cpus) {
cpu               422 arch/x86/mm/mmio-mod.c 		err = cpu_down(cpu);
cpu               424 arch/x86/mm/mmio-mod.c 			pr_info(NAME "CPU%d is down.\n", cpu);
cpu               426 arch/x86/mm/mmio-mod.c 			pr_err(NAME "Error taking CPU%d down: %d\n", cpu, err);
cpu               437 arch/x86/mm/mmio-mod.c 	int cpu;
cpu               443 arch/x86/mm/mmio-mod.c 	for_each_cpu_mask(cpu, downed_cpus) {
cpu               444 arch/x86/mm/mmio-mod.c 		err = cpu_up(cpu);
cpu               446 arch/x86/mm/mmio-mod.c 			pr_info(NAME "enabled CPU%d.\n", cpu);
cpu               448 arch/x86/mm/mmio-mod.c 			pr_err(NAME "cannot re-enable CPU%d: %d\n", cpu, err);
cpu               621 arch/x86/mm/numa_64.c 	int cpu;
cpu               626 arch/x86/mm/numa_64.c 	for_each_possible_cpu(cpu) {
cpu               628 arch/x86/mm/numa_64.c 		u16 apicid = cpu_to_apicid[cpu];
cpu               637 arch/x86/mm/numa_64.c 		numa_set_node(cpu, node);
cpu                43 arch/x86/oprofile/nmi_int.c 	int cpu = (unsigned long)data;
cpu                47 arch/x86/oprofile/nmi_int.c 		smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
cpu                50 arch/x86/oprofile/nmi_int.c 		smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
cpu               115 arch/x86/oprofile/nmi_int.c 	int cpu = smp_processor_id();
cpu               119 arch/x86/oprofile/nmi_int.c 		if (model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu)))
cpu               155 arch/x86/oprofile/nmi_int.c 	int cpu = smp_processor_id();
cpu               156 arch/x86/oprofile/nmi_int.c 	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
cpu               201 arch/x86/oprofile/nmi_int.c 	int cpu = smp_processor_id();
cpu               202 arch/x86/oprofile/nmi_int.c 	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
cpu               206 arch/x86/oprofile/nmi_int.c 	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
cpu               219 arch/x86/oprofile/nmi_int.c 	int cpu;
cpu               236 arch/x86/oprofile/nmi_int.c 	for_each_possible_cpu(cpu) {
cpu               237 arch/x86/oprofile/nmi_int.c 		if (cpu != 0) {
cpu               238 arch/x86/oprofile/nmi_int.c 			memcpy(per_cpu(cpu_msrs, cpu).counters,
cpu               242 arch/x86/oprofile/nmi_int.c 			memcpy(per_cpu(cpu_msrs, cpu).controls,
cpu               282 arch/x86/oprofile/nmi_int.c 	int cpu = smp_processor_id();
cpu               292 arch/x86/oprofile/nmi_int.c 	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
cpu               382 arch/x86/oprofile/op_model_p4.c 	int cpu = smp_processor_id();
cpu               383 arch/x86/oprofile/op_model_p4.c 	return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu)));
cpu               582 arch/x86/pci/amd_bus.c 	int cpu = (long)hcpu;
cpu               586 arch/x86/pci/amd_bus.c 		smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0);
cpu               600 arch/x86/pci/amd_bus.c 	int cpu;
cpu               607 arch/x86/pci/amd_bus.c 	for_each_online_cpu(cpu)
cpu               609 arch/x86/pci/amd_bus.c 			       (void *)(long)cpu);
cpu                51 arch/x86/pci/mmconfig_32.c 	int cpu = smp_processor_id();
cpu                53 arch/x86/pci/mmconfig_32.c 	    cpu != mmcfg_last_accessed_cpu) {
cpu                55 arch/x86/pci/mmconfig_32.c 		mmcfg_last_accessed_cpu = cpu;
cpu                69 arch/x86/power/cpu_32.c 	int cpu = smp_processor_id();
cpu                70 arch/x86/power/cpu_32.c 	struct tss_struct *t = &per_cpu(init_tss, cpu);
cpu                72 arch/x86/power/cpu_32.c 	set_tss_desc(cpu, t);	/*
cpu               145 arch/x86/power/cpu_64.c 	int cpu = smp_processor_id();
cpu               146 arch/x86/power/cpu_64.c 	struct tss_struct *t = &per_cpu(init_tss, cpu);
cpu               153 arch/x86/power/cpu_64.c 	set_tss_desc(cpu, t);
cpu               155 arch/x86/power/cpu_64.c 	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
cpu               228 arch/x86/vdso/vdso32-setup.c 	int cpu = get_cpu();
cpu               229 arch/x86/vdso/vdso32-setup.c 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
cpu                28 arch/x86/vdso/vgetcpu.c 	if (cpu)
cpu                29 arch/x86/vdso/vgetcpu.c 		*cpu = p & 0xfff;
cpu               132 arch/x86/xen/enlighten.c 	per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
cpu               137 arch/x86/xen/enlighten.c 	vcpup = &per_cpu(xen_vcpu_info, cpu);
cpu               143 arch/x86/xen/enlighten.c 	       cpu, vcpup, info.mfn, info.offset);
cpu               148 arch/x86/xen/enlighten.c 	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
cpu               156 arch/x86/xen/enlighten.c 		per_cpu(xen_vcpu, cpu) = vcpup;
cpu               159 arch/x86/xen/enlighten.c 		       cpu, vcpup);
cpu               171 arch/x86/xen/enlighten.c 		int cpu;
cpu               173 arch/x86/xen/enlighten.c 		for_each_online_cpu(cpu) {
cpu               174 arch/x86/xen/enlighten.c 			bool other_cpu = (cpu != smp_processor_id());
cpu               177 arch/x86/xen/enlighten.c 			    HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
cpu               180 arch/x86/xen/enlighten.c 			xen_vcpu_setup(cpu);
cpu               183 arch/x86/xen/enlighten.c 			    HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
cpu               349 arch/x86/xen/enlighten.c 	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
cpu               385 arch/x86/xen/enlighten.c 	load_TLS_descriptor(t, cpu, 0);
cpu               386 arch/x86/xen/enlighten.c 	load_TLS_descriptor(t, cpu, 1);
cpu               387 arch/x86/xen/enlighten.c 	load_TLS_descriptor(t, cpu, 2);
cpu              1062 arch/x86/xen/enlighten.c 	int cpu;
cpu              1064 arch/x86/xen/enlighten.c 	for_each_possible_cpu(cpu)
cpu              1065 arch/x86/xen/enlighten.c 		xen_vcpu_setup(cpu);
cpu                29 arch/x86/xen/irq.c 		int cpu;
cpu                31 arch/x86/xen/irq.c 		for_each_possible_cpu(cpu)
cpu                32 arch/x86/xen/irq.c 			per_cpu(vector_irq, cpu)[i] = i;
cpu              1062 arch/x86/xen/mmu.c 	unsigned cpu;
cpu              1080 arch/x86/xen/mmu.c 	for_each_online_cpu(cpu) {
cpu              1081 arch/x86/xen/mmu.c 		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
cpu              1082 arch/x86/xen/mmu.c 			cpu_set(cpu, mask);
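
The xen/mmu.c lines above collect, into a cpumask, every CPU whose cached cr3 still points at the page table being torn down. Stripped of the Xen specifics, the scan-and-collect shape looks like this; my_percpu_word is a stand-in for xen_current_cr3:

static DEFINE_PER_CPU(unsigned long, my_percpu_word);

static cpumask_t cpus_with_value(unsigned long val)
{
        cpumask_t mask = CPU_MASK_NONE;
        unsigned cpu;

        for_each_online_cpu(cpu) {
                if (per_cpu(my_percpu_word, cpu) == val)
                        cpu_set(cpu, mask);
        }
        return mask;
}
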
cpu                64 arch/x86/xen/smp.c 	int cpu = smp_processor_id();
cpu                73 arch/x86/xen/smp.c 	cpu = smp_processor_id();
cpu                74 arch/x86/xen/smp.c 	smp_store_cpu_info(cpu);
cpu                75 arch/x86/xen/smp.c 	cpu_data(cpu).x86_max_cores = 1;
cpu                76 arch/x86/xen/smp.c 	set_cpu_sibling_map(cpu);
cpu                80 arch/x86/xen/smp.c 	cpu_set(cpu, cpu_online_map);
cpu               101 arch/x86/xen/smp.c 	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
cpu               103 arch/x86/xen/smp.c 				    cpu,
cpu               110 arch/x86/xen/smp.c 	per_cpu(resched_irq, cpu) = rc;
cpu               112 arch/x86/xen/smp.c 	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
cpu               114 arch/x86/xen/smp.c 				    cpu,
cpu               121 arch/x86/xen/smp.c 	per_cpu(callfunc_irq, cpu) = rc;
cpu               123 arch/x86/xen/smp.c 	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
cpu               124 arch/x86/xen/smp.c 	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
cpu               129 arch/x86/xen/smp.c 	per_cpu(debug_irq, cpu) = rc;
cpu               131 arch/x86/xen/smp.c 	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
cpu               133 arch/x86/xen/smp.c 				    cpu,
cpu               140 arch/x86/xen/smp.c 	per_cpu(callfuncsingle_irq, cpu) = rc;
cpu               145 arch/x86/xen/smp.c 	if (per_cpu(resched_irq, cpu) >= 0)
cpu               146 arch/x86/xen/smp.c 		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
cpu               147 arch/x86/xen/smp.c 	if (per_cpu(callfunc_irq, cpu) >= 0)
cpu               148 arch/x86/xen/smp.c 		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
cpu               149 arch/x86/xen/smp.c 	if (per_cpu(debug_irq, cpu) >= 0)
cpu               150 arch/x86/xen/smp.c 		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
cpu               151 arch/x86/xen/smp.c 	if (per_cpu(callfuncsingle_irq, cpu) >= 0)
cpu               152 arch/x86/xen/smp.c 		unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
cpu               184 arch/x86/xen/smp.c 	unsigned cpu;
cpu               199 arch/x86/xen/smp.c 		for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
cpu               201 arch/x86/xen/smp.c 		cpu_clear(cpu, cpu_possible_map);
cpu               204 arch/x86/xen/smp.c 	for_each_possible_cpu (cpu) {
cpu               207 arch/x86/xen/smp.c 		if (cpu == 0)
cpu               210 arch/x86/xen/smp.c 		idle = fork_idle(cpu);
cpu               212 arch/x86/xen/smp.c 			panic("failed fork for CPU %d", cpu);
cpu               214 arch/x86/xen/smp.c 		cpu_set(cpu, cpu_present_map);
cpu               224 arch/x86/xen/smp.c 	if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
cpu               231 arch/x86/xen/smp.c 	gdt = get_cpu_gdt_table(cpu);
cpu               268 arch/x86/xen/smp.c 	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
cpu               271 arch/x86/xen/smp.c 	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
cpu               280 arch/x86/xen/smp.c 	struct task_struct *idle = idle_task(cpu);
cpu               285 arch/x86/xen/smp.c 	WARN_ON(cpu == 0);
cpu               286 arch/x86/xen/smp.c 	if (cpu > 0) {
cpu               287 arch/x86/xen/smp.c 		rc = get_local_pda(cpu);
cpu               294 arch/x86/xen/smp.c 	init_gdt(cpu);
cpu               295 arch/x86/xen/smp.c 	per_cpu(current_task, cpu) = idle;
cpu               296 arch/x86/xen/smp.c 	irq_ctx_init(cpu);
cpu               298 arch/x86/xen/smp.c 	cpu_pda(cpu)->pcurrent = idle;
cpu               301 arch/x86/xen/smp.c 	xen_setup_timer(cpu);
cpu               302 arch/x86/xen/smp.c 	xen_init_lock_cpu(cpu);
cpu               304 arch/x86/xen/smp.c 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
cpu               307 arch/x86/xen/smp.c 	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
cpu               309 arch/x86/xen/smp.c 	rc = cpu_initialize_context(cpu, idle);
cpu               316 arch/x86/xen/smp.c 	rc = xen_smp_intr_init(cpu);
cpu               320 arch/x86/xen/smp.c 	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
cpu               323 arch/x86/xen/smp.c 	while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
cpu               338 arch/x86/xen/smp.c 	unsigned int cpu = smp_processor_id();
cpu               339 arch/x86/xen/smp.c 	if (cpu == 0)
cpu               350 arch/x86/xen/smp.c 	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
cpu               354 arch/x86/xen/smp.c 	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
cpu               355 arch/x86/xen/smp.c 	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
cpu               356 arch/x86/xen/smp.c 	unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
cpu               357 arch/x86/xen/smp.c 	unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
cpu               358 arch/x86/xen/smp.c 	xen_uninit_lock_cpu(cpu);
cpu               359 arch/x86/xen/smp.c 	xen_teardown_timer(cpu);
cpu               391 arch/x86/xen/smp.c 	int cpu = smp_processor_id();
cpu               397 arch/x86/xen/smp.c 	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
cpu               408 arch/x86/xen/smp.c 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
cpu               413 arch/x86/xen/smp.c 	unsigned cpu;
cpu               417 arch/x86/xen/smp.c 	for_each_cpu_mask_nr(cpu, mask)
cpu               418 arch/x86/xen/smp.c 		xen_send_IPI_one(cpu, vector);
cpu               423 arch/x86/xen/smp.c 	int cpu;
cpu               428 arch/x86/xen/smp.c 	for_each_cpu_mask_nr(cpu, mask) {
cpu               429 arch/x86/xen/smp.c 		if (xen_vcpu_stolen(cpu)) {
cpu               438 arch/x86/xen/smp.c 	xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
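
The xen/smp.c entries above repeat one template per interrupt source: kasprintf a per-CPU name, bind the virq or IPI to a handler on that CPU, record the returned irq in a per-CPU slot, and unwind every earlier binding on failure (hence the >= 0 guards). One leg of that template, condensed; my_handler, my_bind, and my_virq_irq are illustrative, and note that the name string must stay allocated as long as the irq is bound:

#include <linux/interrupt.h>
#include <xen/events.h>

static DEFINE_PER_CPU(int, my_virq_irq) = -1;

static irqreturn_t my_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int my_bind(unsigned int cpu)
{
        char *name = kasprintf(GFP_KERNEL, "myvirq%d", cpu);
        int rc;

        if (!name)
                return -ENOMEM;
        rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, my_handler,
                                     IRQF_DISABLED, name, NULL);
        if (rc < 0) {
                kfree(name);
                return rc;
        }
        per_cpu(my_virq_irq, cpu) = rc;
        return 0;
}

static void my_unbind(unsigned int cpu)
{
        if (per_cpu(my_virq_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(my_virq_irq, cpu), NULL);
}
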
cpu               303 arch/x86/xen/spinlock.c 	int cpu;
cpu               307 arch/x86/xen/spinlock.c 	for_each_online_cpu(cpu) {
cpu               309 arch/x86/xen/spinlock.c 		if (per_cpu(lock_spinners, cpu) == xl) {
cpu               311 arch/x86/xen/spinlock.c 			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
cpu               344 arch/x86/xen/spinlock.c 	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
cpu               346 arch/x86/xen/spinlock.c 				     cpu,
cpu               354 arch/x86/xen/spinlock.c 		per_cpu(lock_kicker_irq, cpu) = irq;
cpu               357 arch/x86/xen/spinlock.c 	printk("cpu %d spinlock event irq %d\n", cpu, irq);
cpu               362 arch/x86/xen/spinlock.c 	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
cpu               107 arch/x86/xen/time.c 	area.addr.v = &per_cpu(runstate, cpu);
cpu               110 arch/x86/xen/time.c 			       cpu, &area))
cpu               351 arch/x86/xen/time.c 	int cpu = smp_processor_id();
cpu               359 arch/x86/xen/time.c 		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
cpu               365 arch/x86/xen/time.c 		if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
cpu               366 arch/x86/xen/time.c 		    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
cpu               377 arch/x86/xen/time.c 	int cpu = smp_processor_id();
cpu               386 arch/x86/xen/time.c 	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);
cpu               434 arch/x86/xen/time.c 	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
cpu               436 arch/x86/xen/time.c 	name = kasprintf(GFP_KERNEL, "timer%d", cpu);
cpu               440 arch/x86/xen/time.c 	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
cpu               444 arch/x86/xen/time.c 	evt = &per_cpu(xen_clock_events, cpu);
cpu               447 arch/x86/xen/time.c 	evt->cpumask = cpumask_of_cpu(cpu);
cpu               450 arch/x86/xen/time.c 	setup_runstate_info(cpu);
cpu               456 arch/x86/xen/time.c 	BUG_ON(cpu == 0);
cpu               457 arch/x86/xen/time.c 	evt = &per_cpu(xen_clock_events, cpu);
cpu               470 arch/x86/xen/time.c 	int cpu;
cpu               475 arch/x86/xen/time.c 	for_each_online_cpu(cpu) {
cpu               476 arch/x86/xen/time.c 		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
cpu               483 arch/x86/xen/time.c 	int cpu = smp_processor_id();
cpu               487 arch/x86/xen/time.c 	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
cpu               501 arch/x86/xen/time.c 	xen_setup_timer(cpu);
cpu                55 block/blk-core.c 	int cpu;
cpu                60 block/blk-core.c 	cpu = part_stat_lock();
cpu                64 block/blk-core.c 		part_stat_inc(cpu, part, merges[rw]);
cpu                66 block/blk-core.c 		part_round_stats(cpu, part);
cpu               114 block/blk-core.c 	rq->cpu = -1;
cpu               998 block/blk-core.c 		__part_stat_add(cpu, part, time_in_queue,
cpu              1000 block/blk-core.c 		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
cpu              1025 block/blk-core.c 		part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
cpu              1026 block/blk-core.c 	part_round_stats_single(cpu, part, now);
cpu              1072 block/blk-core.c 	req->cpu = bio->bi_comp_cpu;
cpu              1154 block/blk-core.c 			req->cpu = bio->bi_comp_cpu;
cpu              1183 block/blk-core.c 			req->cpu = bio->bi_comp_cpu;
cpu              1221 block/blk-core.c 		req->cpu = blk_cpu_to_group(smp_processor_id());
cpu              1654 block/blk-core.c 		int cpu;
cpu              1656 block/blk-core.c 		cpu = part_stat_lock();
cpu              1658 block/blk-core.c 		part_stat_add(cpu, part, sectors[rw], nr_bytes >> 9);
cpu              1770 block/blk-core.c 		int cpu;
cpu              1772 block/blk-core.c 		cpu = part_stat_lock();
cpu              1775 block/blk-core.c 		part_stat_inc(cpu, part, ios[rw]);
cpu              1776 block/blk-core.c 		part_stat_add(cpu, part, ticks[rw], duration);
cpu              1777 block/blk-core.c 		part_round_stats(cpu, part);
cpu               391 block/blk-merge.c 		int cpu;
cpu               393 block/blk-merge.c 		cpu = part_stat_lock();
cpu               396 block/blk-merge.c 		part_round_stats(cpu, part);
cpu               404 block/blk-merge.c 		req->cpu = next->cpu;
cpu                60 block/blk-softirq.c 	if (cpu_online(cpu)) {
cpu                67 block/blk-softirq.c 		__smp_call_function_single(cpu, data);
cpu                88 block/blk-softirq.c 		int cpu = (unsigned long) hcpu;
cpu                91 block/blk-softirq.c 		list_splice_init(&per_cpu(blk_cpu_done, cpu),
cpu               108 block/blk-softirq.c 	int ccpu, cpu, group_cpu;
cpu               113 block/blk-softirq.c 	cpu = smp_processor_id();
cpu               114 block/blk-softirq.c 	group_cpu = blk_cpu_to_group(cpu);
cpu               119 block/blk-softirq.c 	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1)
cpu               120 block/blk-softirq.c 		ccpu = req->cpu;
cpu               122 block/blk-softirq.c 		ccpu = cpu;
cpu               124 block/blk-softirq.c 	if (ccpu == cpu || ccpu == group_cpu) {
cpu               101 block/blk.h    	cpumask_t mask = cpu_coregroup_map(cpu);
cpu               104 block/blk.h    	return first_cpu(per_cpu(cpu_sibling_map, cpu));
cpu               106 block/blk.h    	return cpu;
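
The three block/blk.h fragments above are the configuration-dependent bodies of blk_cpu_to_group(), which maps a CPU to a representative of its core group so completions can be steered per group (see the QUEUE_FLAG_SAME_COMP handling in blk-softirq.c above). A plausible reconstruction; the #ifdef arms are inferred from the three return statements shown:

static inline int blk_cpu_to_group(int cpu)
{
#ifdef CONFIG_SCHED_MC
        cpumask_t mask = cpu_coregroup_map(cpu);
        return first_cpu(mask);
#elif defined(CONFIG_SCHED_SMT)
        return first_cpu(per_cpu(cpu_sibling_map, cpu));
#else
        return cpu;
#endif
}
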
cpu                40 block/blktrace.c 		const int cpu = smp_processor_id();
cpu                47 block/blktrace.c 		t->cpu = cpu;
cpu               130 block/blktrace.c 	int cpu;
cpu               158 block/blktrace.c 		cpu = smp_processor_id();
cpu               159 block/blktrace.c 		sequence = per_cpu_ptr(bt->sequence, cpu);
cpu               169 block/blktrace.c 		t->cpu = cpu;
cpu               959 block/genhd.c  	int cpu;
cpu               971 block/genhd.c  		cpu = part_stat_lock();
cpu               972 block/genhd.c  		part_round_stats(cpu, hd);
cpu               214 crypto/async_tx/async_tx.c 	int cpu, cap, cpu_idx = 0;
cpu               224 crypto/async_tx/async_tx.c 		for_each_possible_cpu(cpu) {
cpu               226 crypto/async_tx/async_tx.c 				per_cpu_ptr(channel_table[cap], cpu)->ref;
cpu               229 crypto/async_tx/async_tx.c 				per_cpu_ptr(channel_table[cap], cpu)->ref =
cpu               235 crypto/async_tx/async_tx.c 		for_each_online_cpu(cpu) {
cpu               242 crypto/async_tx/async_tx.c 			per_cpu_ptr(channel_table[cap], cpu)->ref = new;
cpu               396 crypto/async_tx/async_tx.c 		int cpu = get_cpu();
cpu               397 crypto/async_tx/async_tx.c 		ref = per_cpu_ptr(channel_table[tx_type], cpu)->ref;
cpu              3259 fs/buffer.c    	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
cpu              3265 fs/buffer.c    	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
cpu              3266 fs/buffer.c    	per_cpu(bh_accounting, cpu).nr = 0;
cpu               409 fs/file.c      	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
cpu               187 fs/namespace.c 	int cpu;
cpu               188 fs/namespace.c 	for_each_possible_cpu(cpu) {
cpu               189 fs/namespace.c 		struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
cpu               200 fs/namespace.c 	int cpu;
cpu               203 fs/namespace.c 	for_each_possible_cpu(cpu) {
cpu               204 fs/namespace.c 		cpu_writer = &per_cpu(mnt_writers, cpu);
cpu               274 fs/namespace.c 	int cpu;
cpu               277 fs/namespace.c 	for_each_possible_cpu(cpu) {
cpu               278 fs/namespace.c 		cpu_writer = &per_cpu(mnt_writers, cpu);
cpu               607 fs/namespace.c 	int cpu;
cpu               615 fs/namespace.c 	for_each_possible_cpu(cpu) {
cpu               616 fs/namespace.c 		struct mnt_writer *cpu_writer = &per_cpu(mnt_writers, cpu);
cpu                26 fs/nfs/iostat.h 	int cpu;
cpu                28 fs/nfs/iostat.h 	cpu = get_cpu();
cpu                29 fs/nfs/iostat.h 	iostats = per_cpu_ptr(server->io_stats, cpu);
cpu                45 fs/nfs/iostat.h 	int cpu;
cpu                47 fs/nfs/iostat.h 	cpu = get_cpu();
cpu                48 fs/nfs/iostat.h 	iostats = per_cpu_ptr(server->io_stats, cpu);
cpu               576 fs/nfs/super.c 	int i, cpu;
cpu               621 fs/nfs/super.c 	for_each_possible_cpu(cpu) {
cpu               625 fs/nfs/super.c 		stats = per_cpu_ptr(nfss->io_stats, cpu);
cpu               217 fs/partitions/check.c 	int cpu;
cpu               219 fs/partitions/check.c 	cpu = part_stat_lock();
cpu               220 fs/partitions/check.c 	part_round_stats(cpu, p);
cpu              2319 fs/xfs/xfs_mount.c 	int		cpu, ret = 0;
cpu              2323 fs/xfs/xfs_mount.c 	cpu = get_cpu();
cpu              2324 fs/xfs/xfs_mount.c 	icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);
cpu               101 include/asm-cris/arch-v32/hwregs/config_defs.h   unsigned int cpu          : 1;
cpu                96 include/asm-cris/arch-v32/mach-a3/hwregs/clkgen_defs.h   unsigned int cpu             : 1;
cpu               101 include/asm-cris/arch-v32/mach-fs/hwregs/config_defs.h   unsigned int cpu          : 1;
cpu                 9 include/asm-cris/smp.h #define raw_smp_processor_id() (current_thread_info()->cpu)
cpu                35 include/asm-cris/thread_info.h 	__u32			cpu;		/* current CPU */
cpu                62 include/asm-cris/thread_info.h 	.cpu		= 0,				\
cpu                37 include/asm-frv/thread_info.h 	__u32			cpu;		/* current CPU */
cpu                69 include/asm-frv/thread_info.h 	.cpu		= 0,			\
cpu                57 include/asm-generic/percpu.h 	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
cpu                70 include/asm-generic/percpu.h #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu_var(var)))
cpu                35 include/asm-generic/topology.h #define cpu_to_node(cpu)	((void)(cpu),0)
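
Both uniprocessor fallbacks above lean on the same comma-expression idiom, ((void)(cpu), value): the cpu argument is still evaluated, so side effects and unused-variable warnings behave exactly as on SMP, but the result is discarded and the macro keeps yielding an lvalue (per_cpu) or a constant (cpu_to_node). The idiom in plain, runnable C:

#include <stdio.h>

#define FAKE_PER_CPU(var, cpu) (*((void)(cpu), &(var)))

int main(void)
{
        int counter = 0;
        int cpu = 0;

        FAKE_PER_CPU(counter, cpu) += 5;        /* lvalue: assignment works */
        printf("%d\n", FAKE_PER_CPU(counter, cpu));     /* prints 5 */
        return 0;
}
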
cpu                25 include/asm-generic/vmlinux.lds.h #define CPU_KEEP(sec)    *(.cpu##sec)
cpu                29 include/asm-generic/vmlinux.lds.h #define CPU_DISCARD(sec) *(.cpu##sec)
cpu               125 include/asm-m32r/mmu_context.h 	int cpu = smp_processor_id();
cpu               130 include/asm-m32r/mmu_context.h 		cpu_set(cpu, next->cpu_vm_mask);
cpu               138 include/asm-m32r/mmu_context.h 		if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
cpu                63 include/asm-m32r/smp.h #define raw_smp_processor_id()	(current_thread_info()->cpu)
cpu                76 include/asm-m32r/smp.h 	return cpu;
cpu                81 include/asm-m32r/smp.h 	return cpu;
cpu                30 include/asm-m32r/thread_info.h 	__u32			cpu;		/* current CPU */
cpu                70 include/asm-m32r/thread_info.h 	.cpu		= 0,			\
cpu                12 include/asm-m68k/thread_info.h 	__u32 cpu; /* should always be 0 on m68k */
cpu                42 include/asm-mn10300/mmu_context.h 	cpu_set((cpu), (task)->cpu_vm_mask)
cpu                44 include/asm-mn10300/mmu_context.h 	cpu_test_and_set((cpu), (task)->cpu_vm_mask)
cpu               124 include/asm-mn10300/mmu_context.h 	int cpu = smp_processor_id();
cpu               127 include/asm-mn10300/mmu_context.h 		cpu_ran_vm(cpu, next);
cpu               128 include/asm-mn10300/mmu_context.h 		activate_context(next, cpu);
cpu               130 include/asm-mn10300/mmu_context.h 	} else if (!cpu_maybe_ran_vm(cpu, next)) {
cpu               131 include/asm-mn10300/mmu_context.h 		activate_context(next, cpu);
cpu                46 include/asm-mn10300/thread_info.h 	__u32			cpu;		/* current CPU */
cpu                78 include/asm-mn10300/thread_info.h 	.cpu		= 0,			\
cpu                27 include/asm-parisc/smp.h #define cpu_number_map(cpu)	(cpu)
cpu                28 include/asm-parisc/smp.h #define cpu_logical_map(cpu)	(cpu)
cpu                49 include/asm-parisc/smp.h #define raw_smp_processor_id()	(current_thread_info()->cpu)
cpu                14 include/asm-parisc/thread_info.h 	__u32 cpu;			/* current CPU */
cpu                24 include/asm-parisc/thread_info.h 	.cpu		= 0,			\
cpu                35 include/asm-um/mmu_context.h 	unsigned cpu = smp_processor_id();
cpu                38 include/asm-um/mmu_context.h 		cpu_clear(cpu, prev->cpu_vm_mask);
cpu                39 include/asm-um/mmu_context.h 		cpu_set(cpu, next->cpu_vm_mask);
cpu                10 include/asm-um/smp.h #define raw_smp_processor_id() (current_thread->cpu)
cpu                19 include/asm-um/thread_info.h 	__u32			cpu;		/* current CPU */
cpu                34 include/asm-um/thread_info.h 	.cpu =		0,			\
cpu                 4 include/asm-x86/bigsmp/apic.h #define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
cpu                15 include/asm-x86/bigsmp/apic.h 	static unsigned long cpu = NR_CPUS;
cpu                17 include/asm-x86/bigsmp/apic.h 		if (cpu >= NR_CPUS)
cpu                18 include/asm-x86/bigsmp/apic.h 			cpu = first_cpu(cpu_online_map);
cpu                20 include/asm-x86/bigsmp/apic.h 			cpu = next_cpu(cpu, cpu_online_map);
cpu                21 include/asm-x86/bigsmp/apic.h 	} while (cpu >= NR_CPUS);
cpu                22 include/asm-x86/bigsmp/apic.h 	return cpumask_of_cpu(cpu);
cpu                49 include/asm-x86/bigsmp/apic.h 	id = xapic_phys_to_log_apicid(cpu);
cpu                64 include/asm-x86/bigsmp/apic.h 	int cpu = smp_processor_id();
cpu                67 include/asm-x86/bigsmp/apic.h 	val = calculate_ldr(cpu);
cpu               104 include/asm-x86/bigsmp/apic.h 	if (cpu >= NR_CPUS)
cpu               106 include/asm-x86/bigsmp/apic.h 	return cpu_physical_id(cpu);
cpu               131 include/asm-x86/bigsmp/apic.h 	int cpu;
cpu               134 include/asm-x86/bigsmp/apic.h 	cpu = first_cpu(cpumask);
cpu               135 include/asm-x86/bigsmp/apic.h 	apicid = cpu_to_logical_apicid(cpu);
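
The bigsmp/apic.h fragments above rotate interrupt targets through the online CPUs with a static cursor: reset to first_cpu() once the cursor runs past the end, advance with next_cpu() otherwise, and loop until it lands on a live CPU. Reassembled from the fragments, with the function boundary inferred:

static cpumask_t target_cpus_round_robin(void)
{
        static unsigned long cpu = NR_CPUS;

        do {
                if (cpu >= NR_CPUS)
                        cpu = first_cpu(cpu_online_map);
                else
                        cpu = next_cpu(cpu, cpu_online_map);
        } while (cpu >= NR_CPUS);
        return cpumask_of_cpu(cpu);
}
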
cpu                11 include/asm-x86/cpu.h 	struct cpu cpu;
cpu                44 include/asm-x86/desc.h 	return per_cpu(gdt_page, cpu).gdt;
cpu                96 include/asm-x86/desc.h #define load_TLS(t, cpu) native_load_tls(t, cpu)
cpu               178 include/asm-x86/desc.h 	struct desc_struct *d = get_cpu_gdt_table(cpu);
cpu               194 include/asm-x86/desc.h #define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
cpu               201 include/asm-x86/desc.h 		unsigned cpu = smp_processor_id();
cpu               206 include/asm-x86/desc.h 		write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
cpu               247 include/asm-x86/desc.h 	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
cpu                 4 include/asm-x86/es7000/apic.h #define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
cpu                53 include/asm-x86/es7000/apic.h 	id = xapic_phys_to_log_apicid(cpu);
cpu                67 include/asm-x86/es7000/apic.h 	int cpu = smp_processor_id();
cpu                70 include/asm-x86/es7000/apic.h 	val = calculate_ldr(cpu);
cpu               122 include/asm-x86/es7000/apic.h        if (cpu >= NR_CPUS)
cpu               124 include/asm-x86/es7000/apic.h        return (int)cpu_2_logical_apicid[cpu];
cpu               152 include/asm-x86/es7000/apic.h 	int cpu;
cpu               167 include/asm-x86/es7000/apic.h 	cpu = first_cpu(cpumask);
cpu               168 include/asm-x86/es7000/apic.h 	apicid = cpu_to_logical_apicid(cpu);
cpu               170 include/asm-x86/es7000/apic.h 		if (cpu_isset(cpu, cpumask)) {
cpu               171 include/asm-x86/es7000/apic.h 			int new_apicid = cpu_to_logical_apicid(cpu);
cpu               184 include/asm-x86/es7000/apic.h 		cpu++;
cpu                41 include/asm-x86/genapic_32.h 	int (*cpu_to_logical_apicid)(int cpu);
cpu                22 include/asm-x86/genapic_64.h 	cpumask_t (*vector_allocation_domain)(int cpu);
cpu                23 include/asm-x86/hardirq_32.h #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
cpu               390 include/asm-x86/kvm_host.h 	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
cpu               113 include/asm-x86/mach-default/mach_apic.h 	return 1 << cpu;
cpu                39 include/asm-x86/mce.h 	__u8  cpu;	/* cpu that raised the error */
cpu                95 include/asm-x86/mce.h extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
cpu                13 include/asm-x86/microcode.h 	int  (*request_microcode_user) (int cpu, const void __user *buf, size_t size);
cpu                14 include/asm-x86/microcode.h 	int  (*request_microcode_fw) (int cpu, struct device *device);
cpu                16 include/asm-x86/microcode.h 	void (*apply_microcode) (int cpu);
cpu                18 include/asm-x86/microcode.h 	int  (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
cpu                19 include/asm-x86/microcode.h 	void (*microcode_fini_cpu) (int cpu);
cpu                 7 include/asm-x86/mmu_context_32.h 	unsigned cpu = smp_processor_id();
cpu                 8 include/asm-x86/mmu_context_32.h 	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
cpu                 9 include/asm-x86/mmu_context_32.h 		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
cpu                17 include/asm-x86/mmu_context_32.h 	int cpu = smp_processor_id();
cpu                21 include/asm-x86/mmu_context_32.h 		cpu_clear(cpu, prev->cpu_vm_mask);
cpu                23 include/asm-x86/mmu_context_32.h 		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
cpu                24 include/asm-x86/mmu_context_32.h 		per_cpu(cpu_tlbstate, cpu).active_mm = next;
cpu                26 include/asm-x86/mmu_context_32.h 		cpu_set(cpu, next->cpu_vm_mask);
cpu                39 include/asm-x86/mmu_context_32.h 		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
cpu                40 include/asm-x86/mmu_context_32.h 		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
cpu                42 include/asm-x86/mmu_context_32.h 		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
cpu                17 include/asm-x86/mmu_context_64.h 	unsigned cpu = smp_processor_id();
cpu                20 include/asm-x86/mmu_context_64.h 		cpu_clear(cpu, prev->cpu_vm_mask);
cpu                25 include/asm-x86/mmu_context_64.h 		cpu_set(cpu, next->cpu_vm_mask);
cpu                36 include/asm-x86/mmu_context_64.h 		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
cpu                68 include/asm-x86/numaq/apic.h        if (cpu >= NR_CPUS)
cpu                70 include/asm-x86/numaq/apic.h 	return (int)cpu_2_logical_apicid[cpu];
cpu                94 include/asm-x86/numaq/apic.h 	int cpu = __ffs(logical_apicid & 0xf);
cpu                96 include/asm-x86/numaq/apic.h 	return physid_mask_of_physid(cpu + 4*node);
cpu               117 include/asm-x86/paravirt.h 	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
cpu               878 include/asm-x86/paravirt.h 	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
cpu                15 include/asm-x86/percpu.h #define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
cpu               137 include/asm-x86/processor.h #define cpu_data(cpu)		per_cpu(cpu_info, cpu)
cpu               149 include/asm-x86/processor.h 	return cpu_data(cpu).hlt_works_ok;
cpu                56 include/asm-x86/smp.h 	void (*smp_send_reschedule)(int cpu);
cpu                58 include/asm-x86/smp.h 	int (*cpu_up)(unsigned cpu);
cpu                60 include/asm-x86/smp.h 	void (*cpu_die)(unsigned int cpu);
cpu                64 include/asm-x86/smp.h 	void (*send_call_func_single_ipi)(int cpu);
cpu                98 include/asm-x86/smp.h 	return smp_ops.cpu_up(cpu);
cpu               108 include/asm-x86/smp.h 	smp_ops.cpu_die(cpu);
cpu               118 include/asm-x86/smp.h 	smp_ops.smp_send_reschedule(cpu);
cpu               123 include/asm-x86/smp.h 	smp_ops.send_call_func_single_ipi(cpu);
cpu               147 include/asm-x86/smp.h #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
cpu               178 include/asm-x86/smp.h 	ti->cpu;							\
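
The asm-x86/smp.h entries above show the smp_ops indirection: each SMP primitive is a function pointer in one table, and the generic wrappers merely forward, so native, Xen, and other paravirt backends can each install their own implementations at boot. The pattern in miniature, with my_smp_ops as an illustrative stand-in:

struct my_smp_ops {
        int  (*cpu_up)(unsigned cpu);
        void (*send_reschedule)(int cpu);
};

extern struct my_smp_ops my_smp_ops;    /* filled in by the chosen backend */

static inline int my_cpu_up(unsigned cpu)
{
        return my_smp_ops.cpu_up(cpu);
}

static inline void my_send_reschedule(int cpu)
{
        my_smp_ops.send_reschedule(cpu);
}
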
cpu               101 include/asm-x86/summit/apic.h        if (cpu >= NR_CPUS)
cpu               103 include/asm-x86/summit/apic.h 	return (int)cpu_2_logical_apicid[cpu];
cpu               145 include/asm-x86/summit/apic.h 	int cpu;
cpu               156 include/asm-x86/summit/apic.h 	cpu = first_cpu(cpumask);
cpu               157 include/asm-x86/summit/apic.h 	apicid = cpu_to_logical_apicid(cpu);
cpu               159 include/asm-x86/summit/apic.h 		if (cpu_isset(cpu, cpumask)) {
cpu               160 include/asm-x86/summit/apic.h 			int new_apicid = cpu_to_logical_apicid(cpu);
cpu               169 include/asm-x86/summit/apic.h 		cpu++;
cpu                29 include/asm-x86/thread_info.h 	__u32			cpu;		/* current CPU */
cpu                48 include/asm-x86/thread_info.h 	.cpu		= 0,			\
cpu                56 include/asm-x86/topology.h 	return cpu_to_node_map[cpu];
cpu                58 include/asm-x86/topology.h #define early_cpu_to_node(cpu)	cpu_to_node(cpu)
cpu                93 include/asm-x86/topology.h 	return per_cpu(x86_cpu_to_node_map, cpu);
cpu               100 include/asm-x86/topology.h 		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
cpu               102 include/asm-x86/topology.h 	return per_cpu(x86_cpu_to_node_map, cpu);
cpu               224 include/asm-x86/topology.h #define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
cpu               225 include/asm-x86/topology.h #define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
cpu               226 include/asm-x86/topology.h #define topology_core_siblings(cpu)		(per_cpu(cpu_core_map, cpu))
cpu               227 include/asm-x86/topology.h #define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
cpu               326 include/asm-x86/uv/uv_bau.h 	test_bit((cpu), (bau_local_cpumask).bits)
cpu               136 include/asm-x86/uv/uv_hub.h #define uv_cpu_hub_info(cpu)	(&per_cpu(__uv_hub_info, cpu))
cpu               308 include/asm-x86/uv/uv_hub.h 	return uv_cpu_to_blade[cpu];
cpu               338 include/asm-x86/uv/uv_hub.h 	return uv_blade_info[uv_cpu_to_blade_id(cpu)].pnode;
cpu                25 include/asm-xtensa/smp.h #define cpu_logical_map(cpu)	(cpu)
cpu                50 include/asm-xtensa/thread_info.h 	__u32			cpu;		/* current CPU */
cpu                94 include/asm-xtensa/thread_info.h 	.cpu		= 0,			\
cpu               368 include/linux/bio.h 	bio->bi_comp_cpu = cpu;
cpu               145 include/linux/blkdev.h 	int cpu;
cpu               573 include/linux/blkdev.h #define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
cpu               105 include/linux/blktrace_api.h 	__u32 cpu;		/* on what cpu did it happen */
cpu                59 include/linux/cn_proc.h 	__u32 cpu;
cpu                87 include/linux/cpufreq.h 	unsigned int		cpu;    /* cpu nr of registered CPU */
cpu               124 include/linux/cpufreq.h 	unsigned int cpu;	/* cpu nr */
cpu               227 include/linux/cpufreq.h 	unsigned int	(*get)	(unsigned int cpu);
cpu               231 include/linux/cpufreq.h 				 unsigned int cpu);
cpu                87 include/linux/cpuidle.h 	unsigned int		cpu;
cpu               144 include/linux/cpumask.h #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
cpu               147 include/linux/cpumask.h 	set_bit(cpu, dstp->bits);
cpu               150 include/linux/cpumask.h #define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
cpu               153 include/linux/cpumask.h 	clear_bit(cpu, dstp->bits);
cpu               169 include/linux/cpumask.h #define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
cpu               171 include/linux/cpumask.h #define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
cpu               174 include/linux/cpumask.h 	return test_and_set_bit(cpu, addr->bits);
cpu               280 include/linux/cpumask.h 	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
cpu               281 include/linux/cpumask.h 	p -= cpu / BITS_PER_LONG;
cpu               290 include/linux/cpumask.h #define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
cpu               408 include/linux/cpumask.h 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
cpu               421 include/linux/cpumask.h 	for ((cpu) = -1;				\
cpu               422 include/linux/cpumask.h 		(cpu) = next_cpu((cpu), (mask)),	\
cpu               423 include/linux/cpumask.h 		(cpu) < NR_CPUS; )
cpu               430 include/linux/cpumask.h #define for_each_cpu_mask_nr(cpu, mask)	for_each_cpu_mask(cpu, mask)
cpu               438 include/linux/cpumask.h 	for ((cpu) = -1;				\
cpu               439 include/linux/cpumask.h 		(cpu) = next_cpu_nr((cpu), (mask)),	\
cpu               440 include/linux/cpumask.h 		(cpu) < nr_cpu_ids; )
cpu               510 include/linux/cpumask.h #define cpu_online(cpu)		cpu_isset((cpu), cpu_online_map)
cpu               511 include/linux/cpumask.h #define cpu_possible(cpu)	cpu_isset((cpu), cpu_possible_map)
cpu               512 include/linux/cpumask.h #define cpu_present(cpu)	cpu_isset((cpu), cpu_present_map)
cpu               513 include/linux/cpumask.h #define cpu_active(cpu)		cpu_isset((cpu), cpu_active_map)
cpu               518 include/linux/cpumask.h #define cpu_online(cpu)		((cpu) == 0)
cpu               519 include/linux/cpumask.h #define cpu_possible(cpu)	((cpu) == 0)
cpu               520 include/linux/cpumask.h #define cpu_present(cpu)	((cpu) == 0)
cpu               521 include/linux/cpumask.h #define cpu_active(cpu)		((cpu) == 0)
cpu               524 include/linux/cpumask.h #define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))
cpu               526 include/linux/cpumask.h #define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map)
cpu               527 include/linux/cpumask.h #define for_each_online_cpu(cpu)   for_each_cpu_mask_nr((cpu), cpu_online_map)
cpu               528 include/linux/cpumask.h #define for_each_present_cpu(cpu)  for_each_cpu_mask_nr((cpu), cpu_present_map)
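
The linux/cpumask.h run above is the old by-value cpumask API: cpu_set/cpu_clear/cpu_isset wrap the bitmap primitives, cpumask_of_cpu() hands back a precomputed single-bit mask, and the for_each_*_cpu loops iterate with next_cpu(). A short usage sketch against exactly these macros:

#include <linux/cpumask.h>

static void cpumask_demo(void)
{
        cpumask_t mask = CPU_MASK_NONE;
        int cpu;

        cpu_set(1, mask);                       /* set_bit on mask.bits */
        cpu_set(3, mask);
        if (cpu_isset(3, mask))                 /* test_bit */
                cpu_clear(3, mask);

        for_each_cpu_mask_nr(cpu, mask)         /* visits CPU 1 only */
                printk(KERN_INFO "cpu %d is set\n", cpu);
}
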
cpu                23 include/linux/dca.h 				     int cpu);
cpu               252 include/linux/genhd.h 	(per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd))
cpu               311 include/linux/genhd.h 	__part_stat_add((cpu), (part), field, addnd);			\
cpu               313 include/linux/genhd.h 		__part_stat_add((cpu), &part_to_disk((part))->part0,	\
cpu               318 include/linux/genhd.h 	part_stat_add(cpu, gendiskp, field, -1)
cpu               320 include/linux/genhd.h 	part_stat_add(cpu, gendiskp, field, 1)
cpu               322 include/linux/genhd.h 	part_stat_add(cpu, gendiskp, field, -subnd)
cpu               171 include/linux/irq.h 	unsigned int		cpu;
cpu                21 include/linux/irq_cpustat.h #define __IRQ_STAT(cpu, member)	(irq_stat[cpu].member)
cpu                29 include/linux/irq_cpustat.h #define nmi_count(cpu)		__IRQ_STAT((cpu), __nmi_count)	/* i386 */
cpu                36 include/linux/kernel_stat.h #define kstat_cpu(cpu)	per_cpu(kstat, cpu)
cpu                47 include/linux/kernel_stat.h 	int cpu, sum = 0;
cpu                49 include/linux/kernel_stat.h 	for_each_possible_cpu(cpu)
cpu                50 include/linux/kernel_stat.h 		sum += kstat_cpu(cpu).irqs[irq];
cpu                67 include/linux/kvm_host.h 	int   cpu;
cpu               167 include/linux/lockdep.h 	int				cpu;
cpu               401 include/linux/module.h 		unsigned int cpu = get_cpu();
cpu               403 include/linux/module.h 			local_inc(&module->ref[cpu].count);
cpu              1500 include/linux/netdevice.h 	txq->xmit_lock_owner = cpu;
cpu              1539 include/linux/netdevice.h 	int cpu;
cpu              1542 include/linux/netdevice.h 	cpu = smp_processor_id();
cpu              1552 include/linux/netdevice.h 		__netif_tx_lock(txq, cpu);
cpu              1590 include/linux/netdevice.h 		__netif_tx_lock(txq, cpu);		\
cpu              1603 include/linux/netdevice.h 	int cpu;
cpu              1606 include/linux/netdevice.h 	cpu = smp_processor_id();
cpu              1610 include/linux/netdevice.h 		__netif_tx_lock(txq, cpu);
cpu                81 include/linux/percpu.h         (__typeof__(ptr))__p->ptrs[(cpu)];	          \
cpu                89 include/linux/percpu.h #define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
cpu               114 include/linux/percpu.h #define per_cpu_ptr(ptr, cpu)	percpu_ptr((ptr), (cpu))
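
per_cpu_ptr() above is the dynamic-allocation counterpart of per_cpu(): it indexes storage returned by alloc_percpu(), exactly as blktrace's bt->sequence and nfs's server->io_stats entries use it. A minimal counter built on that pair; all names are illustrative:

#include <linux/percpu.h>

static int *my_counters;        /* free with free_percpu(my_counters) */

static int my_counters_init(void)
{
        my_counters = alloc_percpu(int);
        return my_counters ? 0 : -ENOMEM;
}

static void my_counters_inc(void)
{
        int cpu = get_cpu();    /* disables preemption, pins us here */
        (*per_cpu_ptr(my_counters, cpu))++;
        put_cpu();
}

static int my_counters_sum(void)
{
        int cpu, sum = 0;

        for_each_possible_cpu(cpu)
                sum += *per_cpu_ptr(my_counters, cpu);
        return sum;
}
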
cpu                 9 include/linux/posix-timers.h 	cputime_t cpu;
cpu                57 include/linux/posix-timers.h 		struct cpu_timer_list cpu;
cpu               110 include/linux/preempt.h 	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
cpu               107 include/linux/rcuclassic.h 	int cpu;
cpu               122 include/linux/rcuclassic.h 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
cpu               127 include/linux/rcuclassic.h 	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
cpu                55 include/linux/rcupreempt.h 	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
cpu                52 include/linux/relay.h 	unsigned int cpu;		/* this buf's cpu */
cpu              2105 include/linux/sched.h 	return task_thread_info(p)->cpu;
cpu               183 include/linux/topology.h #define topology_physical_package_id(cpu)	((void)(cpu), -1)
cpu               186 include/linux/topology.h #define topology_core_id(cpu)			((void)(cpu), 0)
cpu               189 include/linux/topology.h #define topology_thread_siblings(cpu)		cpumask_of_cpu(cpu)
cpu               192 include/linux/topology.h #define topology_core_siblings(cpu)		cpumask_of_cpu(cpu)
cpu                56 include/media/saa7146.h 	__le32		*cpu;
cpu              1160 include/net/tcp.h 	int cpu = get_cpu();
cpu              1161 include/net/tcp.h 	struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
cpu               379 init/main.c    	int cpu, highest_cpu = 0;
cpu               381 init/main.c    	for_each_possible_cpu(cpu)
cpu               382 init/main.c    		highest_cpu = cpu;
cpu               413 init/main.c    	unsigned int cpu;
cpu               419 init/main.c    	cpu = smp_processor_id();
cpu               420 init/main.c    	cpu_set(cpu, cpu_active_map);
cpu               423 init/main.c    	for_each_present_cpu(cpu) {
cpu               426 init/main.c    		if (!cpu_online(cpu))
cpu               427 init/main.c    			cpu_up(cpu);
cpu               524 init/main.c    	int cpu = smp_processor_id();
cpu               526 init/main.c    	cpu_set(cpu, cpu_online_map);
cpu               527 init/main.c    	cpu_set(cpu, cpu_present_map);
cpu               528 init/main.c    	cpu_set(cpu, cpu_possible_map);
cpu               180 kernel/cpu.c   		if (task_cpu(p) == cpu &&
cpu               185 kernel/cpu.c   				 p->comm, task_pid_nr(p), cpu,
cpu               221 kernel/cpu.c   	void *hcpu = (void *)(long)cpu;
cpu               231 kernel/cpu.c   	if (!cpu_online(cpu))
cpu               242 kernel/cpu.c   				__func__, cpu);
cpu               250 kernel/cpu.c   	cpu_clear(cpu, tmp);
cpu               252 kernel/cpu.c   	tmp = cpumask_of_cpu(cpu);
cpu               263 kernel/cpu.c   	BUG_ON(cpu_online(cpu));
cpu               266 kernel/cpu.c   	while (!idle_cpu(cpu))
cpu               270 kernel/cpu.c   	__cpu_die(cpu);
cpu               277 kernel/cpu.c   	check_for_tasks(cpu);
cpu               302 kernel/cpu.c   	cpu_clear(cpu, cpu_active_map);
cpu               314 kernel/cpu.c   	err = _cpu_down(cpu, 0);
cpu               316 kernel/cpu.c   	if (cpu_online(cpu))
cpu               317 kernel/cpu.c   		cpu_set(cpu, cpu_active_map);
cpu               330 kernel/cpu.c   	void *hcpu = (void *)(long)cpu;
cpu               333 kernel/cpu.c   	if (cpu_online(cpu) || !cpu_present(cpu))
cpu               342 kernel/cpu.c   				__func__, cpu);
cpu               348 kernel/cpu.c   	ret = __cpu_up(cpu);
cpu               351 kernel/cpu.c   	BUG_ON(!cpu_online(cpu));
cpu               353 kernel/cpu.c   	cpu_set(cpu, cpu_active_map);
cpu               370 kernel/cpu.c   	if (!cpu_isset(cpu, cpu_possible_map)) {
cpu               372 kernel/cpu.c   			"configured as may-hotadd at boot time\n", cpu);
cpu               387 kernel/cpu.c   	err = _cpu_up(cpu, 0);
cpu               399 kernel/cpu.c   	int cpu, first_cpu, error = 0;
cpu               408 kernel/cpu.c   	for_each_online_cpu(cpu) {
cpu               409 kernel/cpu.c   		if (cpu == first_cpu)
cpu               411 kernel/cpu.c   		error = _cpu_down(cpu, 1);
cpu               413 kernel/cpu.c   			cpu_set(cpu, frozen_cpus);
cpu               414 kernel/cpu.c   			printk("CPU%d is down\n", cpu);
cpu               417 kernel/cpu.c   				cpu, error);
cpu               434 kernel/cpu.c   	int cpu, error;
cpu               443 kernel/cpu.c   	for_each_cpu_mask_nr(cpu, frozen_cpus) {
cpu               444 kernel/cpu.c   		error = _cpu_up(cpu, 1);
cpu               446 kernel/cpu.c   			printk("CPU%d is up\n", cpu);
cpu               449 kernel/cpu.c   		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
cpu               470 kernel/cpu.c   	if (cpu_isset(cpu, frozen_cpus))
cpu               473 kernel/cpu.c   	raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
cpu                83 kernel/fork.c  	int cpu;
cpu                86 kernel/fork.c  	for_each_online_cpu(cpu)
cpu                87 kernel/fork.c  		total += per_cpu(process_counts, cpu);
cpu              1313 kernel/fork.c  		init_idle(task, cpu);
cpu               707 kernel/hrtimer.c 	int cpu = smp_processor_id();
cpu               708 kernel/hrtimer.c 	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
cpu               719 kernel/hrtimer.c 				    "mode on CPU %d\n", cpu);
cpu              1582 kernel/hrtimer.c 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
cpu              1686 kernel/hrtimer.c 	BUG_ON(cpu_online(cpu));
cpu              1687 kernel/hrtimer.c 	old_base = &per_cpu(hrtimer_bases, cpu);
cpu              1690 kernel/hrtimer.c 	tick_cancel_sched_timer(cpu);
cpu              1698 kernel/hrtimer.c 					 &new_base->clock_base[i], cpu))
cpu              1718 kernel/hrtimer.c 	unsigned int cpu = (long)hcpu;
cpu              1724 kernel/hrtimer.c 		init_hrtimers_cpu(cpu);
cpu              1730 kernel/hrtimer.c 		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
cpu              1731 kernel/hrtimer.c 		migrate_hrtimers(cpu);
cpu               308 kernel/irq/chip.c 	const unsigned int cpu = smp_processor_id();
cpu               315 kernel/irq/chip.c 	kstat_cpu(cpu).irqs[irq]++;
cpu               347 kernel/irq/chip.c 	unsigned int cpu = smp_processor_id();
cpu               357 kernel/irq/chip.c 	kstat_cpu(cpu).irqs[irq]++;
cpu               395 kernel/irq/chip.c 	unsigned int cpu = smp_processor_id();
cpu               405 kernel/irq/chip.c 	kstat_cpu(cpu).irqs[irq]++;
cpu               454 kernel/irq/chip.c 	const unsigned int cpu = smp_processor_id();
cpu               472 kernel/irq/chip.c 	kstat_cpu(cpu).irqs[irq]++;
cpu              1118 kernel/kexec.c 	if ((cpu < 0) || (cpu >= NR_CPUS))
cpu              1128 kernel/kexec.c 	buf = (u32*)per_cpu_ptr(crash_notes, cpu);
cpu                64 kernel/kgdb.c  	int			cpu;
cpu               567 kernel/kgdb.c  	int cpu;
cpu               570 kernel/kgdb.c  	cpu = raw_smp_processor_id();
cpu               571 kernel/kgdb.c  	kgdb_info[cpu].debuggerinfo = regs;
cpu               572 kernel/kgdb.c  	kgdb_info[cpu].task = current;
cpu               578 kernel/kgdb.c  	atomic_set(&cpu_in_kgdb[cpu], 1);
cpu               581 kernel/kgdb.c  	while (atomic_read(&passive_cpu_wait[cpu]))
cpu               584 kernel/kgdb.c  	kgdb_info[cpu].debuggerinfo = NULL;
cpu               585 kernel/kgdb.c  	kgdb_info[cpu].task = NULL;
cpu               592 kernel/kgdb.c  	atomic_set(&cpu_in_kgdb[cpu], 0);
cpu               855 kernel/kgdb.c  		thread = kgdb_info[ks->cpu].task;
cpu               856 kernel/kgdb.c  		local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo;
cpu               998 kernel/kgdb.c  	int cpu;
cpu              1014 kernel/kgdb.c  			for_each_online_cpu(cpu) {
cpu              1016 kernel/kgdb.c  				int_to_threadref(thref, -cpu - 2);
cpu              1241 kernel/kgdb.c  	kgdb_usethread = kgdb_info[ks->cpu].task;
cpu              1242 kernel/kgdb.c  	ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid);
cpu              1399 kernel/kgdb.c  	int i, cpu;
cpu              1401 kernel/kgdb.c  	ks->cpu			= raw_smp_processor_id();
cpu              1419 kernel/kgdb.c  	cpu = raw_smp_processor_id();
cpu              1424 kernel/kgdb.c  	while (atomic_cmpxchg(&kgdb_active, -1, cpu) != -1)
cpu              1433 kernel/kgdb.c  	    atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
cpu              1458 kernel/kgdb.c  	kgdb_info[ks->cpu].debuggerinfo = ks->linux_regs;
cpu              1459 kernel/kgdb.c  	kgdb_info[ks->cpu].task = current;
cpu              1476 kernel/kgdb.c  	atomic_set(&cpu_in_kgdb[ks->cpu], 1);
cpu              1509 kernel/kgdb.c  	kgdb_info[ks->cpu].debuggerinfo = NULL;
cpu              1510 kernel/kgdb.c  	kgdb_info[ks->cpu].task = NULL;
cpu              1511 kernel/kgdb.c  	atomic_set(&cpu_in_kgdb[ks->cpu], 0);
cpu              1539 kernel/kgdb.c  	if (!atomic_read(&cpu_in_kgdb[cpu]) &&
cpu              1540 kernel/kgdb.c  			atomic_read(&kgdb_active) != cpu &&
cpu               180 kernel/kthread.c 	set_task_cpu(k, cpu);
cpu               181 kernel/kthread.c 	k->cpus_allowed = cpumask_of_cpu(cpu);
cpu               178 kernel/lockdep.c 	int cpu, i;
cpu               181 kernel/lockdep.c 	for_each_possible_cpu(cpu) {
cpu               183 kernel/lockdep.c 			&per_cpu(lock_stats, cpu)[class - lock_classes];
cpu               203 kernel/lockdep.c 	int cpu;
cpu               205 kernel/lockdep.c 	for_each_possible_cpu(cpu) {
cpu               207 kernel/lockdep.c 			&per_cpu(lock_stats, cpu)[class - lock_classes];
cpu              2508 kernel/lockdep.c 	lock->cpu = raw_smp_processor_id();
cpu              3033 kernel/lockdep.c 	if (lock->cpu != smp_processor_id())
cpu              3047 kernel/lockdep.c 	int i, cpu;
cpu              3069 kernel/lockdep.c 	cpu = smp_processor_id();
cpu              3083 kernel/lockdep.c 	if (lock->cpu != cpu)
cpu              3087 kernel/lockdep.c 	lock->cpu = cpu;
cpu               464 kernel/module.c 	int cpu;
cpu               466 kernel/module.c 	for_each_possible_cpu(cpu)
cpu               467 kernel/module.c 		memcpy(pcpudest + per_cpu_offset(cpu), from, size);
cpu               856 kernel/module.c 		unsigned int cpu = get_cpu();
cpu               857 kernel/module.c 		local_dec(&module->ref[cpu].count);
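
module.c's reference count above is sharded per CPU: try_module_get() does a local_inc() on the current CPU's slot and module_put() a local_dec(), so the fast paths never bounce a shared cache line; only a reader that needs the true count sums every slot. The shape of such a counter with local_t; struct my_ref and friends are stand-ins:

#include <asm/local.h>
#include <linux/cache.h>
#include <linux/smp.h>

struct my_ref {
        local_t count;
} ____cacheline_aligned;

static struct my_ref my_refs[NR_CPUS];

static void my_get(void)
{
        unsigned int cpu = get_cpu();
        local_inc(&my_refs[cpu].count);         /* cheap, CPU-local atomic */
        put_cpu();
}

static unsigned long my_count(void)
{
        unsigned long total = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                total += local_read(&my_refs[cpu].count);
        return total;
}
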
cpu                42 kernel/posix-cpu-timers.c 		ret.cpu = timespec_to_cputime(tp);
cpu                52 kernel/posix-cpu-timers.c 		*tp = ns_to_timespec(cpu.sched);
cpu                54 kernel/posix-cpu-timers.c 		cputime_to_timespec(cpu.cpu, tp);
cpu                64 kernel/posix-cpu-timers.c 		return cputime_lt(now.cpu, then.cpu);
cpu                74 kernel/posix-cpu-timers.c 		acc->cpu = cputime_add(acc->cpu, val.cpu);
cpu                84 kernel/posix-cpu-timers.c 		a.cpu = cputime_sub(a.cpu, b.cpu);
cpu               111 kernel/posix-cpu-timers.c 	if (timer->it.cpu.incr.sched == 0)
cpu               117 kernel/posix-cpu-timers.c 		if (now.sched < timer->it.cpu.expires.sched)
cpu               119 kernel/posix-cpu-timers.c 		incr = timer->it.cpu.incr.sched;
cpu               120 kernel/posix-cpu-timers.c 		delta = now.sched + incr - timer->it.cpu.expires.sched;
cpu               127 kernel/posix-cpu-timers.c 			timer->it.cpu.expires.sched += incr;
cpu               134 kernel/posix-cpu-timers.c 		if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
cpu               136 kernel/posix-cpu-timers.c 		incr = timer->it.cpu.incr.cpu;
cpu               137 kernel/posix-cpu-timers.c 		delta = cputime_sub(cputime_add(now.cpu, incr),
cpu               138 kernel/posix-cpu-timers.c 				    timer->it.cpu.expires.cpu);
cpu               145 kernel/posix-cpu-timers.c 			timer->it.cpu.expires.cpu =
cpu               146 kernel/posix-cpu-timers.c 				cputime_add(timer->it.cpu.expires.cpu, incr);
cpu               208 kernel/posix-cpu-timers.c 		cpu->cpu = prof_ticks(p);
cpu               211 kernel/posix-cpu-timers.c 		cpu->cpu = virt_ticks(p);
cpu               214 kernel/posix-cpu-timers.c 		cpu->sched = sched_ns(p);
cpu               234 kernel/posix-cpu-timers.c 		cpu->cpu = cputime_add(p->signal->utime, p->signal->stime);
cpu               236 kernel/posix-cpu-timers.c 			cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
cpu               241 kernel/posix-cpu-timers.c 		cpu->cpu = p->signal->utime;
cpu               243 kernel/posix-cpu-timers.c 			cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
cpu               248 kernel/posix-cpu-timers.c 		cpu->sched = p->signal->sum_sched_runtime;
cpu               251 kernel/posix-cpu-timers.c 			cpu->sched += t->se.sum_exec_runtime;
cpu               253 kernel/posix-cpu-timers.c 		cpu->sched += sched_ns(p);
cpu               271 kernel/posix-cpu-timers.c 					    cpu);
cpu               347 kernel/posix-cpu-timers.c 	INIT_LIST_HEAD(&new_timer->it.cpu.entry);
cpu               348 kernel/posix-cpu-timers.c 	new_timer->it.cpu.incr.sched = 0;
cpu               349 kernel/posix-cpu-timers.c 	new_timer->it.cpu.expires.sched = 0;
cpu               369 kernel/posix-cpu-timers.c 	new_timer->it.cpu.task = p;
cpu               388 kernel/posix-cpu-timers.c 	struct task_struct *p = timer->it.cpu.task;
cpu               398 kernel/posix-cpu-timers.c 			BUG_ON(!list_empty(&timer->it.cpu.entry));
cpu               401 kernel/posix-cpu-timers.c 			if (timer->it.cpu.firing)
cpu               404 kernel/posix-cpu-timers.c 				list_del(&timer->it.cpu.entry);
cpu               431 kernel/posix-cpu-timers.c 		if (cputime_lt(timer->expires.cpu, ptime)) {
cpu               432 kernel/posix-cpu-timers.c 			timer->expires.cpu = cputime_zero;
cpu               434 kernel/posix-cpu-timers.c 			timer->expires.cpu = cputime_sub(timer->expires.cpu,
cpu               442 kernel/posix-cpu-timers.c 		if (cputime_lt(timer->expires.cpu, utime)) {
cpu               443 kernel/posix-cpu-timers.c 			timer->expires.cpu = cputime_zero;
cpu               445 kernel/posix-cpu-timers.c 			timer->expires.cpu = cputime_sub(timer->expires.cpu,
cpu               503 kernel/posix-cpu-timers.c 		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
cpu               518 kernel/posix-cpu-timers.c 		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
cpu               556 kernel/posix-cpu-timers.c 	put_task_struct(timer->it.cpu.task);
cpu               557 kernel/posix-cpu-timers.c 	timer->it.cpu.task = NULL;
cpu               558 kernel/posix-cpu-timers.c 	timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
cpu               559 kernel/posix-cpu-timers.c 					     timer->it.cpu.expires,
cpu               570 kernel/posix-cpu-timers.c 	struct task_struct *p = timer->it.cpu.task;
cpu               572 kernel/posix-cpu-timers.c 	struct cpu_timer_list *const nt = &timer->it.cpu;
cpu               592 kernel/posix-cpu-timers.c 			if (cputime_gt(next->expires.cpu, nt->expires.cpu))
cpu               614 kernel/posix-cpu-timers.c 					       nt->expires.cpu))
cpu               615 kernel/posix-cpu-timers.c 					p->it_prof_expires = nt->expires.cpu;
cpu               621 kernel/posix-cpu-timers.c 					       nt->expires.cpu))
cpu               622 kernel/posix-cpu-timers.c 					p->it_virt_expires = nt->expires.cpu;
cpu               642 kernel/posix-cpu-timers.c 					       timer->it.cpu.expires.cpu))
cpu               649 kernel/posix-cpu-timers.c 					       timer->it.cpu.expires.cpu))
cpu               653 kernel/posix-cpu-timers.c 				    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
cpu               659 kernel/posix-cpu-timers.c 					timer->it.cpu.task,
cpu               661 kernel/posix-cpu-timers.c 					timer->it.cpu.expires, now);
cpu               681 kernel/posix-cpu-timers.c 		timer->it.cpu.expires.sched = 0;
cpu               682 kernel/posix-cpu-timers.c 	} else if (timer->it.cpu.incr.sched == 0) {
cpu               687 kernel/posix-cpu-timers.c 		timer->it.cpu.expires.sched = 0;
cpu               708 kernel/posix-cpu-timers.c 	struct task_struct *p = timer->it.cpu.task;
cpu               730 kernel/posix-cpu-timers.c 		timer->it.cpu.task = NULL;
cpu               741 kernel/posix-cpu-timers.c 	old_expires = timer->it.cpu.expires;
cpu               742 kernel/posix-cpu-timers.c 	if (unlikely(timer->it.cpu.firing)) {
cpu               743 kernel/posix-cpu-timers.c 		timer->it.cpu.firing = -1;
cpu               746 kernel/posix-cpu-timers.c 		list_del_init(&timer->it.cpu.entry);
cpu               780 kernel/posix-cpu-timers.c 					    timer->it.cpu.expires)) {
cpu               783 kernel/posix-cpu-timers.c 					timer->it.cpu.expires, val);
cpu               814 kernel/posix-cpu-timers.c 	timer->it.cpu.expires = new_expires;
cpu               827 kernel/posix-cpu-timers.c 	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
cpu               855 kernel/posix-cpu-timers.c 				   timer->it.cpu.incr, &old->it_interval);
cpu               863 kernel/posix-cpu-timers.c 	struct task_struct *p = timer->it.cpu.task;
cpu               870 kernel/posix-cpu-timers.c 			   timer->it.cpu.incr, &itp->it_interval);
cpu               872 kernel/posix-cpu-timers.c 	if (timer->it.cpu.expires.sched == 0) {	/* Timer not armed at all.  */
cpu               883 kernel/posix-cpu-timers.c 		sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
cpu               903 kernel/posix-cpu-timers.c 			timer->it.cpu.task = NULL;
cpu               904 kernel/posix-cpu-timers.c 			timer->it.cpu.expires.sched = 0;
cpu               916 kernel/posix-cpu-timers.c 		if (timer->it.cpu.incr.sched == 0 &&
cpu               918 kernel/posix-cpu-timers.c 				    timer->it.cpu.expires, now)) {
cpu               923 kernel/posix-cpu-timers.c 			timer->it.cpu.expires.sched = 0;
cpu               944 kernel/posix-cpu-timers.c 	if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
cpu               947 kernel/posix-cpu-timers.c 						timer->it.cpu.expires, now),
cpu               977 kernel/posix-cpu-timers.c 		if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
cpu               978 kernel/posix-cpu-timers.c 			tsk->it_prof_expires = t->expires.cpu;
cpu               992 kernel/posix-cpu-timers.c 		if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
cpu               993 kernel/posix-cpu-timers.c 			tsk->it_virt_expires = t->expires.cpu;
cpu              1095 kernel/posix-cpu-timers.c 		if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
cpu              1096 kernel/posix-cpu-timers.c 			prof_expires = tl->expires.cpu;
cpu              1110 kernel/posix-cpu-timers.c 		if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
cpu              1111 kernel/posix-cpu-timers.c 			virt_expires = tl->expires.cpu;
cpu              1258 kernel/posix-cpu-timers.c 	struct task_struct *p = timer->it.cpu.task;
cpu              1286 kernel/posix-cpu-timers.c 			timer->it.cpu.task = p = NULL;
cpu              1287 kernel/posix-cpu-timers.c 			timer->it.cpu.expires.sched = 0;
cpu              1372 kernel/posix-cpu-timers.c 	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
cpu              1375 kernel/posix-cpu-timers.c 		list_del_init(&timer->it.cpu.entry);
cpu              1376 kernel/posix-cpu-timers.c 		firing = timer->it.cpu.firing;
cpu              1377 kernel/posix-cpu-timers.c 		timer->it.cpu.firing = 0;
cpu              1408 kernel/posix-cpu-timers.c 			if (cputime_le(*oldval, now.cpu)) {
cpu              1412 kernel/posix-cpu-timers.c 				*oldval = cputime_sub(*oldval, now.cpu);
cpu              1418 kernel/posix-cpu-timers.c 		*newval = cputime_add(*newval, now.cpu);
cpu              1436 kernel/posix-cpu-timers.c 				  struct cpu_timer_list, entry)->expires.cpu,
cpu              1443 kernel/posix-cpu-timers.c 		expires.cpu = *newval;
cpu              1477 kernel/posix-cpu-timers.c 			if (timer.it.cpu.expires.sched == 0) {
cpu              1497 kernel/posix-cpu-timers.c 		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
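
Nearly every posix-cpu-timers.c line above reads timer->it.cpu.expires or .incr through one of two member names, .cpu or .sched, because expirations are stored as a two-representation union whose live member depends on whether the timer's clock is CPUCLOCK_SCHED; roughly:

union cpu_time_count {
        cputime_t cpu;                  /* PROF/VIRT clocks, cputime units */
        unsigned long long sched;       /* SCHED clock, nanoseconds */
};

The expires.sched == 0 tests above double as the "timer not armed" check, which works for either representation since the sentinel overlays both members.
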
cpu               623 kernel/printk.c 	return cpu_online(cpu) || have_callable_console();
cpu               649 kernel/printk.c 		if (!can_use_console(cpu)) {
cpu               239 kernel/profile.c 	int cpu = smp_processor_id();
cpu               241 kernel/profile.c 	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
cpu               246 kernel/profile.c 	int i, j, cpu;
cpu               252 kernel/profile.c 	for_each_online_cpu(cpu) {
cpu               253 kernel/profile.c 		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
cpu               269 kernel/profile.c 	int i, cpu;
cpu               275 kernel/profile.c 	for_each_online_cpu(cpu) {
cpu               276 kernel/profile.c 		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
cpu               285 kernel/profile.c 	int i, j, cpu;
cpu               293 kernel/profile.c 	cpu = get_cpu();
cpu               294 kernel/profile.c 	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
cpu               336 kernel/profile.c 	int node, cpu = (unsigned long)__cpu;
cpu               342 kernel/profile.c 		node = cpu_to_node(cpu);
cpu               343 kernel/profile.c 		per_cpu(cpu_profile_flip, cpu) = 0;
cpu               344 kernel/profile.c 		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
cpu               350 kernel/profile.c 			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
cpu               352 kernel/profile.c 		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
cpu               358 kernel/profile.c 			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
cpu               362 kernel/profile.c 		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
cpu               363 kernel/profile.c 		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
cpu               368 kernel/profile.c 		cpu_set(cpu, prof_cpu_mask);
cpu               374 kernel/profile.c 		cpu_clear(cpu, prof_cpu_mask);
cpu               375 kernel/profile.c 		if (per_cpu(cpu_profile_hits, cpu)[0]) {
cpu               376 kernel/profile.c 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
cpu               377 kernel/profile.c 			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
cpu               380 kernel/profile.c 		if (per_cpu(cpu_profile_hits, cpu)[1]) {
cpu               381 kernel/profile.c 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
cpu               382 kernel/profile.c 			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
cpu               532 kernel/profile.c 	int cpu;
cpu               534 kernel/profile.c 	for_each_online_cpu(cpu) {
cpu               535 kernel/profile.c 		int node = cpu_to_node(cpu);
cpu               543 kernel/profile.c 		per_cpu(cpu_profile_hits, cpu)[1]
cpu               550 kernel/profile.c 		per_cpu(cpu_profile_hits, cpu)[0]
cpu               558 kernel/profile.c 	for_each_online_cpu(cpu) {
cpu               561 kernel/profile.c 		if (per_cpu(cpu_profile_hits, cpu)[0]) {
cpu               562 kernel/profile.c 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
cpu               563 kernel/profile.c 			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
cpu               566 kernel/profile.c 		if (per_cpu(cpu_profile_hits, cpu)[1]) {
cpu               567 kernel/profile.c 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
cpu               568 kernel/profile.c 			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
cpu                87 kernel/rcuclassic.c 	int cpu;
cpu               114 kernel/rcuclassic.c 		cpu_clear(rdp->cpu, cpumask);
cpu               115 kernel/rcuclassic.c 		for_each_cpu_mask_nr(cpu, cpumask)
cpu               116 kernel/rcuclassic.c 			smp_send_reschedule(cpu);
cpu               177 kernel/rcuclassic.c 	int cpu;
cpu               195 kernel/rcuclassic.c 	for_each_possible_cpu(cpu) {
cpu               196 kernel/rcuclassic.c 		if (cpu_isset(cpu, rcp->cpumask))
cpu               197 kernel/rcuclassic.c 			printk(" %d", cpu);
cpu               409 kernel/rcuclassic.c 	cpu_clear(cpu, rcp->cpumask);
cpu               456 kernel/rcuclassic.c 		cpu_quiet(rdp->cpu, rcp);
cpu               494 kernel/rcuclassic.c 		cpu_quiet(rdp->cpu, rcp);
cpu               509 kernel/rcuclassic.c 					&per_cpu(rcu_data, cpu));
cpu               511 kernel/rcuclassic.c 					&per_cpu(rcu_bh_data, cpu));
cpu               653 kernel/rcuclassic.c 	return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
cpu               654 kernel/rcuclassic.c 		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
cpu               665 kernel/rcuclassic.c 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
cpu               666 kernel/rcuclassic.c 	struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
cpu               668 kernel/rcuclassic.c 	return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
cpu               680 kernel/rcuclassic.c 	    (idle_cpu(cpu) && !in_softirq() &&
cpu               698 kernel/rcuclassic.c 		rcu_qsctr_inc(cpu);
cpu               699 kernel/rcuclassic.c 		rcu_bh_qsctr_inc(cpu);
cpu               712 kernel/rcuclassic.c 		rcu_bh_qsctr_inc(cpu);
cpu               728 kernel/rcuclassic.c 	rdp->cpu = cpu;
cpu               735 kernel/rcuclassic.c 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
cpu               736 kernel/rcuclassic.c 	struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);
cpu               738 kernel/rcuclassic.c 	rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
cpu               739 kernel/rcuclassic.c 	rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
cpu               746 kernel/rcuclassic.c 	long cpu = (long)hcpu;
cpu               751 kernel/rcuclassic.c 		rcu_online_cpu(cpu);
cpu               755 kernel/rcuclassic.c 		rcu_offline_cpu(cpu);
cpu                95 kernel/rcupdate.c 	int cpu = smp_processor_id();
cpu                96 kernel/rcupdate.c 	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
cpu               203 kernel/rcupreempt.c #define RCU_DATA_CPU(cpu)	(&per_cpu(rcu_data, cpu))
cpu               209 kernel/rcupreempt.c #define RCU_TRACE_CPU(f, cpu) RCU_TRACE(f, &(RCU_DATA_CPU(cpu)->trace));
cpu               378 kernel/rcupreempt.c 	int cpu;
cpu               420 kernel/rcupreempt.c 	cpu = raw_smp_processor_id();
cpu               421 kernel/rcupreempt.c 	if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
cpu               423 kernel/rcupreempt.c 		per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
cpu               445 kernel/rcupreempt.c 	int cpu = smp_processor_id();
cpu               446 kernel/rcupreempt.c 	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
cpu               448 kernel/rcupreempt.c 	if (per_cpu(rcu_update_flag, cpu))
cpu               449 kernel/rcupreempt.c 		per_cpu(rcu_update_flag, cpu)++;
cpu               496 kernel/rcupreempt.c 		per_cpu(rcu_update_flag, cpu)++;
cpu               515 kernel/rcupreempt.c 	int cpu = smp_processor_id();
cpu               516 kernel/rcupreempt.c 	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
cpu               527 kernel/rcupreempt.c 	if (per_cpu(rcu_update_flag, cpu)) {
cpu               528 kernel/rcupreempt.c 		if (--per_cpu(rcu_update_flag, cpu))
cpu               556 kernel/rcupreempt.c 	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
cpu               566 kernel/rcupreempt.c 	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
cpu               603 kernel/rcupreempt.c 	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
cpu               635 kernel/rcupreempt.c 	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
cpu               644 kernel/rcupreempt.c 	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
cpu               689 kernel/rcupreempt.c 	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
cpu               696 kernel/rcupreempt.c 	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
cpu               726 kernel/rcupreempt.c 	int cpu;
cpu               751 kernel/rcupreempt.c 	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
cpu               752 kernel/rcupreempt.c 		per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
cpu               753 kernel/rcupreempt.c 		dyntick_save_progress_counter(cpu);
cpu               766 kernel/rcupreempt.c 	int cpu;
cpu               769 kernel/rcupreempt.c 	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
cpu               770 kernel/rcupreempt.c 		if (rcu_try_flip_waitack_needed(cpu) &&
cpu               771 kernel/rcupreempt.c 		    per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
cpu               794 kernel/rcupreempt.c 	int cpu;
cpu               801 kernel/rcupreempt.c 	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
cpu               802 kernel/rcupreempt.c 		sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
cpu               816 kernel/rcupreempt.c 	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
cpu               817 kernel/rcupreempt.c 		per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
cpu               818 kernel/rcupreempt.c 		dyntick_save_progress_counter(cpu);
cpu               833 kernel/rcupreempt.c 	int cpu;
cpu               836 kernel/rcupreempt.c 	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
cpu               837 kernel/rcupreempt.c 		if (rcu_try_flip_waitmb_needed(cpu) &&
cpu               838 kernel/rcupreempt.c 		    per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
cpu               904 kernel/rcupreempt.c 	if (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed) {
cpu               906 kernel/rcupreempt.c 		per_cpu(rcu_mb_flag, cpu) = rcu_mb_done;
cpu               913 kernel/rcupreempt.c 	struct rcu_data *rdp = RCU_DATA_CPU(cpu);
cpu               929 kernel/rcupreempt.c 	    (idle_cpu(cpu) && !in_softirq() &&
cpu               932 kernel/rcupreempt.c 	     	rcu_qsctr_inc(cpu);
cpu               935 kernel/rcupreempt.c 	rcu_check_mb(cpu);
cpu               956 kernel/rcupreempt.c 	struct rcu_data *rdp = RCU_DATA_CPU(cpu);
cpu               984 kernel/rcupreempt.c 	struct rcu_data *rdp = RCU_DATA_CPU(cpu);
cpu              1011 kernel/rcupreempt.c 	rcu_check_mb(cpu);
cpu              1012 kernel/rcupreempt.c 	if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
cpu              1014 kernel/rcupreempt.c 		per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
cpu              1019 kernel/rcupreempt.c 	RCU_DATA_ME()->rcu_flipctr[0] += RCU_DATA_CPU(cpu)->rcu_flipctr[0];
cpu              1020 kernel/rcupreempt.c 	RCU_DATA_ME()->rcu_flipctr[1] += RCU_DATA_CPU(cpu)->rcu_flipctr[1];
cpu              1022 kernel/rcupreempt.c 	RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0;
cpu              1023 kernel/rcupreempt.c 	RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0;
cpu              1025 kernel/rcupreempt.c 	cpu_clear(cpu, rcu_cpu_online_map);
cpu              1065 kernel/rcupreempt.c 	cpu_set(cpu, rcu_cpu_online_map);
cpu              1077 kernel/rcupreempt.c 	rdp = RCU_DATA_CPU(cpu);
cpu              1179 kernel/rcupreempt.c 	int cpu;
cpu              1191 kernel/rcupreempt.c 		for_each_online_cpu(cpu) {
cpu              1192 kernel/rcupreempt.c 			dyntick_save_progress_counter_sched(cpu);
cpu              1193 kernel/rcupreempt.c 			save_qsctr_sched(cpu);
cpu              1218 kernel/rcupreempt.c 		for_each_online_cpu(cpu) {
cpu              1219 kernel/rcupreempt.c 			while (rcu_qsctr_inc_needed(cpu) &&
cpu              1220 kernel/rcupreempt.c 			       rcu_qsctr_inc_needed_dyntick(cpu)) {
cpu              1228 kernel/rcupreempt.c 		for_each_online_cpu(cpu) {
cpu              1230 kernel/rcupreempt.c 			rdp = RCU_DATA_CPU(cpu);
cpu              1325 kernel/rcupreempt.c 	struct rcu_data *rdp = RCU_DATA_CPU(cpu);
cpu              1336 kernel/rcupreempt.c 	struct rcu_data *rdp = RCU_DATA_CPU(cpu);
cpu              1349 kernel/rcupreempt.c 	if ((per_cpu(rcu_flip_flag, cpu) == rcu_flipped) ||
cpu              1350 kernel/rcupreempt.c 	    (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed))
cpu              1366 kernel/rcupreempt.c 	long cpu = (long)hcpu;
cpu              1371 kernel/rcupreempt.c 		rcu_online_cpu(cpu);
cpu              1377 kernel/rcupreempt.c 		rcu_offline_cpu(cpu);
cpu              1391 kernel/rcupreempt.c 	int cpu;
cpu              1396 kernel/rcupreempt.c 	for_each_possible_cpu(cpu) {
cpu              1397 kernel/rcupreempt.c 		rdp = RCU_DATA_CPU(cpu);
cpu              1431 kernel/rcupreempt.c 	for_each_online_cpu(cpu)
cpu              1432 kernel/rcupreempt.c 		rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,	(void *)(long) cpu);
cpu              1452 kernel/rcupreempt.c 	return &RCU_DATA_CPU(cpu)->rcu_flipctr[0];
cpu              1458 kernel/rcupreempt.c 	return per_cpu(rcu_flip_flag, cpu);
cpu              1464 kernel/rcupreempt.c 	return per_cpu(rcu_mb_flag, cpu);
cpu              1476 kernel/rcupreempt.c 	struct rcu_data *rdp = RCU_DATA_CPU(cpu);
cpu               140 kernel/rcupreempt_trace.c 	int cpu;
cpu               143 kernel/rcupreempt_trace.c 	for_each_possible_cpu(cpu) {
cpu               144 kernel/rcupreempt_trace.c 		cp = rcupreempt_trace_cpu(cpu);
cpu               231 kernel/rcupreempt_trace.c 	int cpu;
cpu               239 kernel/rcupreempt_trace.c 	for_each_online_cpu(cpu) {
cpu               240 kernel/rcupreempt_trace.c 		long *flipctr = rcupreempt_flipctr(cpu);
cpu               244 kernel/rcupreempt_trace.c 			       cpu,
cpu               247 kernel/rcupreempt_trace.c 			       rcupreempt_flip_flag(cpu),
cpu               248 kernel/rcupreempt_trace.c 			       rcupreempt_mb_flag(cpu));
cpu               475 kernel/rcutorture.c 	int cpu;
cpu               480 kernel/rcutorture.c 	for_each_possible_cpu(cpu) {
cpu               481 kernel/rcutorture.c 		cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
cpu               482 kernel/rcutorture.c 			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
cpu               483 kernel/rcutorture.c 			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
cpu               749 kernel/rcutorture.c 	int cpu;
cpu               754 kernel/rcutorture.c 	for_each_possible_cpu(cpu) {
cpu               756 kernel/rcutorture.c 			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
cpu               757 kernel/rcutorture.c 			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
cpu              1012 kernel/rcutorture.c 	int cpu;
cpu              1059 kernel/rcutorture.c 	for_each_possible_cpu(cpu) {
cpu              1061 kernel/rcutorture.c 			per_cpu(rcu_torture_count, cpu)[i] = 0;
cpu              1062 kernel/rcutorture.c 			per_cpu(rcu_torture_batch, cpu)[i] = 0;
cpu               223 kernel/relay.c 	chan->buf[buf->cpu] = NULL;
cpu               427 kernel/relay.c 	snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
cpu               457 kernel/relay.c 		dentry = relay_create_buf_file(chan, buf, cpu);
cpu               463 kernel/relay.c  	buf->cpu = cpu;
cpu               468 kernel/relay.c  		buf->cpu = 0;
cpu               799 kernel/relay.c 	if (cpu >= NR_CPUS || !chan->buf[cpu])
cpu               802 kernel/relay.c 	buf = chan->buf[cpu];
cpu               952 kernel/relay.c 		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
cpu               965 kernel/relay.c 		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
cpu              1186 kernel/relay.c 		relay_subbufs_consumed(rbuf->chan, rbuf->cpu, 1);
cpu               361 kernel/sched.c 	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
cpu               362 kernel/sched.c 	p->se.parent = task_group(p)->se[cpu];
cpu               366 kernel/sched.c 	p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
cpu               367 kernel/sched.c 	p->rt.parent = task_group(p)->rt_se[cpu];
cpu               569 kernel/sched.c 	int cpu;
cpu               620 kernel/sched.c 	return rq->cpu;
cpu               634 kernel/sched.c 	for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
cpu               636 kernel/sched.c #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
cpu               639 kernel/sched.c #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
cpu               664 kernel/sched.c 	int cpu = get_cpu();
cpu               665 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu              1079 kernel/sched.c 	int cpu = (int)(long)hcpu;
cpu              1088 kernel/sched.c 		hrtick_clear(cpu_rq(cpu));
cpu              1158 kernel/sched.c 	int cpu;
cpu              1167 kernel/sched.c 	cpu = task_cpu(p);
cpu              1168 kernel/sched.c 	if (cpu == smp_processor_id())
cpu              1174 kernel/sched.c 		smp_send_reschedule(cpu);
cpu              1179 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu              1184 kernel/sched.c 	resched_task(cpu_curr(cpu));
cpu              1201 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu              1203 kernel/sched.c 	if (cpu == smp_processor_id())
cpu              1226 kernel/sched.c 		smp_send_reschedule(cpu);
cpu              1440 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu              1463 kernel/sched.c 	if (!tg->se[cpu])
cpu              1466 kernel/sched.c 	rq_weight = tg->cfs_rq[cpu]->load.weight;
cpu              1492 kernel/sched.c 	tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
cpu              1493 kernel/sched.c 	tg->cfs_rq[cpu]->rq_weight = rq_weight;
cpu              1500 kernel/sched.c 	__set_se_shares(tg->se[cpu], shares);
cpu              1549 kernel/sched.c 	long cpu = (long)data;
cpu              1552 kernel/sched.c 		load = cpu_rq(cpu)->load.weight;
cpu              1554 kernel/sched.c 		load = tg->parent->cfs_rq[cpu]->h_load;
cpu              1555 kernel/sched.c 		load *= tg->cfs_rq[cpu]->shares;
cpu              1556 kernel/sched.c 		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
cpu              1559 kernel/sched.c 	tg->cfs_rq[cpu]->h_load = load;
cpu              1584 kernel/sched.c 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
cpu              1760 kernel/sched.c 	set_task_rq(p, cpu);
cpu              1768 kernel/sched.c 	task_thread_info(p)->cpu = cpu;
cpu              1789 kernel/sched.c 	return cpu_rq(cpu)->load.weight;
cpu              2003 kernel/sched.c 	int cpu;
cpu              2006 kernel/sched.c 	cpu = task_cpu(p);
cpu              2007 kernel/sched.c 	if ((cpu != smp_processor_id()) && task_curr(p))
cpu              2008 kernel/sched.c 		smp_send_reschedule(cpu);
cpu              2021 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu              2022 kernel/sched.c 	unsigned long total = weighted_cpuload(cpu);
cpu              2036 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu              2037 kernel/sched.c 	unsigned long total = weighted_cpuload(cpu);
cpu              2141 kernel/sched.c 	for_each_domain(cpu, tmp) {
cpu              2165 kernel/sched.c 		group = find_idlest_group(sd, t, cpu);
cpu              2171 kernel/sched.c 		new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask);
cpu              2172 kernel/sched.c 		if (new_cpu == -1 || new_cpu == cpu) {
cpu              2179 kernel/sched.c 		cpu = new_cpu;
cpu              2182 kernel/sched.c 		for_each_domain(cpu, tmp) {
cpu              2191 kernel/sched.c 	return cpu;
cpu              2212 kernel/sched.c 	int cpu, orig_cpu, this_cpu, success = 0;
cpu              2225 kernel/sched.c 		cpu = task_cpu(p);
cpu              2228 kernel/sched.c 			if (cpu_isset(cpu, sd->span)) {
cpu              2245 kernel/sched.c 	cpu = task_cpu(p);
cpu              2246 kernel/sched.c 	orig_cpu = cpu;
cpu              2253 kernel/sched.c 	cpu = p->sched_class->select_task_rq(p, sync);
cpu              2254 kernel/sched.c 	if (cpu != orig_cpu) {
cpu              2255 kernel/sched.c 		set_task_cpu(p, cpu);
cpu              2266 kernel/sched.c 		cpu = task_cpu(p);
cpu              2271 kernel/sched.c 	if (cpu == this_cpu)
cpu              2276 kernel/sched.c 			if (cpu_isset(cpu, sd->span)) {
cpu              2289 kernel/sched.c 	if (orig_cpu != cpu)
cpu              2291 kernel/sched.c 	if (cpu == this_cpu)
cpu              2377 kernel/sched.c 	int cpu = get_cpu();
cpu              2382 kernel/sched.c 	cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
cpu              2384 kernel/sched.c 	set_task_cpu(p, cpu);
cpu              3810 kernel/sched.c 	int cpu = smp_processor_id();
cpu              3813 kernel/sched.c 		cpu_set(cpu, nohz.cpu_mask);
cpu              3814 kernel/sched.c 		cpu_rq(cpu)->in_nohz_recently = 1;
cpu              3819 kernel/sched.c 		if (!cpu_active(cpu) &&
cpu              3820 kernel/sched.c 		    atomic_read(&nohz.load_balancer) == cpu) {
cpu              3821 kernel/sched.c 			if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
cpu              3828 kernel/sched.c 			if (atomic_read(&nohz.load_balancer) == cpu)
cpu              3835 kernel/sched.c 			if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
cpu              3837 kernel/sched.c 		} else if (atomic_read(&nohz.load_balancer) == cpu)
cpu              3840 kernel/sched.c 		if (!cpu_isset(cpu, nohz.cpu_mask))
cpu              3843 kernel/sched.c 		cpu_clear(cpu, nohz.cpu_mask);
cpu              3845 kernel/sched.c 		if (atomic_read(&nohz.load_balancer) == cpu)
cpu              3846 kernel/sched.c 			if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
cpu              3864 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu              3873 kernel/sched.c 	for_each_domain(cpu, sd) {
cpu              3896 kernel/sched.c 			if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) {
cpu              3996 kernel/sched.c 		if (atomic_read(&nohz.load_balancer) == cpu) {
cpu              3997 kernel/sched.c 			cpu_clear(cpu, nohz.cpu_mask);
cpu              4021 kernel/sched.c 	if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
cpu              4023 kernel/sched.c 		resched_cpu(cpu);
cpu              4031 kernel/sched.c 	if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
cpu              4032 kernel/sched.c 	    cpu_isset(cpu, nohz.cpu_mask))
cpu              4264 kernel/sched.c 	int cpu = smp_processor_id();
cpu              4265 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu              4277 kernel/sched.c 	rq->idle_at_tick = idle_cpu(cpu);
cpu              4278 kernel/sched.c 	trigger_load_balance(rq, cpu);
cpu              4426 kernel/sched.c 	int cpu;
cpu              4430 kernel/sched.c 	cpu = smp_processor_id();
cpu              4431 kernel/sched.c 	rq = cpu_rq(cpu);
cpu              4432 kernel/sched.c 	rcu_qsctr_inc(cpu);
cpu              4466 kernel/sched.c 		idle_balance(cpu, rq);
cpu              4483 kernel/sched.c 		cpu = smp_processor_id();
cpu              4484 kernel/sched.c 		rq = cpu_rq(cpu);
cpu              5075 kernel/sched.c 	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
cpu              5084 kernel/sched.c 	return cpu_rq(cpu)->idle;
cpu              5856 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu              5863 kernel/sched.c 	idle->cpus_allowed = cpumask_of_cpu(cpu);
cpu              5864 kernel/sched.c 	__set_task_cpu(idle, cpu);
cpu              6045 kernel/sched.c 	int cpu = (long)data;
cpu              6048 kernel/sched.c 	rq = cpu_rq(cpu);
cpu              6058 kernel/sched.c 		if (cpu_is_offline(cpu)) {
cpu              6064 kernel/sched.c 			active_load_balance(rq, cpu);
cpu              6080 kernel/sched.c 		__migrate_task(req->task, cpu, req->dest_cpu);
cpu              6396 kernel/sched.c 	for_each_domain(cpu, sd)
cpu              6403 kernel/sched.c 	for_each_domain(cpu, sd) {
cpu              6462 kernel/sched.c 		cpu_set(rq->cpu, rq->rd->online);
cpu              6482 kernel/sched.c 		cpu_clear(rq->cpu, rq->rd->online);
cpu              6495 kernel/sched.c 	int cpu = (long)hcpu;
cpu              6503 kernel/sched.c 		p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
cpu              6506 kernel/sched.c 		kthread_bind(p, cpu);
cpu              6511 kernel/sched.c 		cpu_rq(cpu)->migration_thread = p;
cpu              6517 kernel/sched.c 		wake_up_process(cpu_rq(cpu)->migration_thread);
cpu              6520 kernel/sched.c 		rq = cpu_rq(cpu);
cpu              6523 kernel/sched.c 			BUG_ON(!cpu_isset(cpu, rq->rd->span));
cpu              6533 kernel/sched.c 		if (!cpu_rq(cpu)->migration_thread)
cpu              6536 kernel/sched.c 		kthread_bind(cpu_rq(cpu)->migration_thread,
cpu              6538 kernel/sched.c 		kthread_stop(cpu_rq(cpu)->migration_thread);
cpu              6539 kernel/sched.c 		cpu_rq(cpu)->migration_thread = NULL;
cpu              6545 kernel/sched.c 		migrate_live_tasks(cpu);
cpu              6546 kernel/sched.c 		rq = cpu_rq(cpu);
cpu              6556 kernel/sched.c 		migrate_dead_tasks(cpu);
cpu              6582 kernel/sched.c 		rq = cpu_rq(cpu);
cpu              6585 kernel/sched.c 			BUG_ON(!cpu_isset(cpu, rq->rd->span));
cpu              6605 kernel/sched.c 	void *cpu = (void *)(long)smp_processor_id();
cpu              6609 kernel/sched.c 	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
cpu              6611 kernel/sched.c 	migration_call(&migration_notifier, CPU_ONLINE, cpu);
cpu              6667 kernel/sched.c 	if (!cpu_isset(cpu, sd->span)) {
cpu              6669 kernel/sched.c 				"CPU%d\n", cpu);
cpu              6671 kernel/sched.c 	if (!cpu_isset(cpu, group->cpumask)) {
cpu              6673 kernel/sched.c 				" CPU%d\n", cpu);
cpu              6727 kernel/sched.c 		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
cpu              6731 kernel/sched.c 	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
cpu              6740 kernel/sched.c 		if (sched_domain_debug_one(sd, cpu, level, groupmask))
cpu              6817 kernel/sched.c 		if (cpu_isset(rq->cpu, old_rd->online))
cpu              6820 kernel/sched.c 		cpu_clear(rq->cpu, old_rd->span);
cpu              6829 kernel/sched.c 	cpu_set(rq->cpu, rd->span);
cpu              6830 kernel/sched.c 	if (cpu_isset(rq->cpu, cpu_online_map))
cpu              6872 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu              6893 kernel/sched.c 	sched_domain_debug(sd, cpu);
cpu              7055 kernel/sched.c 		*sg = &per_cpu(sched_group_cpus, cpu);
cpu              7056 kernel/sched.c 	return cpu;
cpu              7075 kernel/sched.c 	*mask = per_cpu(cpu_sibling_map, cpu);
cpu              7088 kernel/sched.c 		*sg = &per_cpu(sched_group_core, cpu);
cpu              7089 kernel/sched.c 	return cpu;
cpu              7102 kernel/sched.c 	*mask = cpu_coregroup_map(cpu);
cpu              7106 kernel/sched.c 	*mask = per_cpu(cpu_sibling_map, cpu);
cpu              7110 kernel/sched.c 	group = cpu;
cpu              7134 kernel/sched.c 	*nodemask = node_to_cpumask(cpu_to_node(cpu));
cpu              7174 kernel/sched.c 	int cpu, i;
cpu              7176 kernel/sched.c 	for_each_cpu_mask_nr(cpu, *cpu_map) {
cpu              7178 kernel/sched.c 			= sched_group_nodes_bycpu[cpu];
cpu              7202 kernel/sched.c 		sched_group_nodes_bycpu[cpu] = NULL;
cpu              7232 kernel/sched.c 	if (cpu != first_cpu(sd->groups->cpumask))
cpu              7949 kernel/sched.c 	int cpu = (int)(long)hcpu;
cpu              7954 kernel/sched.c 		disable_runtime(cpu_rq(cpu));
cpu              7961 kernel/sched.c 		enable_runtime(cpu_rq(cpu));
cpu              8063 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu              8064 kernel/sched.c 	tg->cfs_rq[cpu] = cfs_rq;
cpu              8070 kernel/sched.c 	tg->se[cpu] = se;
cpu              8092 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
cpu              8094 kernel/sched.c 	tg->rt_rq[cpu] = rt_rq;
cpu              8102 kernel/sched.c 	tg->rt_se[cpu] = rt_se;
cpu              8273 kernel/sched.c 		rq->cpu = i;
cpu              8428 kernel/sched.c 	return cpu_curr(cpu);
cpu              8448 kernel/sched.c 	cpu_curr(cpu) = p;
cpu              8511 kernel/sched.c 	list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list,
cpu              8512 kernel/sched.c 			&cpu_rq(cpu)->leaf_cfs_rq_list);
cpu              8517 kernel/sched.c 	list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
cpu              8600 kernel/sched.c 	list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list,
cpu              8601 kernel/sched.c 			&cpu_rq(cpu)->leaf_rt_rq_list);
cpu              8606 kernel/sched.c 	list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
cpu                69 kernel/sched_clock.c 	return &per_cpu(sched_clock_data, cpu);
cpu                75 kernel/sched_clock.c 	int cpu;
cpu                77 kernel/sched_clock.c 	for_each_possible_cpu(cpu) {
cpu                78 kernel/sched_clock.c 		struct sched_clock_data *scd = cpu_sdc(cpu);
cpu               151 kernel/sched_clock.c 	struct sched_clock_data *scd = cpu_sdc(cpu);
cpu               160 kernel/sched_clock.c 	if (cpu != raw_smp_processor_id()) {
cpu               258 kernel/sched_clock.c 	clock = sched_clock_cpu(cpu);
cpu               106 kernel/sched_cpupri.c 	int                 *currpri = &cp->cpu_to_pri[cpu];
cpu               129 kernel/sched_cpupri.c 		cpu_clear(cpu, vec->mask);
cpu               139 kernel/sched_cpupri.c 		cpu_set(cpu, vec->mask);
cpu               118 kernel/sched_debug.c 	struct rq *rq = &per_cpu(runqueues, cpu);
cpu               133 kernel/sched_debug.c 	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
cpu               135 kernel/sched_debug.c 	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
cpu               205 kernel/sched_debug.c 	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path);
cpu               207 kernel/sched_debug.c 	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
cpu               227 kernel/sched_debug.c 	struct rq *rq = &per_cpu(runqueues, cpu);
cpu               234 kernel/sched_debug.c 			   cpu, freq / 1000, (freq % 1000));
cpu               237 kernel/sched_debug.c 	SEQ_printf(m, "\ncpu#%d\n", cpu);
cpu               263 kernel/sched_debug.c 	print_cfs_stats(m, cpu);
cpu               264 kernel/sched_debug.c 	print_rt_stats(m, cpu);
cpu               266 kernel/sched_debug.c 	print_rq(m, rq, cpu);
cpu               272 kernel/sched_debug.c 	int cpu;
cpu               293 kernel/sched_debug.c 	for_each_online_cpu(cpu)
cpu               294 kernel/sched_debug.c 		print_cpu(m, cpu);
cpu               971 kernel/sched_fair.c 	if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1)
cpu               972 kernel/sched_fair.c 		return cpu;
cpu               974 kernel/sched_fair.c 	for_each_domain(cpu, sd) {
cpu               993 kernel/sched_fair.c 	return cpu;
cpu               998 kernel/sched_fair.c 	return cpu;
cpu              1031 kernel/sched_fair.c 	struct sched_entity *se = tg->se[cpu];
cpu              1610 kernel/sched_fair.c 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
cpu              1611 kernel/sched_fair.c 		print_cfs_rq(m, cpu, cfs_rq);
cpu                18 kernel/sched_rt.c 	cpu_set(rq->cpu, rq->rd->rto_mask);
cpu                37 kernel/sched_rt.c 	cpu_clear(rq->cpu, rq->rd->rto_mask);
cpu               156 kernel/sched_rt.c 	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
cpu               223 kernel/sched_rt.c 	return &cpu_rq(cpu)->rt;
cpu               562 kernel/sched_rt.c 			cpupri_set(&rq->rd->cpupri, rq->cpu,
cpu               621 kernel/sched_rt.c 			cpupri_set(&rq->rd->cpupri, rq->cpu,
cpu               792 kernel/sched_rt.c 		int cpu = find_lowest_rq(p);
cpu               794 kernel/sched_rt.c 		return (cpu == -1) ? task_cpu(p) : cpu;
cpu               918 kernel/sched_rt.c 	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
cpu               943 kernel/sched_rt.c 			if (pick_rt_task(rq, p, cpu)) {
cpu               979 kernel/sched_rt.c 	int cpu      = task_cpu(task);
cpu              1002 kernel/sched_rt.c 	if (cpu_isset(cpu, *lowest_mask))
cpu              1003 kernel/sched_rt.c 		return cpu;
cpu              1009 kernel/sched_rt.c 	if (this_cpu == cpu)
cpu              1012 kernel/sched_rt.c 	for_each_domain(cpu, sd) {
cpu              1039 kernel/sched_rt.c 	int cpu;
cpu              1042 kernel/sched_rt.c 		cpu = find_lowest_rq(task);
cpu              1044 kernel/sched_rt.c 		if ((cpu == -1) || (cpu == rq->cpu))
cpu              1047 kernel/sched_rt.c 		lowest_rq = cpu_rq(cpu);
cpu              1058 kernel/sched_rt.c 				     !cpu_isset(lowest_rq->cpu,
cpu              1138 kernel/sched_rt.c 	set_task_cpu(next_task, lowest_rq->cpu);
cpu              1171 kernel/sched_rt.c 	int this_cpu = this_rq->cpu, ret = 0, cpu;
cpu              1180 kernel/sched_rt.c 	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
cpu              1181 kernel/sched_rt.c 		if (this_cpu == cpu)
cpu              1184 kernel/sched_rt.c 		src_rq = cpu_rq(cpu);
cpu              1344 kernel/sched_rt.c 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
cpu              1355 kernel/sched_rt.c 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
cpu              1541 kernel/sched_rt.c 	for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
cpu              1542 kernel/sched_rt.c 		print_rt_rq(m, cpu, rt_rq);
cpu                11 kernel/sched_stats.h 	int cpu;
cpu                20 kernel/sched_stats.h 	for_each_online_cpu(cpu) {
cpu                21 kernel/sched_stats.h 		struct rq *rq = cpu_rq(cpu);
cpu                30 kernel/sched_stats.h 		    cpu, rq->yld_both_empty,
cpu                42 kernel/sched_stats.h 		for_each_domain(cpu, sd) {
cpu                70 kernel/smp.c   	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
cpu                80 kernel/smp.c   		arch_send_call_function_single_ipi(cpu);
cpu               102 kernel/smp.c   	int cpu = get_cpu();
cpu               112 kernel/smp.c   		if (!cpu_isset(cpu, data->cpumask))
cpu               118 kernel/smp.c   		cpu_clear(cpu, data->cpumask);
cpu               221 kernel/smp.c   	if (cpu == me) {
cpu               225 kernel/smp.c   	} else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) {
cpu               240 kernel/smp.c   		generic_exec_single(cpu, data);
cpu               265 kernel/smp.c   	generic_exec_single(cpu, data);
cpu               293 kernel/smp.c   	int cpu;
cpu               298 kernel/smp.c   	for_each_cpu_mask(cpu, mask) {
cpu               300 kernel/smp.c   		generic_exec_single(cpu, &data);
cpu               328 kernel/smp.c   	int cpu, num_cpus;
cpu               334 kernel/smp.c   	cpu = smp_processor_id();
cpu               336 kernel/smp.c   	cpu_clear(cpu, allbutself);
cpu               347 kernel/smp.c   		cpu = first_cpu(mask);
cpu               348 kernel/smp.c   		return smp_call_function_single(cpu, func, info, wait);
cpu               189 kernel/softirq.c 	int cpu;
cpu               197 kernel/softirq.c 	cpu = smp_processor_id();
cpu               209 kernel/softirq.c 			rcu_bh_qsctr_inc(cpu);
cpu               258 kernel/softirq.c 	int cpu = smp_processor_id();
cpu               259 kernel/softirq.c 	if (idle_cpu(cpu) && !in_interrupt())
cpu               260 kernel/softirq.c 		tick_nohz_stop_idle(cpu);
cpu               264 kernel/softirq.c 	if (idle_cpu(cpu))
cpu               468 kernel/softirq.c 	int cpu;
cpu               470 kernel/softirq.c 	for_each_possible_cpu(cpu) {
cpu               471 kernel/softirq.c 		per_cpu(tasklet_vec, cpu).tail =
cpu               472 kernel/softirq.c 			&per_cpu(tasklet_vec, cpu).head;
cpu               473 kernel/softirq.c 		per_cpu(tasklet_hi_vec, cpu).tail =
cpu               474 kernel/softirq.c 			&per_cpu(tasklet_hi_vec, cpu).head;
cpu               538 kernel/softirq.c 	BUG_ON(cpu_online(cpu));
cpu               545 kernel/softirq.c 	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
cpu               550 kernel/softirq.c 				per_cpu(tasklet_vec, cpu).tail = i;
cpu               563 kernel/softirq.c 	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
cpu               564 kernel/softirq.c 		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
cpu               565 kernel/softirq.c 		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
cpu               566 kernel/softirq.c 		per_cpu(tasklet_vec, cpu).head = NULL;
cpu               567 kernel/softirq.c 		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
cpu               571 kernel/softirq.c 	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
cpu               572 kernel/softirq.c 		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
cpu               573 kernel/softirq.c 		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
cpu               574 kernel/softirq.c 		per_cpu(tasklet_hi_vec, cpu).head = NULL;
cpu               575 kernel/softirq.c 		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
cpu               635 kernel/softirq.c 	void *cpu = (void *)(long)smp_processor_id();
cpu               636 kernel/softirq.c 	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
cpu               639 kernel/softirq.c 	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
cpu                83 kernel/softlockup.c 	int cpu;
cpu                86 kernel/softlockup.c 	for_each_online_cpu(cpu)
cpu                87 kernel/softlockup.c 		per_cpu(touch_timestamp, cpu) = 0;
cpu               353 kernel/softlockup.c 	void *cpu = (void *)(long)smp_processor_id();
cpu               359 kernel/softlockup.c 	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
cpu               364 kernel/softlockup.c 	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
cpu                60 kernel/srcu.c  	int cpu;
cpu                64 kernel/srcu.c  	for_each_possible_cpu(cpu)
cpu                65 kernel/srcu.c  		sum += per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx];
cpu              1739 kernel/sys.c   	int cpu = raw_smp_processor_id();
cpu              1741 kernel/sys.c   		err |= put_user(cpu, cpup);
cpu              1743 kernel/sys.c   		err |= put_user(cpu_to_node(cpu), nodep);
cpu               297 kernel/taskstats.c 	unsigned int cpu;
cpu               304 kernel/taskstats.c 		for_each_cpu_mask_nr(cpu, mask) {
cpu               306 kernel/taskstats.c 					 cpu_to_node(cpu));
cpu               313 kernel/taskstats.c 			listeners = &per_cpu(listener_array, cpu);
cpu               323 kernel/taskstats.c 	for_each_cpu_mask_nr(cpu, mask) {
cpu               324 kernel/taskstats.c 		listeners = &per_cpu(listener_array, cpu);
cpu               107 kernel/time/tick-broadcast.c 		cpu_set(cpu, tick_broadcast_mask);
cpu               117 kernel/time/tick-broadcast.c 			int cpu = smp_processor_id();
cpu               119 kernel/time/tick-broadcast.c 			cpu_clear(cpu, tick_broadcast_mask);
cpu               120 kernel/time/tick-broadcast.c 			tick_broadcast_clear_oneshot(cpu);
cpu               132 kernel/time/tick-broadcast.c 	int cpu = smp_processor_id();
cpu               138 kernel/time/tick-broadcast.c 	if (cpu_isset(cpu, mask)) {
cpu               139 kernel/time/tick-broadcast.c 		cpu_clear(cpu, mask);
cpu               140 kernel/time/tick-broadcast.c 		td = &per_cpu(tick_cpu_device, cpu);
cpu               151 kernel/time/tick-broadcast.c 		cpu = first_cpu(mask);
cpu               152 kernel/time/tick-broadcast.c 		td = &per_cpu(tick_cpu_device, cpu);
cpu               213 kernel/time/tick-broadcast.c 	int cpu, bc_stopped;
cpu               217 kernel/time/tick-broadcast.c 	cpu = smp_processor_id();
cpu               218 kernel/time/tick-broadcast.c 	td = &per_cpu(tick_cpu_device, cpu);
cpu               236 kernel/time/tick-broadcast.c 		if (!cpu_isset(cpu, tick_broadcast_mask)) {
cpu               237 kernel/time/tick-broadcast.c 			cpu_set(cpu, tick_broadcast_mask);
cpu               247 kernel/time/tick-broadcast.c 		    cpu_isset(cpu, tick_broadcast_mask)) {
cpu               248 kernel/time/tick-broadcast.c 			cpu_clear(cpu, tick_broadcast_mask);
cpu               301 kernel/time/tick-broadcast.c 	unsigned int cpu = *cpup;
cpu               306 kernel/time/tick-broadcast.c 	cpu_clear(cpu, tick_broadcast_mask);
cpu               394 kernel/time/tick-broadcast.c 	int cpu;
cpu               403 kernel/time/tick-broadcast.c 	for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) {
cpu               404 kernel/time/tick-broadcast.c 		td = &per_cpu(tick_cpu_device, cpu);
cpu               406 kernel/time/tick-broadcast.c 			cpu_set(cpu, mask);
cpu               446 kernel/time/tick-broadcast.c 	int cpu;
cpu               458 kernel/time/tick-broadcast.c 	cpu = smp_processor_id();
cpu               459 kernel/time/tick-broadcast.c 	td = &per_cpu(tick_cpu_device, cpu);
cpu               466 kernel/time/tick-broadcast.c 		if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
cpu               467 kernel/time/tick-broadcast.c 			cpu_set(cpu, tick_broadcast_oneshot_mask);
cpu               473 kernel/time/tick-broadcast.c 		if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
cpu               474 kernel/time/tick-broadcast.c 			cpu_clear(cpu, tick_broadcast_oneshot_mask);
cpu               492 kernel/time/tick-broadcast.c 	cpu_clear(cpu, tick_broadcast_oneshot_mask);
cpu               498 kernel/time/tick-broadcast.c 	int cpu;
cpu               500 kernel/time/tick-broadcast.c 	for_each_cpu_mask_nr(cpu, *mask) {
cpu               501 kernel/time/tick-broadcast.c 		td = &per_cpu(tick_cpu_device, cpu);
cpu               515 kernel/time/tick-broadcast.c 		int cpu = smp_processor_id();
cpu               522 kernel/time/tick-broadcast.c 		tick_do_timer_cpu = cpu;
cpu               531 kernel/time/tick-broadcast.c 		cpu_clear(cpu, mask);
cpu               567 kernel/time/tick-broadcast.c 	unsigned int cpu = *cpup;
cpu               575 kernel/time/tick-broadcast.c 	cpu_clear(cpu, tick_broadcast_oneshot_mask);
cpu                44 kernel/time/tick-common.c 	return &per_cpu(tick_cpu_device, cpu);
cpu                62 kernel/time/tick-common.c 	if (tick_do_timer_cpu == cpu) {
cpu                81 kernel/time/tick-common.c 	int cpu = smp_processor_id();
cpu                84 kernel/time/tick-common.c 	tick_periodic(cpu);
cpu                96 kernel/time/tick-common.c 		tick_periodic(cpu);
cpu               153 kernel/time/tick-common.c 			tick_do_timer_cpu = cpu;
cpu               183 kernel/time/tick-common.c 	if (tick_device_uses_broadcast(newdev, cpu))
cpu               199 kernel/time/tick-common.c 	int cpu, ret = NOTIFY_OK;
cpu               204 kernel/time/tick-common.c 	cpu = smp_processor_id();
cpu               205 kernel/time/tick-common.c 	if (!cpu_isset(cpu, newdev->cpumask))
cpu               208 kernel/time/tick-common.c 	td = &per_cpu(tick_cpu_device, cpu);
cpu               212 kernel/time/tick-common.c 	if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) {
cpu               225 kernel/time/tick-common.c 		if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu)))
cpu               257 kernel/time/tick-common.c 	tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu));
cpu               302 kernel/time/tick-common.c 		int cpu = first_cpu(cpu_online_map);
cpu               304 kernel/time/tick-common.c 		tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu :
cpu                41 kernel/time/tick-sched.c 	return &per_cpu(tick_cpu_sched, cpu);
cpu               139 kernel/time/tick-sched.c 	int cpu = smp_processor_id();
cpu               140 kernel/time/tick-sched.c 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
cpu               147 kernel/time/tick-sched.c 	cpu_clear(cpu, nohz_cpu_mask);
cpu               160 kernel/time/tick-sched.c 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
cpu               192 kernel/time/tick-sched.c 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
cpu               219 kernel/time/tick-sched.c 	int cpu;
cpu               223 kernel/time/tick-sched.c 	cpu = smp_processor_id();
cpu               224 kernel/time/tick-sched.c 	ts = &per_cpu(tick_cpu_sched, cpu);
cpu               234 kernel/time/tick-sched.c 	if (unlikely(!cpu_online(cpu))) {
cpu               235 kernel/time/tick-sched.c 		if (cpu == tick_do_timer_cpu)
cpu               273 kernel/time/tick-sched.c 	if (rcu_needs_cpu(cpu))
cpu               286 kernel/time/tick-sched.c 			cpu_set(cpu, nohz_cpu_mask);
cpu               299 kernel/time/tick-sched.c 				cpu_clear(cpu, nohz_cpu_mask);
cpu               317 kernel/time/tick-sched.c 		if (cpu == tick_do_timer_cpu)
cpu               357 kernel/time/tick-sched.c 		cpu_clear(cpu, nohz_cpu_mask);
cpu               387 kernel/time/tick-sched.c 	int cpu = smp_processor_id();
cpu               388 kernel/time/tick-sched.c 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
cpu               393 kernel/time/tick-sched.c 	tick_nohz_stop_idle(cpu);
cpu               409 kernel/time/tick-sched.c 	cpu_clear(cpu, nohz_cpu_mask);
cpu               471 kernel/time/tick-sched.c 	int cpu = smp_processor_id();
cpu               484 kernel/time/tick-sched.c 		tick_do_timer_cpu = cpu;
cpu               487 kernel/time/tick-sched.c 	if (tick_do_timer_cpu == cpu)
cpu               575 kernel/time/tick-sched.c 	int cpu = smp_processor_id();
cpu               586 kernel/time/tick-sched.c 		tick_do_timer_cpu = cpu;
cpu               590 kernel/time/tick-sched.c 	if (tick_do_timer_cpu == cpu)
cpu               666 kernel/time/tick-sched.c 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
cpu               682 kernel/time/tick-sched.c 	int cpu;
cpu               684 kernel/time/tick-sched.c 	for_each_possible_cpu(cpu)
cpu               685 kernel/time/tick-sched.c 		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
cpu               129 kernel/time/timer_list.c 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
cpu               133 kernel/time/timer_list.c 	SEQ_printf(m, "cpu: %d\n", cpu);
cpu               161 kernel/time/timer_list.c 		struct tick_sched *ts = tick_get_tick_sched(cpu);
cpu               222 kernel/time/timer_list.c 	int cpu;
cpu               234 kernel/time/timer_list.c 	for_each_online_cpu(cpu)
cpu               235 kernel/time/timer_list.c 		   print_tickdevice(m, tick_get_device(cpu));
cpu               245 kernel/time/timer_list.c 	int cpu;
cpu               251 kernel/time/timer_list.c 	for_each_online_cpu(cpu)
cpu               252 kernel/time/timer_list.c 		print_cpu(m, cpu, now);
cpu               348 kernel/time/timer_stats.c 	int cpu;
cpu               350 kernel/time/timer_stats.c 	for_each_online_cpu(cpu) {
cpu               351 kernel/time/timer_stats.c 		spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags);
cpu               353 kernel/time/timer_stats.c 		spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags);
cpu               408 kernel/time/timer_stats.c 	int cpu;
cpu               410 kernel/time/timer_stats.c 	for_each_possible_cpu(cpu)
cpu               411 kernel/time/timer_stats.c 		spin_lock_init(&per_cpu(lookup_lock, cpu));
cpu               148 kernel/timer.c 	j += cpu * 3;
cpu               164 kernel/timer.c 	j -= cpu * 3;
cpu               200 kernel/timer.c 	return  __round_jiffies(j + jiffies, cpu) - jiffies;
cpu               579 kernel/timer.c 	struct tvec_base *base = per_cpu(tvec_bases, cpu);
cpu               596 kernel/timer.c 	wake_up_idle_cpu(cpu);
cpu               974 kernel/timer.c 	int cpu = smp_processor_id();
cpu               979 kernel/timer.c 	if (rcu_pending(cpu))
cpu               980 kernel/timer.c 		rcu_check_callbacks(cpu, user_tick);
cpu              1366 kernel/timer.c 	if (!tvec_base_done[cpu]) {
cpu              1375 kernel/timer.c 						cpu_to_node(cpu));
cpu              1385 kernel/timer.c 			per_cpu(tvec_bases, cpu) = base;
cpu              1396 kernel/timer.c 		tvec_base_done[cpu] = 1;
cpu              1398 kernel/timer.c 		base = per_cpu(tvec_bases, cpu);
cpu              1435 kernel/timer.c 	BUG_ON(cpu_online(cpu));
cpu              1436 kernel/timer.c 	old_base = per_cpu(tvec_bases, cpu);
cpu              1464 kernel/timer.c 	long cpu = (long)hcpu;
cpu              1468 kernel/timer.c 		if (init_timers_cpu(cpu) < 0)
cpu              1474 kernel/timer.c 		migrate_timers(cpu);
cpu               342 kernel/trace/ftrace.c 	int cpu;
cpu               356 kernel/trace/ftrace.c 	cpu = raw_smp_processor_id();
cpu               357 kernel/trace/ftrace.c 	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
cpu               358 kernel/trace/ftrace.c 	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
cpu               392 kernel/trace/ftrace.c 	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
cpu                44 kernel/trace/trace.c 	for_each_cpu_mask(cpu, tracing_buffer_mask)
cpu                63 kernel/trace/trace.c 	return cpu_clock(cpu);
cpu               138 kernel/trace/trace.c 	int cpu;
cpu               142 kernel/trace/trace.c 		for_each_online_cpu(cpu)
cpu               143 kernel/trace/trace.c 			tracing_reset(tr->data[cpu]);
cpu               250 kernel/trace/trace.c 	struct trace_array_cpu *data = tr->data[cpu];
cpu               252 kernel/trace/trace.c 	max_tr.cpu = cpu;
cpu               255 kernel/trace/trace.c 	data = max_tr.data[cpu];
cpu               515 kernel/trace/trace.c 	__update_max_tr(tr, tsk, cpu);
cpu               530 kernel/trace/trace.c 	struct trace_array_cpu *data = tr->data[cpu];
cpu               538 kernel/trace/trace.c 	flip_trace(max_tr.data[cpu], data);
cpu               541 kernel/trace/trace.c 	__update_max_tr(tr, tsk, cpu);
cpu              1007 kernel/trace/trace.c 	int cpu;
cpu              1013 kernel/trace/trace.c 	cpu = raw_smp_processor_id();
cpu              1014 kernel/trace/trace.c 	data = tr->data[cpu];
cpu              1032 kernel/trace/trace.c 	int cpu;
cpu              1041 kernel/trace/trace.c 	cpu = raw_smp_processor_id();
cpu              1042 kernel/trace/trace.c 	data = tr->data[cpu];
cpu              1083 kernel/trace/trace.c 	if (iter->next_idx[cpu] >= tr->entries ||
cpu              1084 kernel/trace/trace.c 	    iter->next_idx[cpu] >= data->trace_idx ||
cpu              1089 kernel/trace/trace.c 	if (!iter->next_page[cpu]) {
cpu              1093 kernel/trace/trace.c 		iter->next_page[cpu] = &page->lru;
cpu              1094 kernel/trace/trace.c 		iter->next_page_idx[cpu] = data->trace_tail_idx;
cpu              1097 kernel/trace/trace.c 	page = list_entry(iter->next_page[cpu], struct page, lru);
cpu              1102 kernel/trace/trace.c 	WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);
cpu              1103 kernel/trace/trace.c 	return &array[iter->next_page_idx[cpu]];
cpu              1112 kernel/trace/trace.c 	int cpu;
cpu              1114 kernel/trace/trace.c 	for_each_tracing_cpu(cpu) {
cpu              1115 kernel/trace/trace.c 		if (!head_page(tr->data[cpu]))
cpu              1117 kernel/trace/trace.c 		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
cpu              1123 kernel/trace/trace.c 			next_cpu = cpu;
cpu              1136 kernel/trace/trace.c 	iter->next_idx[iter->cpu]++;
cpu              1137 kernel/trace/trace.c 	iter->next_page_idx[iter->cpu]++;
cpu              1139 kernel/trace/trace.c 	if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
cpu              1140 kernel/trace/trace.c 		struct trace_array_cpu *data = iter->tr->data[iter->cpu];
cpu              1142 kernel/trace/trace.c 		iter->next_page_idx[iter->cpu] = 0;
cpu              1143 kernel/trace/trace.c 		iter->next_page[iter->cpu] =
cpu              1144 kernel/trace/trace.c 			trace_next_list(data, iter->next_page[iter->cpu]);
cpu              1150 kernel/trace/trace.c 	struct trace_array_cpu *data = iter->tr->data[iter->cpu];
cpu              1172 kernel/trace/trace.c 	iter->prev_cpu = iter->cpu;
cpu              1175 kernel/trace/trace.c 	iter->cpu = next_cpu;
cpu              1230 kernel/trace/trace.c 		iter->cpu = 0;
cpu              1356 kernel/trace/trace.c 	struct trace_array_cpu *data = tr->data[tr->cpu];
cpu              1360 kernel/trace/trace.c 	int cpu;
cpu              1366 kernel/trace/trace.c 	for_each_tracing_cpu(cpu) {
cpu              1367 kernel/trace/trace.c 		if (head_page(tr->data[cpu])) {
cpu              1368 kernel/trace/trace.c 			total += tr->data[cpu]->trace_idx;
cpu              1369 kernel/trace/trace.c 			if (tr->data[cpu]->trace_idx > tr->entries)
cpu              1372 kernel/trace/trace.c 				entries += tr->data[cpu]->trace_idx;
cpu              1385 kernel/trace/trace.c 		   tr->cpu,
cpu              1431 kernel/trace/trace.c 	trace_seq_printf(s, "%d", cpu);
cpu              1499 kernel/trace/trace.c 				 entry->pid, cpu, entry->flags,
cpu              1506 kernel/trace/trace.c 		lat_print_generic(s, entry, cpu);
cpu              1579 kernel/trace/trace.c 	ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
cpu              1663 kernel/trace/trace.c 		entry->pid, iter->cpu, entry->t);
cpu              1727 kernel/trace/trace.c 	SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
cpu              1771 kernel/trace/trace.c 	SEQ_PUT_FIELD_RET(s, entry->cpu);
cpu              1800 kernel/trace/trace.c 	int cpu;
cpu              1802 kernel/trace/trace.c 	for_each_tracing_cpu(cpu) {
cpu              1803 kernel/trace/trace.c 		data = iter->tr->data[cpu];
cpu              1828 kernel/trace/trace.c 		return print_lat_fmt(iter, iter->idx, iter->cpu);
cpu              2113 kernel/trace/trace.c 	int err, cpu;
cpu              2122 kernel/trace/trace.c 	for_each_tracing_cpu(cpu) {
cpu              2127 kernel/trace/trace.c 		if (cpu_isset(cpu, tracing_cpumask) &&
cpu              2128 kernel/trace/trace.c 				!cpu_isset(cpu, tracing_cpumask_new)) {
cpu              2129 kernel/trace/trace.c 			atomic_inc(&global_trace.data[cpu]->disabled);
cpu              2131 kernel/trace/trace.c 		if (!cpu_isset(cpu, tracing_cpumask) &&
cpu              2132 kernel/trace/trace.c 				cpu_isset(cpu, tracing_cpumask_new)) {
cpu              2133 kernel/trace/trace.c 			atomic_dec(&global_trace.data[cpu]->disabled);
cpu              2509 kernel/trace/trace.c 	int cpu;
cpu              2605 kernel/trace/trace.c 	for_each_tracing_cpu(cpu) {
cpu              2606 kernel/trace/trace.c 		data = iter->tr->data[cpu];
cpu              2612 kernel/trace/trace.c 		cpu_set(cpu, mask);
cpu              2615 kernel/trace/trace.c 	for_each_cpu_mask(cpu, mask) {
cpu              2616 kernel/trace/trace.c 		data = iter->tr->data[cpu];
cpu              2619 kernel/trace/trace.c 		if (data->overrun > iter->last_overrun[cpu])
cpu              2620 kernel/trace/trace.c 			iter->overrun[cpu] +=
cpu              2621 kernel/trace/trace.c 				data->overrun - iter->last_overrun[cpu];
cpu              2622 kernel/trace/trace.c 		iter->last_overrun[cpu] = data->overrun;
cpu              2642 kernel/trace/trace.c 	for_each_cpu_mask(cpu, mask) {
cpu              2643 kernel/trace/trace.c 		data = iter->tr->data[cpu];
cpu              2647 kernel/trace/trace.c 	for_each_cpu_mask(cpu, mask) {
cpu              2648 kernel/trace/trace.c 		data = iter->tr->data[cpu];
cpu                71 kernel/trace/trace.h 	char			cpu;
cpu               129 kernel/trace/trace.h 	int			cpu;
cpu               180 kernel/trace/trace.h 	int			cpu;
cpu                21 kernel/trace/trace_functions.c 	int cpu;
cpu                23 kernel/trace/trace_functions.c 	tr->time_start = ftrace_now(tr->cpu);
cpu                25 kernel/trace/trace_functions.c 	for_each_online_cpu(cpu)
cpu                26 kernel/trace/trace_functions.c 		tracing_reset(tr->data[cpu]);
cpu                31 kernel/trace/trace_functions.c 	tr->cpu = get_cpu();
cpu                77 kernel/trace/trace_irqsoff.c 	int cpu;
cpu                85 kernel/trace/trace_irqsoff.c 	cpu = raw_smp_processor_id();
cpu                86 kernel/trace/trace_irqsoff.c 	if (likely(!per_cpu(tracing_cpu, cpu)))
cpu                94 kernel/trace/trace_irqsoff.c 	data = tr->data[cpu];
cpu               139 kernel/trace/trace_irqsoff.c 	T1 = ftrace_now(cpu);
cpu               166 kernel/trace/trace_irqsoff.c 	update_max_tr_single(tr, current, cpu);
cpu               175 kernel/trace/trace_irqsoff.c 	data->preempt_timestamp = ftrace_now(cpu);
cpu               183 kernel/trace/trace_irqsoff.c 	int cpu;
cpu               191 kernel/trace/trace_irqsoff.c 	cpu = raw_smp_processor_id();
cpu               193 kernel/trace/trace_irqsoff.c 	if (per_cpu(tracing_cpu, cpu))
cpu               196 kernel/trace/trace_irqsoff.c 	data = tr->data[cpu];
cpu               204 kernel/trace/trace_irqsoff.c 	data->preempt_timestamp = ftrace_now(cpu);
cpu               212 kernel/trace/trace_irqsoff.c 	per_cpu(tracing_cpu, cpu) = 1;
cpu               220 kernel/trace/trace_irqsoff.c 	int cpu;
cpu               225 kernel/trace/trace_irqsoff.c 	cpu = raw_smp_processor_id();
cpu               227 kernel/trace/trace_irqsoff.c 	if (unlikely(per_cpu(tracing_cpu, cpu)))
cpu               228 kernel/trace/trace_irqsoff.c 		per_cpu(tracing_cpu, cpu) = 0;
cpu               235 kernel/trace/trace_irqsoff.c 	data = tr->data[cpu];
cpu               245 kernel/trace/trace_irqsoff.c 	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
cpu                24 kernel/trace/trace_mmiotrace.c 	int cpu;
cpu                27 kernel/trace/trace_mmiotrace.c 	tr->time_start = ftrace_now(tr->cpu);
cpu                29 kernel/trace/trace_mmiotrace.c 	for_each_online_cpu(cpu)
cpu                30 kernel/trace/trace_mmiotrace.c 		tracing_reset(tr->data[cpu]);
cpu               131 kernel/trace/trace_mmiotrace.c 	int cpu;
cpu               133 kernel/trace/trace_mmiotrace.c 	for_each_online_cpu(cpu) {
cpu               134 kernel/trace/trace_mmiotrace.c 		cnt += iter->overrun[cpu];
cpu               135 kernel/trace/trace_mmiotrace.c 		iter->overrun[cpu] = 0;
cpu                30 kernel/trace/trace_sched_switch.c 	int cpu;
cpu                39 kernel/trace/trace_sched_switch.c 	cpu = raw_smp_processor_id();
cpu                40 kernel/trace/trace_sched_switch.c 	data = tr->data[cpu];
cpu                85 kernel/trace/trace_sched_switch.c 	int cpu;
cpu                93 kernel/trace/trace_sched_switch.c 	cpu = raw_smp_processor_id();
cpu                94 kernel/trace/trace_sched_switch.c 	data = tr->data[cpu];
cpu               131 kernel/trace/trace_sched_switch.c 	int cpu;
cpu               133 kernel/trace/trace_sched_switch.c 	tr->time_start = ftrace_now(tr->cpu);
cpu               135 kernel/trace/trace_sched_switch.c 	for_each_online_cpu(cpu)
cpu               136 kernel/trace/trace_sched_switch.c 		tracing_reset(tr->data[cpu]);
cpu                46 kernel/trace/trace_sched_wakeup.c 	int cpu;
cpu                54 kernel/trace/trace_sched_wakeup.c 	cpu = raw_smp_processor_id();
cpu                55 kernel/trace/trace_sched_wakeup.c 	data = tr->data[cpu];
cpu                70 kernel/trace/trace_sched_wakeup.c 	if (task_cpu(wakeup_task) != cpu)
cpu               125 kernel/trace/trace_sched_wakeup.c 	int cpu;
cpu               146 kernel/trace/trace_sched_wakeup.c 	cpu = raw_smp_processor_id();
cpu               147 kernel/trace/trace_sched_wakeup.c 	disabled = atomic_inc_return(&tr->data[cpu]->disabled);
cpu               165 kernel/trace/trace_sched_wakeup.c 	T1 = ftrace_now(cpu);
cpu               184 kernel/trace/trace_sched_wakeup.c 	atomic_dec(&tr->data[cpu]->disabled);
cpu               215 kernel/trace/trace_sched_wakeup.c 	int cpu;
cpu               217 kernel/trace/trace_sched_wakeup.c 	for_each_possible_cpu(cpu) {
cpu               218 kernel/trace/trace_sched_wakeup.c 		data = tr->data[cpu];
cpu               246 kernel/trace/trace_sched_wakeup.c 	int cpu = smp_processor_id();
cpu               255 kernel/trace/trace_sched_wakeup.c 	disabled = atomic_inc_return(&tr->data[cpu]->disabled);
cpu               277 kernel/trace/trace_sched_wakeup.c 	tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
cpu               284 kernel/trace/trace_sched_wakeup.c 	atomic_dec(&tr->data[cpu]->disabled);
cpu                87 kernel/trace/trace_selftest.c 	int cpu, ret = 0;
cpu                92 kernel/trace/trace_selftest.c 	for_each_possible_cpu(cpu) {
cpu                93 kernel/trace/trace_selftest.c 		if (!head_page(tr->data[cpu]))
cpu                96 kernel/trace/trace_selftest.c 		cnt += tr->data[cpu]->trace_idx;
cpu                98 kernel/trace/trace_selftest.c 		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
cpu               137 kernel/trace/trace_sysprof.c 	data = tr->data[cpu];
cpu               201 kernel/trace/trace_sysprof.c 	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
cpu               213 kernel/trace/trace_sysprof.c 	int cpu;
cpu               215 kernel/trace/trace_sysprof.c 	for_each_online_cpu(cpu) {
cpu               216 kernel/trace/trace_sysprof.c 		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
cpu               217 kernel/trace/trace_sysprof.c 		start_stack_timer(cpu);
cpu               224 kernel/trace/trace_sysprof.c 	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
cpu               231 kernel/trace/trace_sysprof.c 	int cpu;
cpu               233 kernel/trace/trace_sysprof.c 	for_each_online_cpu(cpu)
cpu               234 kernel/trace/trace_sysprof.c 		stop_stack_timer(cpu);
cpu               239 kernel/trace/trace_sysprof.c 	int cpu;
cpu               241 kernel/trace/trace_sysprof.c 	tr->time_start = ftrace_now(tr->cpu);
cpu               243 kernel/trace/trace_sysprof.c 	for_each_online_cpu(cpu)
cpu               244 kernel/trace/trace_sysprof.c 		tracing_reset(tr->data[cpu]);
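
trace_sysprof.c arms one hrtimer per CPU, and because an hrtimer fires on the CPU that started it, the start path indexed above rebinds current with set_cpus_allowed_ptr() so each timer is armed locally. A rough sketch of that start/stop shape; sample_timer, sample_fn and the 1 ms period are assumptions, not the tracer's real values:

	#include <linux/hrtimer.h>
	#include <linux/percpu.h>
	#include <linux/cpumask.h>
	#include <linux/sched.h>	/* current, set_cpus_allowed_ptr() */

	static DEFINE_PER_CPU(struct hrtimer, sample_timer);

	static enum hrtimer_restart sample_fn(struct hrtimer *t)
	{
		/* ... take a sample on this CPU ... */
		hrtimer_forward_now(t, ktime_set(0, 1000000));	/* 1 ms period */
		return HRTIMER_RESTART;
	}

	static void start_all_sample_timers(void)
	{
		int cpu;

		for_each_online_cpu(cpu) {
			/* hop onto each CPU so its timer is armed locally */
			set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
			hrtimer_init(&per_cpu(sample_timer, cpu),
				     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
			per_cpu(sample_timer, cpu).function = sample_fn;
			hrtimer_start(&per_cpu(sample_timer, cpu),
				      ktime_set(0, 1000000), HRTIMER_MODE_REL);
		}
	}

	static void stop_all_sample_timers(void)
	{
		int cpu;

		for_each_online_cpu(cpu)
			hrtimer_cancel(&per_cpu(sample_timer, cpu));
	}
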
cpu               101 kernel/workqueue.c 		cpu = singlethread_cpu;
cpu               102 kernel/workqueue.c 	return per_cpu_ptr(wq->cpu_wq, cpu);
cpu               189 kernel/workqueue.c 		__queue_work(wq_per_cpu(wq, cpu), work);
cpu               251 kernel/workqueue.c 		if (unlikely(cpu >= 0))
cpu               252 kernel/workqueue.c 			add_timer_on(timer, cpu);
cpu               413 kernel/workqueue.c 	int cpu;
cpu               418 kernel/workqueue.c 	for_each_cpu_mask_nr(cpu, *cpu_map)
cpu               419 kernel/workqueue.c 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
cpu               535 kernel/workqueue.c 	int cpu;
cpu               549 kernel/workqueue.c 	for_each_cpu_mask_nr(cpu, *cpu_map)
cpu               550 kernel/workqueue.c 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
cpu               634 kernel/workqueue.c 	return queue_work_on(cpu, keventd_wq, work);
cpu               665 kernel/workqueue.c 	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
cpu               680 kernel/workqueue.c 	int cpu;
cpu               688 kernel/workqueue.c 	for_each_online_cpu(cpu) {
cpu               689 kernel/workqueue.c 		struct work_struct *work = per_cpu_ptr(works, cpu);
cpu               692 kernel/workqueue.c 		schedule_work_on(cpu, work);
cpu               694 kernel/workqueue.c 	for_each_online_cpu(cpu)
cpu               695 kernel/workqueue.c 		flush_work(per_cpu_ptr(works, cpu));
cpu               741 kernel/workqueue.c 	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
cpu               746 kernel/workqueue.c 	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
cpu               757 kernel/workqueue.c 	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
cpu               773 kernel/workqueue.c 	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
cpu               795 kernel/workqueue.c 		if (cpu >= 0)
cpu               796 kernel/workqueue.c 			kthread_bind(p, cpu);
cpu               809 kernel/workqueue.c 	int err = 0, cpu;
cpu               848 kernel/workqueue.c 		for_each_possible_cpu(cpu) {
cpu               849 kernel/workqueue.c 			cwq = init_cpu_workqueue(wq, cpu);
cpu               850 kernel/workqueue.c 			if (err || !cpu_online(cpu))
cpu               852 kernel/workqueue.c 			err = create_workqueue_thread(cwq, cpu);
cpu               853 kernel/workqueue.c 			start_workqueue_thread(cwq, cpu);
cpu               902 kernel/workqueue.c 	int cpu;
cpu               909 kernel/workqueue.c 	for_each_cpu_mask_nr(cpu, *cpu_map)
cpu               910 kernel/workqueue.c 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
cpu               922 kernel/workqueue.c 	unsigned int cpu = (unsigned long)hcpu;
cpu               931 kernel/workqueue.c 		cpu_set(cpu, cpu_populated_map);
cpu               935 kernel/workqueue.c 		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
cpu               939 kernel/workqueue.c 			if (!create_workqueue_thread(cwq, cpu))
cpu               942 kernel/workqueue.c 				wq->name, cpu);
cpu               948 kernel/workqueue.c 			start_workqueue_thread(cwq, cpu);
cpu               962 kernel/workqueue.c 		cpu_clear(cpu, cpu_populated_map);
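
kernel/workqueue.c's schedule_on_each_cpu() entries above show the canonical fan-out: allocate one work_struct per CPU, queue each on its CPU with schedule_work_on(), then flush every one so the caller knows all CPUs have run the function. A caller-side sketch of the same shape; drain_local_caches and drain_on_each_cpu are hypothetical names:

	#include <linux/workqueue.h>
	#include <linux/percpu.h>
	#include <linux/cpu.h>		/* get_online_cpus() */
	#include <linux/errno.h>

	static void drain_local_caches(struct work_struct *work)
	{
		/* runs on whichever CPU the work was queued on */
	}

	static int drain_on_each_cpu(void)
	{
		struct work_struct *works;
		int cpu;

		works = alloc_percpu(struct work_struct);
		if (!works)
			return -ENOMEM;

		get_online_cpus();		/* keep the online map stable */
		for_each_online_cpu(cpu) {
			struct work_struct *work = per_cpu_ptr(works, cpu);

			INIT_WORK(work, drain_local_caches);
			schedule_work_on(cpu, work);
		}
		for_each_online_cpu(cpu)
			flush_work(per_cpu_ptr(works, cpu));
		put_online_cpus();

		free_percpu(works);
		return 0;
	}
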
cpu                29 lib/cpumask.c  	int cpu;
cpu                31 lib/cpumask.c  	for_each_cpu_mask(cpu, *mask) {
cpu                32 lib/cpumask.c  		if (cpu_online(cpu))
cpu                35 lib/cpumask.c  	return cpu;
cpu                19 lib/percpu_counter.c 	int cpu;
cpu                22 lib/percpu_counter.c 	for_each_possible_cpu(cpu) {
cpu                23 lib/percpu_counter.c 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
cpu                35 lib/percpu_counter.c 	int cpu = get_cpu();
cpu                37 lib/percpu_counter.c 	pcount = per_cpu_ptr(fbc->counters, cpu);
cpu                58 lib/percpu_counter.c 	int cpu;
cpu                62 lib/percpu_counter.c 	for_each_online_cpu(cpu) {
cpu                63 lib/percpu_counter.c 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
cpu               121 lib/percpu_counter.c 	unsigned int cpu;
cpu               127 lib/percpu_counter.c 	cpu = (unsigned long)hcpu;
cpu               134 lib/percpu_counter.c 		pcount = per_cpu_ptr(fbc->counters, cpu);
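
lib/percpu_counter.c spreads one logical counter across CPUs so the hot path only touches a CPU-local s32; a reader either accepts the batched error or takes the lock and folds every CPU's delta into the shared total, exactly the loops indexed above. A simplified hand-rolled version of the same scheme (approx_counter and APPROX_BATCH are invented; the real API is percpu_counter_add()/percpu_counter_sum()):

	#include <linux/percpu.h>
	#include <linux/spinlock.h>

	#define APPROX_BATCH 32

	struct approx_counter {
		spinlock_t lock;	/* init with spin_lock_init() */
		s64 count;		/* folded total */
		s32 *counters;		/* from alloc_percpu(s32) */
	};

	static void approx_add(struct approx_counter *c, s32 amount)
	{
		int cpu = get_cpu();	/* pin: the local slot stays ours */
		s32 *pcount = per_cpu_ptr(c->counters, cpu);

		*pcount += amount;
		if (*pcount >= APPROX_BATCH || *pcount <= -APPROX_BATCH) {
			spin_lock(&c->lock);
			c->count += *pcount;	/* fold the local delta */
			*pcount = 0;
			spin_unlock(&c->lock);
		}
		put_cpu();
	}

	static s64 approx_sum(struct approx_counter *c)
	{
		s64 ret;
		int cpu;

		spin_lock(&c->lock);
		ret = c->count;
		for_each_online_cpu(cpu)
			ret += *per_cpu_ptr(c->counters, cpu);
		spin_unlock(&c->lock);
		return ret;
	}
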
cpu              1215 lib/radix-tree.c        int cpu = (long)hcpu;
cpu              1220 lib/radix-tree.c                rtp = &per_cpu(radix_tree_preloads, cpu);
cpu                25 mm/allocpercpu.c 	kfree(pdata->ptrs[cpu]);
cpu                26 mm/allocpercpu.c 	pdata->ptrs[cpu] = NULL;
cpu                36 mm/allocpercpu.c 	int cpu;
cpu                37 mm/allocpercpu.c 	for_each_cpu_mask_nr(cpu, *mask)
cpu                38 mm/allocpercpu.c 		percpu_depopulate(__pdata, cpu);
cpu                58 mm/allocpercpu.c 	int node = cpu_to_node(cpu);
cpu                65 mm/allocpercpu.c 	BUG_ON(pdata->ptrs[cpu]);
cpu                67 mm/allocpercpu.c 		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
cpu                69 mm/allocpercpu.c 		pdata->ptrs[cpu] = kzalloc(size, gfp);
cpu                70 mm/allocpercpu.c 	return pdata->ptrs[cpu];
cpu                86 mm/allocpercpu.c 	int cpu;
cpu                89 mm/allocpercpu.c 	for_each_cpu_mask_nr(cpu, *mask)
cpu                90 mm/allocpercpu.c 		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
cpu                94 mm/allocpercpu.c 			cpu_set(cpu, populated);
cpu                71 mm/memcontrol.c 	int cpu = smp_processor_id();
cpu                72 mm/memcontrol.c 	stat->cpustat[cpu].count[idx] += val;
cpu                78 mm/memcontrol.c 	int cpu;
cpu                80 mm/memcontrol.c 	for_each_possible_cpu(cpu)
cpu                81 mm/memcontrol.c 		ret += stat->cpustat[cpu].count[idx];
cpu               906 mm/page_alloc.c 		pset = zone_pcp(zone, cpu);
cpu              1045 mm/page_alloc.c 	int cpu;
cpu              1049 mm/page_alloc.c 	cpu  = get_cpu();
cpu              1053 mm/page_alloc.c 		pcp = &zone_pcp(zone, cpu)->pcp;
cpu              1844 mm/page_alloc.c 	int cpu;
cpu              1854 mm/page_alloc.c 		for_each_online_cpu(cpu) {
cpu              1857 mm/page_alloc.c 			pageset = zone_pcp(zone, cpu);
cpu              1860 mm/page_alloc.c 			       cpu, pageset->pcp.high,
cpu              2724 mm/page_alloc.c 	int node = cpu_to_node(cpu);
cpu              2733 mm/page_alloc.c 		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
cpu              2735 mm/page_alloc.c 		if (!zone_pcp(zone, cpu))
cpu              2738 mm/page_alloc.c 		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
cpu              2741 mm/page_alloc.c 			setup_pagelist_highmark(zone_pcp(zone, cpu),
cpu              2752 mm/page_alloc.c 		kfree(zone_pcp(dzone, cpu));
cpu              2753 mm/page_alloc.c 		zone_pcp(dzone, cpu) = NULL;
cpu              2763 mm/page_alloc.c 		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
cpu              2766 mm/page_alloc.c 		if (pset != &boot_pageset[cpu])
cpu              2768 mm/page_alloc.c 		zone_pcp(zone, cpu) = NULL;
cpu              2776 mm/page_alloc.c 	int cpu = (long)hcpu;
cpu              2782 mm/page_alloc.c 		if (process_zones(cpu))
cpu              2789 mm/page_alloc.c 		free_zone_pagesets(cpu);
cpu              2860 mm/page_alloc.c 	int cpu;
cpu              2863 mm/page_alloc.c 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
cpu              2866 mm/page_alloc.c 		zone_pcp(zone, cpu) = &boot_pageset[cpu];
cpu              2867 mm/page_alloc.c 		setup_pageset(&boot_pageset[cpu],0);
cpu              2869 mm/page_alloc.c 		setup_pageset(zone_pcp(zone,cpu), batch);
cpu              4090 mm/page_alloc.c 	int cpu = (unsigned long)hcpu;
cpu              4093 mm/page_alloc.c 		drain_pages(cpu);
cpu              4101 mm/page_alloc.c 		vm_events_fold_cpu(cpu);
cpu              4110 mm/page_alloc.c 		refresh_cpu_vm_stats(cpu);
cpu              4368 mm/page_alloc.c 	unsigned int cpu;
cpu              4375 mm/page_alloc.c 		for_each_online_cpu(cpu) {
cpu              4378 mm/page_alloc.c 			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
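
mm/page_alloc.c ties its per-cpu pagesets to CPU hotplug: CPU_UP_PREPARE allocates the set node-locally and can veto the bring-up, CPU_DEAD frees it again. The general shape of such a notifier, sketched with hypothetical alloc_percpu_state()/free_percpu_state() helpers standing in for process_zones()/free_zone_pagesets():

	#include <linux/cpu.h>
	#include <linux/notifier.h>

	static int alloc_percpu_state(int cpu)
	{
		/* hypothetical: kmalloc_node(..., cpu_to_node(cpu)) etc. */
		return 0;
	}

	static void free_percpu_state(int cpu)
	{
		/* hypothetical: free whatever CPU_UP_PREPARE allocated */
	}

	static int __cpuinit pcp_cpu_notify(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
	{
		int cpu = (long)hcpu;

		switch (action) {
		case CPU_UP_PREPARE:
		case CPU_UP_PREPARE_FROZEN:
			if (alloc_percpu_state(cpu))
				return NOTIFY_BAD;	/* veto the online */
			break;
		case CPU_UP_CANCELED:
		case CPU_UP_CANCELED_FROZEN:
		case CPU_DEAD:
		case CPU_DEAD_FROZEN:
			free_percpu_state(cpu);
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block pcp_cpu_nb __cpuinitdata = {
		.notifier_call = pcp_cpu_notify,
	};

	/* boot code would call register_hotcpu_notifier(&pcp_cpu_nb) */
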
cpu                93 mm/quicklist.c 	int cpu;
cpu                96 mm/quicklist.c 	for_each_online_cpu(cpu) {
cpu                97 mm/quicklist.c 		ql = per_cpu(quicklist, cpu);
cpu               904 mm/slab.c      	node = next_node(cpu_to_node(cpu), node_online_map);
cpu               908 mm/slab.c      	per_cpu(reap_node, cpu) = node;
cpu               935 mm/slab.c      	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
cpu               943 mm/slab.c      		init_reap_node(cpu);
cpu               945 mm/slab.c      		schedule_delayed_work_on(cpu, reap_work,
cpu               946 mm/slab.c      					__round_jiffies_relative(HZ, cpu));
cpu              1161 mm/slab.c      	int node = cpu_to_node(cpu);
cpu              1170 mm/slab.c      		nc = cachep->array[cpu];
cpu              1171 mm/slab.c      		cachep->array[cpu] = NULL;
cpu              1226 mm/slab.c      	int node = cpu_to_node(cpu);
cpu              1295 mm/slab.c      		cachep->array[cpu] = nc;
cpu              1320 mm/slab.c      	cpuup_canceled(cpu);
cpu              1327 mm/slab.c      	long cpu = (long)hcpu;
cpu              1334 mm/slab.c      		err = cpuup_prepare(cpu);
cpu              1339 mm/slab.c      		start_cpu_timer(cpu);
cpu              1350 mm/slab.c      		cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
cpu              1352 mm/slab.c      		per_cpu(reap_work, cpu).work.func = NULL;
cpu              1356 mm/slab.c      		start_cpu_timer(cpu);
cpu              1373 mm/slab.c      		cpuup_canceled(cpu);
cpu              1646 mm/slab.c      	int cpu;
cpu              1651 mm/slab.c      	for_each_online_cpu(cpu)
cpu              1652 mm/slab.c      		start_cpu_timer(cpu);
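
mm/slab.c's cache reaper shows the per-CPU self-rearming timer pattern: each CPU owns a delayed_work queued on that CPU with schedule_delayed_work_on(), staggered by __round_jiffies_relative(HZ, cpu) so the CPUs do not all wake at once, and cancelled from the hotplug path when the CPU goes down. A condensed sketch with an invented scrub_work/scrub_fn pair:

	#include <linux/workqueue.h>
	#include <linux/percpu.h>
	#include <linux/smp.h>
	#include <linux/timer.h>	/* __round_jiffies_relative() */

	static DEFINE_PER_CPU(struct delayed_work, scrub_work);

	static void scrub_fn(struct work_struct *w)
	{
		int cpu = smp_processor_id();	/* keventd is per-CPU, so stable */

		/* ... do this CPU's periodic housekeeping ... */

		/* re-arm, staggered so CPUs don't all fire together */
		schedule_delayed_work_on(cpu, &per_cpu(scrub_work, cpu),
					 __round_jiffies_relative(HZ, cpu));
	}

	static void start_scrub_timer(int cpu)
	{
		struct delayed_work *w = &per_cpu(scrub_work, cpu);

		if (w->work.func == NULL) {	/* not yet armed on this CPU */
			INIT_DELAYED_WORK(w, scrub_fn);
			schedule_delayed_work_on(cpu, w,
					__round_jiffies_relative(HZ, cpu));
		}
	}
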
cpu               181 mm/slub.c      	int cpu;		/* Was running on cpu */
cpu               232 mm/slub.c      	return s->cpu_slab[cpu];
cpu               381 mm/slub.c      		p->cpu = smp_processor_id();
cpu               403 mm/slub.c      		s, t->addr, jiffies - t->when, t->cpu, t->pid);
cpu              1452 mm/slub.c      	struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
cpu              1967 mm/slub.c      	struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
cpu              1970 mm/slub.c      		per_cpu(kmem_cache_cpu_free, cpu) =
cpu              1976 mm/slub.c      			flags, cpu_to_node(cpu));
cpu              1987 mm/slub.c      	if (c < per_cpu(kmem_cache_cpu, cpu) ||
cpu              1988 mm/slub.c      			c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
cpu              1992 mm/slub.c      	c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
cpu              1993 mm/slub.c      	per_cpu(kmem_cache_cpu_free, cpu) = c;
cpu              1998 mm/slub.c      	int cpu;
cpu              2000 mm/slub.c      	for_each_online_cpu(cpu) {
cpu              2001 mm/slub.c      		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
cpu              2004 mm/slub.c      			s->cpu_slab[cpu] = NULL;
cpu              2005 mm/slub.c      			free_kmem_cache_cpu(c, cpu);
cpu              2012 mm/slub.c      	int cpu;
cpu              2014 mm/slub.c      	for_each_online_cpu(cpu) {
cpu              2015 mm/slub.c      		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
cpu              2020 mm/slub.c      		c = alloc_kmem_cache_cpu(s, cpu, flags);
cpu              2025 mm/slub.c      		s->cpu_slab[cpu] = c;
cpu              2037 mm/slub.c      	if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
cpu              2041 mm/slub.c      		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
cpu              2043 mm/slub.c      	cpu_set(cpu, kmem_cach_cpu_free_init_once);
cpu              2048 mm/slub.c      	int cpu;
cpu              2050 mm/slub.c      	for_each_online_cpu(cpu)
cpu              2051 mm/slub.c      		init_alloc_cpu_cpu(cpu);
cpu              3104 mm/slub.c      		int cpu;
cpu              3117 mm/slub.c      		for_each_online_cpu(cpu)
cpu              3118 mm/slub.c      			get_cpu_slab(s, cpu)->objsize = s->objsize;
cpu              3159 mm/slub.c      	long cpu = (long)hcpu;
cpu              3166 mm/slub.c      		init_alloc_cpu_cpu(cpu);
cpu              3169 mm/slub.c      			s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
cpu              3180 mm/slub.c      			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
cpu              3183 mm/slub.c      			__flush_cpu_slab(s, cpu);
cpu              3185 mm/slub.c      			free_kmem_cache_cpu(c, cpu);
cpu              3186 mm/slub.c      			s->cpu_slab[cpu] = NULL;
cpu              3510 mm/slub.c      				cpu_set(track->cpu, l->cpus);
cpu              3541 mm/slub.c      	cpu_set(track->cpu, l->cpus);
cpu              3674 mm/slub.c      		int cpu;
cpu              3676 mm/slub.c      		for_each_possible_cpu(cpu) {
cpu              3677 mm/slub.c      			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
cpu              4074 mm/slub.c      	int cpu;
cpu              4081 mm/slub.c      	for_each_online_cpu(cpu) {
cpu              4082 mm/slub.c      		unsigned x = get_cpu_slab(s, cpu)->stat[si];
cpu              4084 mm/slub.c      		data[cpu] = x;
cpu              4091 mm/slub.c      	for_each_online_cpu(cpu) {
cpu              4092 mm/slub.c      		if (data[cpu] && len < PAGE_SIZE - 20)
cpu              4093 mm/slub.c      			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
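
The slub entries just above include the sysfs statistics dump: sum one per-cpu counter over the online CPUs, print the total, then append a " C%d=%u" pair for every CPU with a non-zero contribution, capped so the output fits in one page. A sketch of that show helper over a plain array (the buffer and limit handling mirror the listing; show_counts itself is an invented name):

	#include <linux/kernel.h>	/* sprintf() */
	#include <linux/cpumask.h>
	#include <linux/mm.h>		/* PAGE_SIZE */

	static int show_counts(const unsigned int *counts, char *buf)
	{
		unsigned long sum = 0;
		int cpu, len;

		for_each_online_cpu(cpu)
			sum += counts[cpu];

		len = sprintf(buf, "%lu", sum);
		for_each_online_cpu(cpu) {
			/* only CPUs that contributed, and only while room remains */
			if (counts[cpu] && len < PAGE_SIZE - 20)
				len += sprintf(buf + len, " C%d=%u",
					       cpu, counts[cpu]);
		}
		return len + sprintf(buf + len, "\n");
	}
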
cpu               222 mm/swap.c      	pvec = &per_cpu(lru_add_pvecs, cpu);
cpu               226 mm/swap.c      	pvec = &per_cpu(lru_add_active_pvecs, cpu);
cpu               230 mm/swap.c      	pvec = &per_cpu(lru_rotate_pvecs, cpu);
cpu                25 mm/vmstat.c    	int cpu;
cpu                30 mm/vmstat.c    	for_each_cpu_mask_nr(cpu, *cpumask) {
cpu                31 mm/vmstat.c    		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
cpu                60 mm/vmstat.c    	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
cpu               135 mm/vmstat.c    	int cpu;
cpu               145 mm/vmstat.c    		for_each_online_cpu(cpu)
cpu               146 mm/vmstat.c    			zone_pcp(zone, cpu)->stat_threshold = threshold;
cpu               310 mm/vmstat.c    		p = zone_pcp(zone, cpu);
cpu               838 mm/vmstat.c    	struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
cpu               841 mm/vmstat.c    	schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
cpu               852 mm/vmstat.c    	long cpu = (long)hcpu;
cpu               857 mm/vmstat.c    		start_cpu_timer(cpu);
cpu               861 mm/vmstat.c    		cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
cpu               862 mm/vmstat.c    		per_cpu(vmstat_work, cpu).work.func = NULL;
cpu               866 mm/vmstat.c    		start_cpu_timer(cpu);
cpu               883 mm/vmstat.c    	int cpu;
cpu               888 mm/vmstat.c    	for_each_online_cpu(cpu)
cpu               889 mm/vmstat.c    		start_cpu_timer(cpu);
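
mm/vmstat.c keeps its event counters in a DEFINE_PER_CPU array and never locks them: writers bump the local slot with preemption disabled, readers walk a cpumask and add up whatever each CPU currently holds, and a dying CPU's residue is folded onto a survivor. A condensed sketch of the counting and summing sides, with a hypothetical event enum:

	#include <linux/percpu.h>
	#include <linux/cpumask.h>
	#include <linux/string.h>	/* memset() */

	enum my_event { EV_ALLOC, EV_FREE, NR_MY_EVENTS };

	struct my_event_state {
		unsigned long event[NR_MY_EVENTS];
	};

	static DEFINE_PER_CPU(struct my_event_state, my_event_states);

	static inline void count_my_event(enum my_event item)
	{
		get_cpu_var(my_event_states).event[item]++;
		put_cpu_var(my_event_states);
	}

	static void sum_my_events(unsigned long *ret, const cpumask_t *mask)
	{
		int cpu, i;

		memset(ret, 0, NR_MY_EVENTS * sizeof(unsigned long));
		for_each_cpu_mask_nr(cpu, *mask) {
			struct my_event_state *this =
				&per_cpu(my_event_states, cpu);

			for (i = 0; i < NR_MY_EVENTS; i++)
				ret[i] += this->event[i];
		}
	}
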
cpu                53 net/bridge/netfilter/ebtables.c    COUNTER_OFFSET(n) * cpu))
cpu               936 net/bridge/netfilter/ebtables.c 	int i, cpu;
cpu               944 net/bridge/netfilter/ebtables.c 	for_each_possible_cpu(cpu) {
cpu               945 net/bridge/netfilter/ebtables.c 		if (cpu == 0)
cpu               947 net/bridge/netfilter/ebtables.c 		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
cpu              1865 net/core/dev.c 		int cpu = smp_processor_id(); /* ok because BHs are off */
cpu              1867 net/core/dev.c 		if (txq->xmit_lock_owner != cpu) {
cpu              1869 net/core/dev.c 			HARD_TX_LOCK(dev, txq, cpu);
cpu              4548 net/core/dev.c 	unsigned int cpu, oldcpu = (unsigned long)ocpu;
cpu              4555 net/core/dev.c 	cpu = smp_processor_id();
cpu              4556 net/core/dev.c 	sd = &per_cpu(softnet_data, cpu);
cpu              4596 net/core/dev.c 	unsigned int cpu, i, n, chan_idx;
cpu              4600 net/core/dev.c 		for_each_online_cpu(cpu)
cpu              4601 net/core/dev.c 			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
cpu              4606 net/core/dev.c 	cpu = first_cpu(cpu_online_map);
cpu              4616 net/core/dev.c 			per_cpu(softnet_data, cpu).net_dma = chan;
cpu              4617 net/core/dev.c 			cpu = next_cpu(cpu, cpu_online_map);
cpu                44 net/core/flow.c #define flow_table(cpu) (per_cpu(flow_tables, cpu))
cpu                58 net/core/flow.c 	(per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
cpu                60 net/core/flow.c 	(per_cpu(flow_hash_info, cpu).hash_rnd)
cpu                62 net/core/flow.c 	(per_cpu(flow_hash_info, cpu).count)
cpu                74 net/core/flow.c #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
cpu                92 net/core/flow.c 	flow_count(cpu)--;
cpu               103 net/core/flow.c 		flp = &flow_table(cpu)[i];
cpu               110 net/core/flow.c 			flow_entry_kill(cpu, fle);
cpu               119 net/core/flow.c 	__flow_cache_shrink(cpu, shrink_to);
cpu               124 net/core/flow.c 	get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32));
cpu               125 net/core/flow.c 	flow_hash_rnd_recalc(cpu) = 0;
cpu               127 net/core/flow.c 	__flow_cache_shrink(cpu, 0);
cpu               134 net/core/flow.c 	return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) &
cpu               173 net/core/flow.c 	int cpu;
cpu               176 net/core/flow.c 	cpu = smp_processor_id();
cpu               181 net/core/flow.c 	if (!flow_table(cpu))
cpu               184 net/core/flow.c 	if (flow_hash_rnd_recalc(cpu))
cpu               185 net/core/flow.c 		flow_new_hash_rnd(cpu);
cpu               186 net/core/flow.c 	hash = flow_hash_code(key, cpu);
cpu               188 net/core/flow.c 	head = &flow_table(cpu)[hash];
cpu               207 net/core/flow.c 		if (flow_count(cpu) > flow_hwm)
cpu               208 net/core/flow.c 			flow_cache_shrink(cpu);
cpu               218 net/core/flow.c 			flow_count(cpu)++;
cpu               253 net/core/flow.c 	int cpu;
cpu               255 net/core/flow.c 	cpu = smp_processor_id();
cpu               259 net/core/flow.c 		fle = flow_table(cpu)[i];
cpu               279 net/core/flow.c 	int cpu;
cpu               282 net/core/flow.c 	cpu = smp_processor_id();
cpu               284 net/core/flow.c 	tasklet = flow_flush_tasklet(cpu);
cpu               321 net/core/flow.c 	flow_table(cpu) = (struct flow_cache_entry **)
cpu               323 net/core/flow.c 	if (!flow_table(cpu))
cpu               326 net/core/flow.c 	flow_hash_rnd_recalc(cpu) = 1;
cpu               327 net/core/flow.c 	flow_count(cpu) = 0;
cpu               329 net/core/flow.c 	tasklet = flow_flush_tasklet(cpu);
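
net/core/flow.c gives every CPU a private hash table and a private hash seed (the flow_table()/flow_hash_rnd() macros above are just per_cpu() wrappers); lookups run with BHs disabled on the local CPU, so neither the table nor the seed needs a lock. A stripped-down sketch of the per-CPU table plus reseed, with invented my_* names:

	#include <linux/percpu.h>
	#include <linux/jhash.h>
	#include <linux/random.h>

	#define MY_HASH_SIZE 1024	/* power of two, so masking works */

	struct my_entry;		/* opaque for this sketch */

	static DEFINE_PER_CPU(struct my_entry **, my_table);
	static DEFINE_PER_CPU(u32, my_hash_rnd);

	/* hash with this CPU's private seed; the caller runs with BHs
	 * off, so the local seed and table cannot change under us */
	static u32 my_hash(const u32 *key, u32 words, int cpu)
	{
		return jhash2(key, words, per_cpu(my_hash_rnd, cpu)) &
		       (MY_HASH_SIZE - 1);
	}

	/* reseeding one CPU defeats hash-collision attacks there
	 * without disturbing any other CPU's table */
	static void my_new_hash_rnd(int cpu)
	{
		get_random_bytes(&per_cpu(my_hash_rnd, cpu), sizeof(u32));
	}
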
cpu              1761 net/core/neighbour.c 		int cpu;
cpu              1766 net/core/neighbour.c 		for_each_possible_cpu(cpu) {
cpu              1769 net/core/neighbour.c 			st = per_cpu_ptr(tbl->stats, cpu);
cpu              2421 net/core/neighbour.c 	int cpu;
cpu              2426 net/core/neighbour.c 	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
cpu              2427 net/core/neighbour.c 		if (!cpu_possible(cpu))
cpu              2429 net/core/neighbour.c 		*pos = cpu+1;
cpu              2430 net/core/neighbour.c 		return per_cpu_ptr(tbl->stats, cpu);
cpu              2439 net/core/neighbour.c 	int cpu;
cpu              2441 net/core/neighbour.c 	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
cpu              2442 net/core/neighbour.c 		if (!cpu_possible(cpu))
cpu              2444 net/core/neighbour.c 		*pos = cpu+1;
cpu              2445 net/core/neighbour.c 		return per_cpu_ptr(tbl->stats, cpu);
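
The neighbour.c stat entries above (and the near-identical ones in nf_conntrack, route.c and selinuxfs later in this listing) follow one seq_file convention for /proc per-CPU statistics: position 0 yields the header token, positions 1..NR_CPUS map onto CPU ids, and both ->start and ->next skip impossible CPUs. A stripped-down start/next pair over a hypothetical per-cpu my_stats:

	#include <linux/seq_file.h>
	#include <linux/percpu.h>

	struct my_stats { unsigned long hits, misses; };
	static DEFINE_PER_CPU(struct my_stats, my_stats);

	static void *my_stat_seq_start(struct seq_file *seq, loff_t *pos)
	{
		int cpu;

		if (*pos == 0)
			return SEQ_START_TOKEN;		/* header line */

		for (cpu = *pos - 1; cpu < NR_CPUS; ++cpu) {
			if (!cpu_possible(cpu))
				continue;
			*pos = cpu + 1;			/* resume point */
			return &per_cpu(my_stats, cpu);
		}
		return NULL;
	}

	static void *my_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	{
		int cpu;

		for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
			if (!cpu_possible(cpu))
				continue;
			*pos = cpu + 1;
			return &per_cpu(my_stats, cpu);
		}
		return NULL;
	}
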
cpu               391 net/core/pktgen.c 	int cpu;
cpu              3501 net/core/pktgen.c 	int cpu = t->cpu;
cpu              3503 net/core/pktgen.c 	BUG_ON(smp_processor_id() != cpu);
cpu              3508 net/core/pktgen.c 	pr_debug("pktgen: starting pktgen/%d:  pid=%d\n", cpu, task_pid_nr(current));
cpu              3703 net/core/pktgen.c 	t->cpu = cpu;
cpu              3710 net/core/pktgen.c 	p = kthread_create(pktgen_thread_worker, t, "kpktgend_%d", cpu);
cpu              3713 net/core/pktgen.c 		       "for cpu %d\n", t->cpu);
cpu              3718 net/core/pktgen.c 	kthread_bind(p, cpu);
cpu              3791 net/core/pktgen.c 	int cpu;
cpu              3812 net/core/pktgen.c 	for_each_online_cpu(cpu) {
cpu              3815 net/core/pktgen.c 		err = pktgen_create_thread(cpu);
cpu              3818 net/core/pktgen.c 			       "thread for cpu %d (%d)\n", cpu, err);
cpu              1947 net/core/sock.c 	int cpu = smp_processor_id();
cpu              1948 net/core/sock.c 	per_cpu_ptr(net->core.inuse, cpu)->val[prot->inuse_idx] += val;
cpu              1954 net/core/sock.c 	int cpu, idx = prot->inuse_idx;
cpu              1957 net/core/sock.c 	for_each_possible_cpu(cpu)
cpu              1958 net/core/sock.c 		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
cpu              2000 net/core/sock.c 	int cpu, idx = prot->inuse_idx;
cpu              2003 net/core/sock.c 	for_each_possible_cpu(cpu)
cpu              2004 net/core/sock.c 		res += per_cpu(prot_inuse, cpu).val[idx];
cpu               687 net/ipv4/netfilter/arp_tables.c 	unsigned int cpu;
cpu               705 net/ipv4/netfilter/arp_tables.c 	for_each_possible_cpu(cpu) {
cpu               706 net/ipv4/netfilter/arp_tables.c 		if (cpu == curcpu)
cpu               709 net/ipv4/netfilter/arp_tables.c 		ARPT_ENTRY_ITERATE(t->entries[cpu],
cpu               903 net/ipv4/netfilter/ip_tables.c 	unsigned int cpu;
cpu               921 net/ipv4/netfilter/ip_tables.c 	for_each_possible_cpu(cpu) {
cpu               922 net/ipv4/netfilter/ip_tables.c 		if (cpu == curcpu)
cpu               925 net/ipv4/netfilter/ip_tables.c 		IPT_ENTRY_ITERATE(t->entries[cpu],
cpu               289 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c 	int cpu;
cpu               294 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c 	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
cpu               295 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c 		if (!cpu_possible(cpu))
cpu               297 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c 		*pos = cpu+1;
cpu               298 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c 		return per_cpu_ptr(net->ct.stat, cpu);
cpu               307 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c 	int cpu;
cpu               309 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c 	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
cpu               310 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c 		if (!cpu_possible(cpu))
cpu               312 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c 		*pos = cpu+1;
cpu               313 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c 		return per_cpu_ptr(net->ct.stat, cpu);
cpu               425 net/ipv4/route.c 	int cpu;
cpu               430 net/ipv4/route.c 	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
cpu               431 net/ipv4/route.c 		if (!cpu_possible(cpu))
cpu               433 net/ipv4/route.c 		*pos = cpu+1;
cpu               434 net/ipv4/route.c 		return &per_cpu(rt_cache_stat, cpu);
cpu               441 net/ipv4/route.c 	int cpu;
cpu               443 net/ipv4/route.c 	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
cpu               444 net/ipv4/route.c 		if (!cpu_possible(cpu))
cpu               446 net/ipv4/route.c 		*pos = cpu+1;
cpu               447 net/ipv4/route.c 		return &per_cpu(rt_cache_stat, cpu);
cpu              2472 net/ipv4/tcp.c 	int cpu;
cpu              2473 net/ipv4/tcp.c 	for_each_possible_cpu(cpu) {
cpu              2474 net/ipv4/tcp.c 		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
cpu              2503 net/ipv4/tcp.c 	int cpu;
cpu              2510 net/ipv4/tcp.c 	for_each_possible_cpu(cpu) {
cpu              2517 net/ipv4/tcp.c 		*per_cpu_ptr(pool, cpu) = p;
cpu              2582 net/ipv4/tcp.c 	return (p ? *per_cpu_ptr(p, cpu) : NULL);
cpu               928 net/ipv6/netfilter/ip6_tables.c 	unsigned int cpu;
cpu               946 net/ipv6/netfilter/ip6_tables.c 	for_each_possible_cpu(cpu) {
cpu               947 net/ipv6/netfilter/ip6_tables.c 		if (cpu == curcpu)
cpu               950 net/ipv6/netfilter/ip6_tables.c 		IP6T_ENTRY_ITERATE(t->entries[cpu],
cpu               347 net/iucv/iucv.c 	int cpu = smp_processor_id();
cpu               359 net/iucv/iucv.c 	parm = iucv_param[cpu];
cpu               365 net/iucv/iucv.c 	cpu_set(cpu, iucv_irq_cpumask);
cpu               376 net/iucv/iucv.c 	int cpu = smp_processor_id();
cpu               380 net/iucv/iucv.c 	parm = iucv_param[cpu];
cpu               385 net/iucv/iucv.c 	cpu_clear(cpu, iucv_irq_cpumask);
cpu               396 net/iucv/iucv.c 	int cpu = smp_processor_id();
cpu               400 net/iucv/iucv.c 	if (cpu_isset(cpu, iucv_buffer_cpumask))
cpu               404 net/iucv/iucv.c 	parm = iucv_param[cpu];
cpu               406 net/iucv/iucv.c 	parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
cpu               428 net/iucv/iucv.c 		       "on cpu %i returned error 0x%02x (%s)\n", cpu, rc, err);
cpu               433 net/iucv/iucv.c 	cpu_set(cpu, iucv_buffer_cpumask);
cpu               451 net/iucv/iucv.c 	int cpu = smp_processor_id();
cpu               454 net/iucv/iucv.c 	if (!cpu_isset(cpu, iucv_buffer_cpumask))
cpu               461 net/iucv/iucv.c 	parm = iucv_param[cpu];
cpu               465 net/iucv/iucv.c 	cpu_clear(cpu, iucv_buffer_cpumask);
cpu               475 net/iucv/iucv.c 	int cpu;
cpu               478 net/iucv/iucv.c 	for_each_online_cpu(cpu)
cpu               480 net/iucv/iucv.c 		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
cpu               481 net/iucv/iucv.c 		    !cpu_isset(cpu, iucv_irq_cpumask))
cpu               482 net/iucv/iucv.c 			smp_call_function_single(cpu, iucv_allow_cpu,
cpu               495 net/iucv/iucv.c 	int cpu;
cpu               500 net/iucv/iucv.c 	for_each_cpu_mask_nr(cpu, cpumask)
cpu               501 net/iucv/iucv.c 		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
cpu               515 net/iucv/iucv.c 	int cpu, rc;
cpu               525 net/iucv/iucv.c 	for_each_online_cpu(cpu)
cpu               526 net/iucv/iucv.c 		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
cpu               559 net/iucv/iucv.c 	long cpu = (long) hcpu;
cpu               564 net/iucv/iucv.c 		iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
cpu               565 net/iucv/iucv.c 					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
cpu               566 net/iucv/iucv.c 		if (!iucv_irq_data[cpu])
cpu               568 net/iucv/iucv.c 		iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
cpu               569 net/iucv/iucv.c 				     GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
cpu               570 net/iucv/iucv.c 		if (!iucv_param[cpu]) {
cpu               571 net/iucv/iucv.c 			kfree(iucv_irq_data[cpu]);
cpu               572 net/iucv/iucv.c 			iucv_irq_data[cpu] = NULL;
cpu               580 net/iucv/iucv.c 		kfree(iucv_param[cpu]);
cpu               581 net/iucv/iucv.c 		iucv_param[cpu] = NULL;
cpu               582 net/iucv/iucv.c 		kfree(iucv_irq_data[cpu]);
cpu               583 net/iucv/iucv.c 		iucv_irq_data[cpu] = NULL;
cpu               589 net/iucv/iucv.c 		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
cpu               594 net/iucv/iucv.c 		cpu_clear(cpu, cpumask);
cpu               598 net/iucv/iucv.c 		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
cpu              1600 net/iucv/iucv.c 	int cpu;
cpu              1618 net/iucv/iucv.c 	for_each_online_cpu(cpu) {
cpu              1620 net/iucv/iucv.c 		iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
cpu              1621 net/iucv/iucv.c 				     GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
cpu              1622 net/iucv/iucv.c 		if (!iucv_irq_data[cpu]) {
cpu              1628 net/iucv/iucv.c 		iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
cpu              1629 net/iucv/iucv.c 				  GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
cpu              1630 net/iucv/iucv.c 		if (!iucv_param[cpu]) {
cpu              1650 net/iucv/iucv.c 	for_each_possible_cpu(cpu) {
cpu              1651 net/iucv/iucv.c 		kfree(iucv_param[cpu]);
cpu              1652 net/iucv/iucv.c 		iucv_param[cpu] = NULL;
cpu              1653 net/iucv/iucv.c 		kfree(iucv_irq_data[cpu]);
cpu              1654 net/iucv/iucv.c 		iucv_irq_data[cpu] = NULL;
cpu              1671 net/iucv/iucv.c 	int cpu;
cpu              1680 net/iucv/iucv.c 	for_each_possible_cpu(cpu) {
cpu              1681 net/iucv/iucv.c 		kfree(iucv_param[cpu]);
cpu              1682 net/iucv/iucv.c 		iucv_param[cpu] = NULL;
cpu              1683 net/iucv/iucv.c 		kfree(iucv_irq_data[cpu]);
cpu              1684 net/iucv/iucv.c 		iucv_irq_data[cpu] = NULL;
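
net/iucv/iucv.c allocates its per-CPU buffers one CPU at a time with kmalloc_node() so each buffer lands on the owning CPU's NUMA node, and unwinds over every possible CPU on failure; since kfree(NULL) is a no-op, the rollback needs no bookkeeping about how far allocation got. The pattern in isolation, with a hypothetical cpu_buf array:

	#include <linux/slab.h>
	#include <linux/cpumask.h>
	#include <linux/topology.h>	/* cpu_to_node() */
	#include <linux/errno.h>

	#define BUF_SIZE 512

	static void *cpu_buf[NR_CPUS];

	static int alloc_cpu_bufs(void)
	{
		int cpu;

		for_each_online_cpu(cpu) {
			/* node-local allocation for the CPU that will use it */
			cpu_buf[cpu] = kmalloc_node(BUF_SIZE, GFP_KERNEL,
						    cpu_to_node(cpu));
			if (!cpu_buf[cpu])
				goto out_free;
		}
		return 0;

	out_free:
		for_each_possible_cpu(cpu) {
			kfree(cpu_buf[cpu]);	/* kfree(NULL) is safe */
			cpu_buf[cpu] = NULL;
		}
		return -ENOMEM;
	}
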
cpu                84 net/netfilter/nf_conntrack_ecache.c 	int cpu;
cpu                86 net/netfilter/nf_conntrack_ecache.c 	for_each_possible_cpu(cpu) {
cpu                87 net/netfilter/nf_conntrack_ecache.c 		ecache = per_cpu_ptr(net->ct.ecache, cpu);
cpu               198 net/netfilter/nf_conntrack_standalone.c 	int cpu;
cpu               203 net/netfilter/nf_conntrack_standalone.c 	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
cpu               204 net/netfilter/nf_conntrack_standalone.c 		if (!cpu_possible(cpu))
cpu               206 net/netfilter/nf_conntrack_standalone.c 		*pos = cpu + 1;
cpu               207 net/netfilter/nf_conntrack_standalone.c 		return per_cpu_ptr(net->ct.stat, cpu);
cpu               216 net/netfilter/nf_conntrack_standalone.c 	int cpu;
cpu               218 net/netfilter/nf_conntrack_standalone.c 	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
cpu               219 net/netfilter/nf_conntrack_standalone.c 		if (!cpu_possible(cpu))
cpu               221 net/netfilter/nf_conntrack_standalone.c 		*pos = cpu + 1;
cpu               222 net/netfilter/nf_conntrack_standalone.c 		return per_cpu_ptr(net->ct.stat, cpu);
cpu               575 net/netfilter/x_tables.c 	int cpu;
cpu               587 net/netfilter/x_tables.c 	for_each_possible_cpu(cpu) {
cpu               589 net/netfilter/x_tables.c 			newinfo->entries[cpu] = kmalloc_node(size,
cpu               591 net/netfilter/x_tables.c 							cpu_to_node(cpu));
cpu               593 net/netfilter/x_tables.c 			newinfo->entries[cpu] = vmalloc_node(size,
cpu               594 net/netfilter/x_tables.c 							cpu_to_node(cpu));
cpu               596 net/netfilter/x_tables.c 		if (newinfo->entries[cpu] == NULL) {
cpu               608 net/netfilter/x_tables.c 	int cpu;
cpu               610 net/netfilter/x_tables.c 	for_each_possible_cpu(cpu) {
cpu               612 net/netfilter/x_tables.c 			kfree(info->entries[cpu]);
cpu               614 net/netfilter/x_tables.c 			vfree(info->entries[cpu]);
cpu              2306 net/socket.c   	int cpu;
cpu              2309 net/socket.c   	for_each_possible_cpu(cpu)
cpu              2310 net/socket.c   	    counter += per_cpu(sockets_in_use, cpu);
cpu               181 net/sunrpc/svc.c 	unsigned int cpu;
cpu               188 net/sunrpc/svc.c 	for_each_online_cpu(cpu) {
cpu               190 net/sunrpc/svc.c 		m->to_pool[cpu] = pidx;
cpu               191 net/sunrpc/svc.c 		m->pool_to[pidx] = cpu;
cpu               346 net/sunrpc/svc.c 			pidx = m->to_pool[cpu];
cpu               349 net/sunrpc/svc.c 			pidx = m->to_pool[cpu_to_node(cpu)];
cpu               300 net/sunrpc/svc_xprt.c 	int cpu;
cpu               306 net/sunrpc/svc_xprt.c 	cpu = get_cpu();
cpu               307 net/sunrpc/svc_xprt.c 	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
cpu                48 net/xfrm/xfrm_ipcomp.c 	const int cpu = get_cpu();
cpu                49 net/xfrm/xfrm_ipcomp.c 	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
cpu                50 net/xfrm/xfrm_ipcomp.c 	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
cpu               142 net/xfrm/xfrm_ipcomp.c 	const int cpu = get_cpu();
cpu               143 net/xfrm/xfrm_ipcomp.c 	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
cpu               144 net/xfrm/xfrm_ipcomp.c 	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
cpu               246 net/xfrm/xfrm_ipcomp.c 	int cpu;
cpu               264 net/xfrm/xfrm_ipcomp.c 	for_each_possible_cpu(cpu) {
cpu               265 net/xfrm/xfrm_ipcomp.c 		struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
cpu               275 net/xfrm/xfrm_ipcomp.c 	int cpu;
cpu               278 net/xfrm/xfrm_ipcomp.c 	cpu = raw_smp_processor_id();
cpu               284 net/xfrm/xfrm_ipcomp.c 		tfm = *per_cpu_ptr(tfms, cpu);
cpu               304 net/xfrm/xfrm_ipcomp.c 	for_each_possible_cpu(cpu) {
cpu               309 net/xfrm/xfrm_ipcomp.c 		*per_cpu_ptr(tfms, cpu) = tfm;
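
xfrm_ipcomp.c keeps one scratch buffer and one crypto_comp transform per CPU; get_cpu() disables preemption so the local pair stays exclusively ours for the duration of the (de)compression, and put_cpu() releases the CPU again. The access pattern, with a hypothetical scratches table standing in for ipcomp's:

	#include <linux/percpu.h>
	#include <linux/smp.h>
	#include <linux/string.h>

	static u8 **scratches;		/* from alloc_percpu(u8 *) */

	static int use_local_scratch(const void *src, size_t len)
	{
		const int cpu = get_cpu();	/* preemption off: CPU pinned */
		u8 *scratch = *per_cpu_ptr(scratches, cpu);

		/* stand-in for the real transform into the scratch page */
		memcpy(scratch, src, len);

		put_cpu();			/* preemption back on */
		return 0;
	}
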
cpu              1207 security/selinux/selinuxfs.c 	int cpu;
cpu              1209 security/selinux/selinuxfs.c 	for (cpu = *idx; cpu < NR_CPUS; ++cpu) {
cpu              1210 security/selinux/selinuxfs.c 		if (!cpu_possible(cpu))
cpu              1212 security/selinux/selinuxfs.c 		*idx = cpu + 1;
cpu              1213 security/selinux/selinuxfs.c 		return &per_cpu(avc_cache_stats, cpu);
cpu                84 virt/kvm/kvm_main.c 	int cpu;
cpu                87 virt/kvm/kvm_main.c 	cpu = get_cpu();
cpu                89 virt/kvm/kvm_main.c 	kvm_arch_vcpu_load(vcpu, cpu);
cpu               108 virt/kvm/kvm_main.c 	int i, cpu, me;
cpu               120 virt/kvm/kvm_main.c 		cpu = vcpu->cpu;
cpu               121 virt/kvm/kvm_main.c 		if (cpu != -1 && cpu != me)
cpu               122 virt/kvm/kvm_main.c 			cpu_set(cpu, cpus);
cpu               134 virt/kvm/kvm_main.c 	int i, cpu, me;
cpu               146 virt/kvm/kvm_main.c 		cpu = vcpu->cpu;
cpu               147 virt/kvm/kvm_main.c 		if (cpu != -1 && cpu != me)
cpu               148 virt/kvm/kvm_main.c 			cpu_set(cpu, cpus);
cpu               164 virt/kvm/kvm_main.c 	vcpu->cpu = -1;
cpu              1476 virt/kvm/kvm_main.c 	int cpu = raw_smp_processor_id();
cpu              1478 virt/kvm/kvm_main.c 	if (cpu_isset(cpu, cpus_hardware_enabled))
cpu              1480 virt/kvm/kvm_main.c 	cpu_set(cpu, cpus_hardware_enabled);
cpu              1486 virt/kvm/kvm_main.c 	int cpu = raw_smp_processor_id();
cpu              1488 virt/kvm/kvm_main.c 	if (!cpu_isset(cpu, cpus_hardware_enabled))
cpu              1490 virt/kvm/kvm_main.c 	cpu_clear(cpu, cpus_hardware_enabled);
cpu              1497 virt/kvm/kvm_main.c 	int cpu = (long)v;
cpu              1503 virt/kvm/kvm_main.c 		       cpu);
cpu              1508 virt/kvm/kvm_main.c 		       cpu);
cpu              1509 virt/kvm/kvm_main.c 		smp_call_function_single(cpu, hardware_disable, NULL, 1);
cpu              1513 virt/kvm/kvm_main.c 		       cpu);
cpu              1514 virt/kvm/kvm_main.c 		smp_call_function_single(cpu, hardware_enable, NULL, 1);
cpu              1692 virt/kvm/kvm_main.c 	kvm_arch_vcpu_load(vcpu, cpu);
cpu              1707 virt/kvm/kvm_main.c 	int cpu;
cpu              1728 virt/kvm/kvm_main.c 	for_each_online_cpu(cpu) {
cpu              1729 virt/kvm/kvm_main.c 		smp_call_function_single(cpu,
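
The listing closes with virt/kvm/kvm_main.c's per-CPU hardware enable/disable dance: a global cpumask records which CPUs have virtualization enabled, the enable/disable functions run on the target CPU itself (via smp_call_function_single() or the hotplug notifier indexed above) and test the mask first, so double enables and double disables are impossible. The core of that idiom, with hw_* names invented for the sketch:

	#include <linux/cpumask.h>
	#include <linux/smp.h>

	static cpumask_t hw_enabled = CPU_MASK_NONE;

	static void hw_enable(void *junk)
	{
		int cpu = raw_smp_processor_id();

		if (cpu_isset(cpu, hw_enabled))
			return;			/* already on for this CPU */
		cpu_set(cpu, hw_enabled);
		/* ... architecture-specific enable on this CPU ... */
	}

	static void hw_disable(void *junk)
	{
		int cpu = raw_smp_processor_id();

		if (!cpu_isset(cpu, hw_enabled))
			return;
		cpu_clear(cpu, hw_enabled);
		/* ... architecture-specific disable on this CPU ... */
	}

	static void hw_enable_all(void)
	{
		int cpu;

		for_each_online_cpu(cpu)
			smp_call_function_single(cpu, hw_enable, NULL, 1);
	}
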