per_cpu 599 arch/x86/kernel/acpi/boot.c per_cpu(x86_cpu_to_apicid, cpu) = -1;
per_cpu 666 arch/x86/kernel/apic_32.c struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
per_cpu 693 arch/x86/kernel/apic_32.c per_cpu(irq_stat, cpu).apic_timer_irqs++;
per_cpu 1559 arch/x86/kernel/apic_32.c per_cpu(x86_cpu_to_apicid, cpu) = apicid;
per_cpu 1560 arch/x86/kernel/apic_32.c per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
per_cpu 549 arch/x86/kernel/apic_64.c struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
per_cpu 576 arch/x86/kernel/apic_64.c per_cpu(irq_stat, cpu).apic_timer_irqs++;
per_cpu 1497 arch/x86/kernel/apic_64.c per_cpu(x86_cpu_to_apicid, cpu) = apicid;
per_cpu 1498 arch/x86/kernel/apic_64.c per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
per_cpu 1709 arch/x86/kernel/apic_64.c id = per_cpu(x86_bios_cpu_apicid, i);
per_cpu 957 arch/x86/kernel/cpu/common.c struct tss_struct *t = &per_cpu(init_tss, cpu);
per_cpu 958 arch/x86/kernel/cpu/common.c struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
per_cpu 1078 arch/x86/kernel/cpu/common.c struct tss_struct *t = &per_cpu(init_tss, cpu);
per_cpu 222 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c switch (per_cpu(drv_data, first_cpu(*mask))->cpu_feature) {
per_cpu 229 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c perf = per_cpu(drv_data, first_cpu(*mask))->acpi_data;
per_cpu 330 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100;
per_cpu 341 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
per_cpu 385 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
per_cpu 481 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
per_cpu 585 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c per_cpu(drv_data, cpu) = data;
per_cpu 611 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c policy->cpus = per_cpu(cpu_core_map, cpu);
per_cpu 730 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c per_cpu(drv_data, cpu) = NULL;
per_cpu 737 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
per_cpu 743 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c per_cpu(drv_data, policy->cpu) = NULL;
per_cpu 754 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
per_cpu 203 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
per_cpu 642 arch/x86/kernel/cpu/cpufreq/powernow-k8.c if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
per_cpu 796 arch/x86/kernel/cpu/cpufreq/powernow-k8.c if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
per_cpu 1016 arch/x86/kernel/cpu/cpufreq/powernow-k8.c struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
per_cpu 1092 arch/x86/kernel/cpu/cpufreq/powernow-k8.c struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
per_cpu 1180 arch/x86/kernel/cpu/cpufreq/powernow-k8.c pol->cpus = per_cpu(cpu_core_map, pol->cpu);
per_cpu 1211 arch/x86/kernel/cpu/cpufreq/powernow-k8.c per_cpu(powernow_data, pol->cpu) = data;
per_cpu 1225 arch/x86/kernel/cpu/cpufreq/powernow-k8.c struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
per_cpu 1247 arch/x86/kernel/cpu/cpufreq/powernow-k8.c first = first_cpu(per_cpu(cpu_core_map, cpu));
per_cpu 1248 arch/x86/kernel/cpu/cpufreq/powernow-k8.c data = per_cpu(powernow_data, first);
per_cpu 260 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c per_cpu(centrino_model, policy->cpu) = model;
per_cpu 295 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
per_cpu 296 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
per_cpu 297 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
per_cpu 302 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c if ((!per_cpu(centrino_model, cpu)) ||
per_cpu 303 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c (!per_cpu(centrino_model, cpu)->op_points))
per_cpu 308 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c per_cpu(centrino_model, cpu)->op_points[i].frequency
per_cpu 311 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c if (msr == per_cpu(centrino_model, cpu)->op_points[i].index)
per_cpu 312 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c return per_cpu(centrino_model, cpu)->
per_cpu 316 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;
per_cpu 376 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];
per_cpu 378 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c if (!per_cpu(centrino_cpu, policy->cpu)) {
per_cpu 415 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c per_cpu(centrino_model, policy->cpu)->op_points);
per_cpu 420 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
per_cpu 429 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c if (!per_cpu(centrino_model, cpu))
per_cpu 434 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c per_cpu(centrino_model, cpu) = NULL;
per_cpu 449 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c per_cpu(centrino_model, policy->cpu)->op_points);
per_cpu 486 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
per_cpu 492 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c per_cpu(centrino_model, cpu)->op_points,
per_cpu 534 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c msr = per_cpu(centrino_model, cpu)->op_points[newstate].index;
per_cpu 325 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
per_cpu 446 arch/x86/kernel/cpu/intel_cacheinfo.c per_cpu(cpu_llc_id, cpu) = l2_id;
per_cpu 453 arch/x86/kernel/cpu/intel_cacheinfo.c per_cpu(cpu_llc_id, cpu) = l3_id;
per_cpu 480 arch/x86/kernel/cpu/intel_cacheinfo.c #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
per_cpu 502 arch/x86/kernel/cpu/intel_cacheinfo.c if (i != cpu && per_cpu(cpuid4_info, i)) {
per_cpu 533 arch/x86/kernel/cpu/intel_cacheinfo.c kfree(per_cpu(cpuid4_info, cpu));
per_cpu 534 arch/x86/kernel/cpu/intel_cacheinfo.c per_cpu(cpuid4_info, cpu) = NULL;
per_cpu 547 arch/x86/kernel/cpu/intel_cacheinfo.c per_cpu(cpuid4_info, cpu) = kzalloc(
per_cpu 549 arch/x86/kernel/cpu/intel_cacheinfo.c if (per_cpu(cpuid4_info, cpu) == NULL)
per_cpu 574 arch/x86/kernel/cpu/intel_cacheinfo.c kfree(per_cpu(cpuid4_info, cpu));
per_cpu 575 arch/x86/kernel/cpu/intel_cacheinfo.c per_cpu(cpuid4_info, cpu) = NULL;
per_cpu 599 arch/x86/kernel/cpu/intel_cacheinfo.c #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y]))
per_cpu 836 arch/x86/kernel/cpu/intel_cacheinfo.c kfree(per_cpu(cache_kobject, cpu));
per_cpu 837 arch/x86/kernel/cpu/intel_cacheinfo.c kfree(per_cpu(index_kobject, cpu));
per_cpu 838 arch/x86/kernel/cpu/intel_cacheinfo.c per_cpu(cache_kobject, cpu) = NULL;
per_cpu 839 arch/x86/kernel/cpu/intel_cacheinfo.c per_cpu(index_kobject, cpu) = NULL;
per_cpu 855 arch/x86/kernel/cpu/intel_cacheinfo.c per_cpu(cache_kobject, cpu) =
per_cpu 857 arch/x86/kernel/cpu/intel_cacheinfo.c if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
per_cpu 860 arch/x86/kernel/cpu/intel_cacheinfo.c per_cpu(index_kobject, cpu) = kzalloc(
per_cpu 862 arch/x86/kernel/cpu/intel_cacheinfo.c if (unlikely(per_cpu(index_kobject, cpu) == NULL))
per_cpu 886 arch/x86/kernel/cpu/intel_cacheinfo.c retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
per_cpu 900 arch/x86/kernel/cpu/intel_cacheinfo.c per_cpu(cache_kobject, cpu),
per_cpu 906 arch/x86/kernel/cpu/intel_cacheinfo.c kobject_put(per_cpu(cache_kobject, cpu));
per_cpu 914 arch/x86/kernel/cpu/intel_cacheinfo.c kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
per_cpu 923 arch/x86/kernel/cpu/intel_cacheinfo.c if (per_cpu(cpuid4_info, cpu) == NULL)
per_cpu 931 arch/x86/kernel/cpu/intel_cacheinfo.c kobject_put(per_cpu(cache_kobject, cpu));
per_cpu 836 arch/x86/kernel/cpu/mcheck/mce_64.c memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
per_cpu 837 arch/x86/kernel/cpu/mcheck/mce_64.c per_cpu(device_mce,cpu).id = cpu;
per_cpu 838 arch/x86/kernel/cpu/mcheck/mce_64.c per_cpu(device_mce,cpu).cls = &mce_sysclass;
per_cpu 840 arch/x86/kernel/cpu/mcheck/mce_64.c err = sysdev_register(&per_cpu(device_mce,cpu));
per_cpu 845 arch/x86/kernel/cpu/mcheck/mce_64.c err = sysdev_create_file(&per_cpu(device_mce,cpu),
per_cpu 855 arch/x86/kernel/cpu/mcheck/mce_64.c sysdev_remove_file(&per_cpu(device_mce,cpu),
per_cpu 858 arch/x86/kernel/cpu/mcheck/mce_64.c sysdev_unregister(&per_cpu(device_mce,cpu));
per_cpu 871 arch/x86/kernel/cpu/mcheck/mce_64.c sysdev_remove_file(&per_cpu(device_mce,cpu),
per_cpu 873 arch/x86/kernel/cpu/mcheck/mce_64.c sysdev_unregister(&per_cpu(device_mce,cpu));
per_cpu 152 arch/x86/kernel/cpu/mcheck/mce_amd_64.c per_cpu(bank_map, cpu) |= (1 << bank);
per_cpu 195 arch/x86/kernel/cpu/mcheck/mce_amd_64.c if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
per_cpu 429 arch/x86/kernel/cpu/mcheck/mce_amd_64.c if (per_cpu(threshold_banks, cpu)[bank]->blocks)
per_cpu 431 arch/x86/kernel/cpu/mcheck/mce_amd_64.c &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
per_cpu 433 arch/x86/kernel/cpu/mcheck/mce_amd_64.c per_cpu(threshold_banks, cpu)[bank]->blocks = b;
per_cpu 436 arch/x86/kernel/cpu/mcheck/mce_amd_64.c per_cpu(threshold_banks, cpu)[bank]->kobj,
per_cpu 478 arch/x86/kernel/cpu/mcheck/mce_amd_64.c i = first_cpu(per_cpu(cpu_core_map, cpu));
per_cpu 485 arch/x86/kernel/cpu/mcheck/mce_amd_64.c if (per_cpu(threshold_banks, cpu)[bank])
per_cpu 488 arch/x86/kernel/cpu/mcheck/mce_amd_64.c b = per_cpu(threshold_banks, i)[bank];
per_cpu 493 arch/x86/kernel/cpu/mcheck/mce_amd_64.c err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj,
per_cpu 498 arch/x86/kernel/cpu/mcheck/mce_amd_64.c b->cpus = per_cpu(cpu_core_map, cpu);
per_cpu 499 arch/x86/kernel/cpu/mcheck/mce_amd_64.c per_cpu(threshold_banks, cpu)[bank] = b;
per_cpu 510 arch/x86/kernel/cpu/mcheck/mce_amd_64.c b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
per_cpu 517 arch/x86/kernel/cpu/mcheck/mce_amd_64.c b->cpus = per_cpu(cpu_core_map, cpu);
per_cpu 520 arch/x86/kernel/cpu/mcheck/mce_amd_64.c per_cpu(threshold_banks, cpu)[bank] = b;
per_cpu 534 arch/x86/kernel/cpu/mcheck/mce_amd_64.c err = sysfs_create_link(&per_cpu(device_mce, i).kobj,
per_cpu 539 arch/x86/kernel/cpu/mcheck/mce_amd_64.c per_cpu(threshold_banks, i)[bank] = b;
per_cpu 545 arch/x86/kernel/cpu/mcheck/mce_amd_64.c per_cpu(threshold_banks, cpu)[bank] = NULL;
per_cpu 558 arch/x86/kernel/cpu/mcheck/mce_amd_64.c if (!(per_cpu(bank_map, cpu) & (1 << bank)))
per_cpu 579 arch/x86/kernel/cpu/mcheck/mce_amd_64.c struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];
per_cpu 590 arch/x86/kernel/cpu/mcheck/mce_amd_64.c kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
per_cpu 591 arch/x86/kernel/cpu/mcheck/mce_amd_64.c per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
per_cpu 600 arch/x86/kernel/cpu/mcheck/mce_amd_64.c b = per_cpu(threshold_banks, cpu)[bank];
per_cpu 613 arch/x86/kernel/cpu/mcheck/mce_amd_64.c sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
per_cpu 614 arch/x86/kernel/cpu/mcheck/mce_amd_64.c per_cpu(threshold_banks, cpu)[bank] = NULL;
per_cpu 624 arch/x86/kernel/cpu/mcheck/mce_amd_64.c sysfs_remove_link(&per_cpu(device_mce, i).kobj, name);
per_cpu 625 arch/x86/kernel/cpu/mcheck/mce_amd_64.c per_cpu(threshold_banks, i)[bank] = NULL;
per_cpu 634 arch/x86/kernel/cpu/mcheck/mce_amd_64.c per_cpu(threshold_banks, cpu)[bank] = NULL;
per_cpu 642 arch/x86/kernel/cpu/mcheck/mce_amd_64.c if (!(per_cpu(bank_map, cpu) & (1 << bank)))
per_cpu 47 arch/x86/kernel/cpu/mcheck/therm_throt.c per_cpu(thermal_throttle_##name, cpu)); \
per_cpu 18 arch/x86/kernel/cpu/proc.c cpus_weight(per_cpu(cpu_core_map, cpu)));
per_cpu 60 arch/x86/kernel/cpu/proc.c cpus_weight(per_cpu(cpu_core_map, cpu)));
per_cpu 203 arch/x86/kernel/ds.c #define this_system_context per_cpu(system_context, smp_processor_id())
per_cpu 54 arch/x86/kernel/dumpstack_64.c unsigned long end = per_cpu(orig_ist, cpu).ist[k];
per_cpu 227 arch/x86/kernel/genapic_flat_64.c return per_cpu(x86_cpu_to_apicid, cpu);
per_cpu 66 arch/x86/kernel/genx2apic_cluster.c __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu),
per_cpu 102 arch/x86/kernel/genx2apic_cluster.c return per_cpu(x86_cpu_to_logical_apicid, cpu);
per_cpu 137 arch/x86/kernel/genx2apic_cluster.c per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
per_cpu 64 arch/x86/kernel/genx2apic_phys.c __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
per_cpu 100 arch/x86/kernel/genx2apic_phys.c return per_cpu(x86_cpu_to_apicid, cpu);
per_cpu 117 arch/x86/kernel/genx2apic_uv_x.c apicid = per_cpu(x86_cpu_to_apicid, cpu);
per_cpu 170 arch/x86/kernel/genx2apic_uv_x.c return per_cpu(x86_cpu_to_apicid, cpu);
per_cpu 419 arch/x86/kernel/genx2apic_uv_x.c pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
per_cpu 443 arch/x86/kernel/genx2apic_uv_x.c cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
per_cpu 398 arch/x86/kernel/io_apic_32.c (idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
per_cpu 402 arch/x86/kernel/io_apic_32.c #define CPU_TO_PACKAGEINDEX(i) (first_cpu(per_cpu(cpu_sibling_map, i)))
per_cpu 623 arch/x86/kernel/io_apic_32.c for_each_cpu_mask(j, per_cpu(cpu_sibling_map, min_loaded)) {
per_cpu 843 arch/x86/kernel/io_apic_64.c if (per_cpu(vector_irq, new_cpu)[vector] != -1)
per_cpu 853 arch/x86/kernel/io_apic_64.c per_cpu(vector_irq, new_cpu)[vector] = irq;
per_cpu 885 arch/x86/kernel/io_apic_64.c per_cpu(vector_irq, cpu)[vector] = -1;
per_cpu 902 arch/x86/kernel/io_apic_64.c per_cpu(vector_irq, cpu)[vector] = irq;
per_cpu 906 arch/x86/kernel/io_apic_64.c irq = per_cpu(vector_irq, cpu)[vector];
per_cpu 910 arch/x86/kernel/io_apic_64.c per_cpu(vector_irq, cpu)[vector] = -1;
per_cpu 70 arch/x86/kernel/ioport.c tss = &per_cpu(init_tss, get_cpu());
per_cpu 157 arch/x86/kernel/ipi.c if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
per_cpu 315 arch/x86/kernel/irq_32.c per_cpu(irq_stat,j).apic_timer_irqs);
per_cpu 322 arch/x86/kernel/irq_32.c per_cpu(irq_stat,j).irq_resched_count);
per_cpu 327 arch/x86/kernel/irq_32.c per_cpu(irq_stat,j).irq_call_count);
per_cpu 332 arch/x86/kernel/irq_32.c per_cpu(irq_stat,j).irq_tlb_count);
per_cpu 339 arch/x86/kernel/irq_32.c per_cpu(irq_stat,j).irq_thermal_count);
per_cpu 346 arch/x86/kernel/irq_32.c per_cpu(irq_stat,j).irq_spurious_count);
per_cpu 365 arch/x86/kernel/irq_32.c sum += per_cpu(irq_stat, cpu).apic_timer_irqs;
per_cpu 368 arch/x86/kernel/irq_32.c sum += per_cpu(irq_stat, cpu).irq_resched_count;
per_cpu 369 arch/x86/kernel/irq_32.c sum += per_cpu(irq_stat, cpu).irq_call_count;
per_cpu 370 arch/x86/kernel/irq_32.c sum += per_cpu(irq_stat, cpu).irq_tlb_count;
per_cpu 373 arch/x86/kernel/irq_32.c sum += per_cpu(irq_stat, cpu).irq_thermal_count;
per_cpu 376 arch/x86/kernel/irq_32.c sum += per_cpu(irq_stat, cpu).irq_spurious_count;
per_cpu 43 arch/x86/kernel/kvm.c return &per_cpu(para_state, raw_smp_processor_id());
per_cpu 95 arch/x86/kernel/kvmclock.c low = (int)__pa(&per_cpu(hv_clock, cpu)) | 1;
per_cpu 96 arch/x86/kernel/kvmclock.c high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32);
per_cpu 90 arch/x86/kernel/nmi.c return per_cpu(irq_stat, cpu).apic_timer_irqs +
per_cpu 91 arch/x86/kernel/nmi.c per_cpu(irq_stat, cpu).irq0_irqs;
per_cpu 130 arch/x86/kernel/nmi.c per_cpu(wd_enabled, cpu) = 0;
per_cpu 159 arch/x86/kernel/nmi.c if (!per_cpu(wd_enabled, cpu))
per_cpu 376 arch/x86/kernel/nmi.c if (per_cpu(nmi_touch, cpu) != 1)
per_cpu 377 arch/x86/kernel/nmi.c per_cpu(nmi_touch, cpu) = 1;
per_cpu 238 arch/x86/kernel/process_32.c struct tss_struct *tss = &per_cpu(init_tss, cpu);
per_cpu 556 arch/x86/kernel/process_32.c struct tss_struct *tss = &per_cpu(init_tss, cpu);
per_cpu 219 arch/x86/kernel/process_64.c struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
per_cpu 554 arch/x86/kernel/process_64.c struct tss_struct *tss = &per_cpu(init_tss, cpu);
per_cpu 65 arch/x86/kernel/setup_percpu.c per_cpu(x86_cpu_to_apicid, cpu) =
per_cpu 67 arch/x86/kernel/setup_percpu.c per_cpu(x86_bios_cpu_apicid, cpu) =
per_cpu 70 arch/x86/kernel/setup_percpu.c per_cpu(x86_cpu_to_node_map, cpu) =
per_cpu 234 arch/x86/kernel/setup_percpu.c per_cpu(x86_cpu_to_node_map, cpu) = node;
per_cpu 303 arch/x86/kernel/setup_percpu.c return per_cpu(x86_cpu_to_node_map, cpu);
per_cpu 322 arch/x86/kernel/setup_percpu.c return per_cpu(x86_cpu_to_node_map, cpu);
per_cpu 89 arch/x86/kernel/smpboot.c #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
per_cpu 90 arch/x86/kernel/smpboot.c #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
per_cpu 343 arch/x86/kernel/smpboot.c per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
per_cpu 454 arch/x86/kernel/smpboot.c cpu_set(i, per_cpu(cpu_sibling_map, cpu));
per_cpu 455 arch/x86/kernel/smpboot.c cpu_set(cpu, per_cpu(cpu_sibling_map, i));
per_cpu 456 arch/x86/kernel/smpboot.c cpu_set(i, per_cpu(cpu_core_map, cpu));
per_cpu 457 arch/x86/kernel/smpboot.c cpu_set(cpu, per_cpu(cpu_core_map, i));
per_cpu 463 arch/x86/kernel/smpboot.c cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
per_cpu 469 arch/x86/kernel/smpboot.c per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
per_cpu 475 arch/x86/kernel/smpboot.c if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
per_cpu 476 arch/x86/kernel/smpboot.c per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
per_cpu 481 arch/x86/kernel/smpboot.c cpu_set(i, per_cpu(cpu_core_map, cpu));
per_cpu 482 arch/x86/kernel/smpboot.c cpu_set(cpu, per_cpu(cpu_core_map, i));
per_cpu 486 arch/x86/kernel/smpboot.c if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
per_cpu 491 arch/x86/kernel/smpboot.c if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
per_cpu 514 arch/x86/kernel/smpboot.c return per_cpu(cpu_core_map, cpu);
per_cpu 861 arch/x86/kernel/smpboot.c per_cpu(current_task, cpu) = c_idle.idle;
per_cpu 951 arch/x86/kernel/smpboot.c per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
per_cpu 995 arch/x86/kernel/smpboot.c per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
per_cpu 1048 arch/x86/kernel/smpboot.c cpu_set(0, per_cpu(cpu_sibling_map, 0));
per_cpu 1049 arch/x86/kernel/smpboot.c cpu_set(0, per_cpu(cpu_core_map, 0));
per_cpu 1249 arch/x86/kernel/smpboot.c per_cpu(cpu_state, me) = CPU_ONLINE;
per_cpu 1309 arch/x86/kernel/smpboot.c for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
per_cpu 1310 arch/x86/kernel/smpboot.c cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
per_cpu 1314 arch/x86/kernel/smpboot.c if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
per_cpu 1318 arch/x86/kernel/smpboot.c for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
per_cpu 1319 arch/x86/kernel/smpboot.c cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
per_cpu 1320 arch/x86/kernel/smpboot.c cpus_clear(per_cpu(cpu_sibling_map, cpu));
per_cpu 1321 arch/x86/kernel/smpboot.c cpus_clear(per_cpu(cpu_core_map, cpu));
per_cpu 1389 arch/x86/kernel/smpboot.c if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
per_cpu 27 arch/x86/kernel/smpcommon.c per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
per_cpu 28 arch/x86/kernel/smpcommon.c per_cpu(cpu_number, cpu) = cpu;
per_cpu 78 arch/x86/kernel/time_32.c per_cpu(irq_stat, smp_processor_id()).irq0_irqs++;
per_cpu 37 arch/x86/kernel/tlb_32.c if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
per_cpu 39 arch/x86/kernel/tlb_32.c cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
per_cpu 107 arch/x86/kernel/tlb_32.c if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
per_cpu 108 arch/x86/kernel/tlb_32.c if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
per_cpu 235 arch/x86/kernel/tlb_32.c if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
per_cpu 248 arch/x86/kernel/tlb_32.c per_cpu(cpu_tlbstate, cpu).state = 0;
per_cpu 249 arch/x86/kernel/tlb_32.c per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
per_cpu 132 arch/x86/kernel/tlb_64.c f = &per_cpu(flush_state, sender);
per_cpu 172 arch/x86/kernel/tlb_64.c f = &per_cpu(flush_state, sender);
per_cpu 204 arch/x86/kernel/tlb_64.c spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
per_cpu 426 arch/x86/kernel/tlb_uv.c apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
per_cpu 469 arch/x86/kernel/tlb_uv.c stat = &per_cpu(ptcstats, cpu);
per_cpu 630 arch/x86/kernel/tlb_uv.c bcp = (struct bau_control *)&per_cpu(bau_control, i);
per_cpu 739 arch/x86/kernel/tlb_uv.c apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
per_cpu 49 arch/x86/kernel/topology.c per_cpu(cpu_devices, num).cpu.hotpluggable = 1;
per_cpu 50 arch/x86/kernel/topology.c return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
per_cpu 56 arch/x86/kernel/topology.c unregister_cpu(&per_cpu(cpu_devices, num).cpu);
per_cpu 62 arch/x86/kernel/topology.c return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
per_cpu 135 arch/x86/kernel/traps.c tss = &per_cpu(init_tss, cpu);
per_cpu 566 arch/x86/kernel/tsc.c scale = &per_cpu(cyc2ns, cpu);
per_cpu 151 arch/x86/kernel/vm86_32.c tss = &per_cpu(init_tss, get_cpu());
per_cpu 328 arch/x86/kernel/vm86_32.c tss = &per_cpu(init_tss, get_cpu());
per_cpu 296 arch/x86/kvm/svm.c svm_data = per_cpu(svm_data, me);
per_cpu 322 arch/x86/kvm/svm.c = per_cpu(svm_data, raw_smp_processor_id());
per_cpu 327 arch/x86/kvm/svm.c per_cpu(svm_data, raw_smp_processor_id()) = NULL;
per_cpu 346 arch/x86/kvm/svm.c per_cpu(svm_data, cpu) = svm_data;
per_cpu 1517 arch/x86/kvm/svm.c struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
per_cpu 1526 arch/x86/kvm/svm.c struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
per_cpu 339 arch/x86/kvm/vmx.c if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
per_cpu 340 arch/x86/kvm/vmx.c per_cpu(current_vmcs, cpu) = NULL;
per_cpu 632 arch/x86/kvm/vmx.c &per_cpu(vcpus_on_cpu, cpu));
per_cpu 636 arch/x86/kvm/vmx.c if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
per_cpu 639 arch/x86/kvm/vmx.c per_cpu(current_vmcs, cpu) = vmx->vmcs;
per_cpu 1043 arch/x86/kvm/vmx.c u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
per_cpu 1046 arch/x86/kvm/vmx.c INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
per_cpu 1067 arch/x86/kvm/vmx.c list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
per_cpu 1235 arch/x86/kvm/vmx.c free_vmcs(per_cpu(vmxarea, cpu));
per_cpu 1251 arch/x86/kvm/vmx.c per_cpu(vmxarea, cpu) = vmcs;
per_cpu 533 arch/x86/mach-voyager/voyager_smp.c per_cpu(current_task, cpu) = idle;
per_cpu 783 arch/x86/mach-voyager/voyager_smp.c if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
per_cpu 785 arch/x86/mach-voyager/voyager_smp.c cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
per_cpu 804 arch/x86/mach-voyager/voyager_smp.c if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
per_cpu 805 arch/x86/mach-voyager/voyager_smp.c if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
per_cpu 1065 arch/x86/mach-voyager/voyager_smp.c if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
per_cpu 1129 arch/x86/mach-voyager/voyager_smp.c if (--per_cpu(prof_counter, cpu) <= 0) {
per_cpu 1138 arch/x86/mach-voyager/voyager_smp.c per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
per_cpu 1139 arch/x86/mach-voyager/voyager_smp.c if (per_cpu(prof_counter, cpu) !=
per_cpu 1140 arch/x86/mach-voyager/voyager_smp.c per_cpu(prof_old_multiplier, cpu)) {
per_cpu 1142 arch/x86/mach-voyager/voyager_smp.c per_cpu(prof_old_multiplier, cpu) =
per_cpu 1143 arch/x86/mach-voyager/voyager_smp.c per_cpu(prof_counter, cpu);
per_cpu 1227 arch/x86/mach-voyager/voyager_smp.c per_cpu(prof_multiplier, i) = multiplier;
per_cpu 119 arch/x86/oprofile/nmi_int.c if (model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu)))
per_cpu 156 arch/x86/oprofile/nmi_int.c struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
per_cpu 164 arch/x86/oprofile/nmi_int.c kfree(per_cpu(cpu_msrs, i).counters);
per_cpu 165 arch/x86/oprofile/nmi_int.c per_cpu(cpu_msrs, i).counters = NULL;
per_cpu 166 arch/x86/oprofile/nmi_int.c kfree(per_cpu(cpu_msrs, i).controls);
per_cpu 167 arch/x86/oprofile/nmi_int.c per_cpu(cpu_msrs, i).controls = NULL;
per_cpu 179 arch/x86/oprofile/nmi_int.c per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
per_cpu 181 arch/x86/oprofile/nmi_int.c if (!per_cpu(cpu_msrs, i).counters) {
per_cpu 185 arch/x86/oprofile/nmi_int.c per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
per_cpu 187 arch/x86/oprofile/nmi_int.c if (!per_cpu(cpu_msrs, i).controls) {
per_cpu 202 arch/x86/oprofile/nmi_int.c struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
per_cpu 206 arch/x86/oprofile/nmi_int.c per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
per_cpu 235 arch/x86/oprofile/nmi_int.c model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
per_cpu 238 arch/x86/oprofile/nmi_int.c memcpy(per_cpu(cpu_msrs, cpu).counters,
per_cpu 239 arch/x86/oprofile/nmi_int.c per_cpu(cpu_msrs, 0).counters,
per_cpu 242 arch/x86/oprofile/nmi_int.c memcpy(per_cpu(cpu_msrs, cpu).controls,
per_cpu 243 arch/x86/oprofile/nmi_int.c per_cpu(cpu_msrs, 0).controls,
per_cpu 292 arch/x86/oprofile/nmi_int.c apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
per_cpu 383 arch/x86/oprofile/op_model_p4.c return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu)));
per_cpu 70 arch/x86/power/cpu_32.c struct tss_struct *t = &per_cpu(init_tss, cpu);
per_cpu 146 arch/x86/power/cpu_64.c struct tss_struct *t = &per_cpu(init_tss, cpu);
per_cpu 229 arch/x86/vdso/vdso32-setup.c struct tss_struct *tss = &per_cpu(init_tss, cpu);
per_cpu 132 arch/x86/xen/enlighten.c per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
per_cpu 137 arch/x86/xen/enlighten.c vcpup = &per_cpu(xen_vcpu_info, cpu);
per_cpu 156 arch/x86/xen/enlighten.c per_cpu(xen_vcpu, cpu) = vcpup;
per_cpu 1668 arch/x86/xen/enlighten.c per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
per_cpu 32 arch/x86/xen/irq.c per_cpu(vector_irq, cpu)[i] = i;
per_cpu 1081 arch/x86/xen/mmu.c if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
per_cpu 110 arch/x86/xen/smp.c per_cpu(resched_irq, cpu) = rc;
per_cpu 121 arch/x86/xen/smp.c per_cpu(callfunc_irq, cpu) = rc;
per_cpu 129 arch/x86/xen/smp.c per_cpu(debug_irq, cpu) = rc;
per_cpu 140 arch/x86/xen/smp.c per_cpu(callfuncsingle_irq, cpu) = rc;
per_cpu 145 arch/x86/xen/smp.c if (per_cpu(resched_irq, cpu) >= 0)
per_cpu 146 arch/x86/xen/smp.c unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
per_cpu 147 arch/x86/xen/smp.c if (per_cpu(callfunc_irq, cpu) >= 0)
per_cpu 148 arch/x86/xen/smp.c unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
per_cpu 149 arch/x86/xen/smp.c if (per_cpu(debug_irq, cpu) >= 0)
per_cpu 150 arch/x86/xen/smp.c unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
per_cpu 151 arch/x86/xen/smp.c if (per_cpu(callfuncsingle_irq, cpu) >= 0)
per_cpu 152 arch/x86/xen/smp.c unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
per_cpu 268 arch/x86/xen/smp.c per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
per_cpu 295 arch/x86/xen/smp.c per_cpu(current_task, cpu) = idle;
per_cpu 304 arch/x86/xen/smp.c per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
per_cpu 307 arch/x86/xen/smp.c per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
per_cpu 323 arch/x86/xen/smp.c while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
per_cpu 354 arch/x86/xen/smp.c unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
per_cpu 355 arch/x86/xen/smp.c unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
per_cpu 356 arch/x86/xen/smp.c unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
per_cpu 357 arch/x86/xen/smp.c unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
per_cpu 309 arch/x86/xen/spinlock.c if (per_cpu(lock_spinners, cpu) == xl) {
per_cpu 354 arch/x86/xen/spinlock.c per_cpu(lock_kicker_irq, cpu) = irq;
per_cpu 362 arch/x86/xen/spinlock.c unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
per_cpu 100 arch/x86/xen/time.c return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
per_cpu 107 arch/x86/xen/time.c area.addr.v = &per_cpu(runstate, cpu);
per_cpu 444 arch/x86/xen/time.c evt = &per_cpu(xen_clock_events, cpu);
per_cpu 457 arch/x86/xen/time.c evt = &per_cpu(xen_clock_events, cpu);
per_cpu 91 block/blk-softirq.c list_splice_init(&per_cpu(blk_cpu_done, cpu),
per_cpu 169 block/blk-softirq.c INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
per_cpu 104 block/blk.h return first_cpu(per_cpu(cpu_sibling_map, cpu));
per_cpu 3229 fs/buffer.c tot += per_cpu(bh_accounting, i).nr;
per_cpu 3259 fs/buffer.c struct bh_lru *b = &per_cpu(bh_lrus, cpu);
per_cpu 3265 fs/buffer.c get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
per_cpu 3266 fs/buffer.c per_cpu(bh_accounting, cpu).nr = 0;
per_cpu 409 fs/file.c struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
per_cpu 189 fs/namespace.c struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
per_cpu 204 fs/namespace.c cpu_writer = &per_cpu(mnt_writers, cpu);
per_cpu 278 fs/namespace.c cpu_writer = &per_cpu(mnt_writers, cpu);
per_cpu 616 fs/namespace.c struct mnt_writer *cpu_writer = &per_cpu(mnt_writers, cpu);
per_cpu 66 fs/xfs/linux-2.6/xfs_stats.c val += *(((__u32*)&per_cpu(xfsstats, c) + j));
per_cpu 74 fs/xfs/linux-2.6/xfs_stats.c xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
per_cpu 75 fs/xfs/linux-2.6/xfs_stats.c xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
per_cpu 76 fs/xfs/linux-2.6/xfs_stats.c xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
per_cpu 133 fs/xfs/linux-2.6/xfs_stats.h #define XFS_STATS_INC(v) (per_cpu(xfsstats, current_cpu()).v++)
per_cpu 134 fs/xfs/linux-2.6/xfs_stats.h #define XFS_STATS_DEC(v) (per_cpu(xfsstats, current_cpu()).v--)
per_cpu 135 fs/xfs/linux-2.6/xfs_stats.h #define XFS_STATS_ADD(v, inc) (per_cpu(xfsstats, current_cpu()).v += (inc))
per_cpu 44 fs/xfs/linux-2.6/xfs_sysctl.c vn_active = per_cpu(xfsstats, c).vn_active;
per_cpu 45 fs/xfs/linux-2.6/xfs_sysctl.c memset(&per_cpu(xfsstats, c), 0,
per_cpu 47 fs/xfs/linux-2.6/xfs_sysctl.c per_cpu(xfsstats, c).vn_active = vn_active;
per_cpu 4 include/asm-x86/bigsmp/apic.h #define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
per_cpu 90 include/asm-x86/bigsmp/apic.h return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
per_cpu 44 include/asm-x86/desc.h return per_cpu(gdt_page, cpu).gdt;
per_cpu 4 include/asm-x86/es7000/apic.h #define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
per_cpu 81 include/asm-x86/es7000/apic.h int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
per_cpu 103 include/asm-x86/es7000/apic.h return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
per_cpu 23 include/asm-x86/hardirq_32.h #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
per_cpu 132 include/asm-x86/ipi.h __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
per_cpu 119 include/asm-x86/mach-default/mach_apic.h return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
per_cpu 8 include/asm-x86/mmu_context_32.h if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
per_cpu 9 include/asm-x86/mmu_context_32.h per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
per_cpu 23 include/asm-x86/mmu_context_32.h per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
per_cpu 24 include/asm-x86/mmu_context_32.h per_cpu(cpu_tlbstate, cpu).active_mm = next;
per_cpu 39 include/asm-x86/mmu_context_32.h per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
per_cpu 40 include/asm-x86/mmu_context_32.h BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
per_cpu 200 include/asm-x86/percpu.h per_cpu(_name, _cpu))
per_cpu 212 include/asm-x86/percpu.h #define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
per_cpu 137 include/asm-x86/processor.h #define cpu_data(cpu) per_cpu(cpu_info, cpu)
per_cpu 147 include/asm-x86/smp.h #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
per_cpu 112 include/asm-x86/summit/apic.h return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
per_cpu 51 include/asm-x86/timer.h return cyc * per_cpu(cyc2ns, smp_processor_id()) >> CYC2NS_SCALE_FACTOR;
per_cpu 93 include/asm-x86/topology.h return per_cpu(x86_cpu_to_node_map, cpu);
per_cpu 102 include/asm-x86/topology.h return per_cpu(x86_cpu_to_node_map, cpu);
per_cpu 226 include/asm-x86/topology.h #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
per_cpu 227 include/asm-x86/topology.h #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
per_cpu 136 include/asm-x86/uv/uv_hub.h #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
per_cpu 207 include/linux/elevator.h __val += per_cpu(name, __cpu); \
per_cpu 36 include/linux/kernel_stat.h #define kstat_cpu(cpu) per_cpu(kstat, cpu)
per_cpu 122 include/linux/rcuclassic.h struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
per_cpu 127 include/linux/rcuclassic.h struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
per_cpu 55 include/linux/rcupreempt.h struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
per_cpu 87 kernel/fork.c total += per_cpu(process_counts, cpu);
per_cpu 708 kernel/hrtimer.c struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
per_cpu 1582 kernel/hrtimer.c struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
per_cpu 1687 kernel/hrtimer.c old_base = &per_cpu(hrtimer_bases, cpu);
per_cpu 183 kernel/lockdep.c &per_cpu(lock_stats, cpu)[class - lock_classes];
per_cpu 207 kernel/lockdep.c &per_cpu(lock_stats, cpu)[class - lock_classes];
per_cpu 241 kernel/profile.c per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
per_cpu 249 kernel/profile.c j = per_cpu(cpu_profile_flip, get_cpu());
per_cpu 253 kernel/profile.c struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
per_cpu 272 kernel/profile.c i = per_cpu(cpu_profile_flip, get_cpu());
per_cpu 276 kernel/profile.c struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
per_cpu 294 kernel/profile.c hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
per_cpu 343 kernel/profile.c per_cpu(cpu_profile_flip, cpu) = 0;
per_cpu 344 kernel/profile.c if (!per_cpu(cpu_profile_hits, cpu)[1]) {
per_cpu 350 kernel/profile.c per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
per_cpu 352 kernel/profile.c if (!per_cpu(cpu_profile_hits, cpu)[0]) {
per_cpu 358 kernel/profile.c per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
per_cpu 362 kernel/profile.c page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
per_cpu 363 kernel/profile.c per_cpu(cpu_profile_hits, cpu)[1] = NULL;
per_cpu 375 kernel/profile.c if (per_cpu(cpu_profile_hits, cpu)[0]) {
per_cpu 376 kernel/profile.c page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
per_cpu 377 kernel/profile.c per_cpu(cpu_profile_hits, cpu)[0] = NULL;
per_cpu 380 kernel/profile.c if (per_cpu(cpu_profile_hits, cpu)[1]) {
per_cpu 381 kernel/profile.c page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
per_cpu 382 kernel/profile.c per_cpu(cpu_profile_hits, cpu)[1] = NULL;
per_cpu 543 kernel/profile.c per_cpu(cpu_profile_hits, cpu)[1]
per_cpu 550 kernel/profile.c per_cpu(cpu_profile_hits, cpu)[0]
per_cpu 561 kernel/profile.c if (per_cpu(cpu_profile_hits, cpu)[0]) {
per_cpu 562 kernel/profile.c page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
per_cpu 563 kernel/profile.c per_cpu(cpu_profile_hits, cpu)[0] = NULL;
per_cpu 566 kernel/profile.c if (per_cpu(cpu_profile_hits, cpu)[1]) {
per_cpu 567 kernel/profile.c page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
per_cpu 568 kernel/profile.c per_cpu(cpu_profile_hits, cpu)[1] = NULL;
per_cpu 509 kernel/rcuclassic.c &per_cpu(rcu_data, cpu));
per_cpu 511 kernel/rcuclassic.c &per_cpu(rcu_bh_data, cpu));
per_cpu 653 kernel/rcuclassic.c return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
per_cpu 654 kernel/rcuclassic.c __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
per_cpu 665 kernel/rcuclassic.c struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
per_cpu 666 kernel/rcuclassic.c struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
per_cpu 735 kernel/rcuclassic.c struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
per_cpu 736 kernel/rcuclassic.c struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);
per_cpu 96 kernel/rcupdate.c struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
per_cpu 203 kernel/rcupreempt.c #define RCU_DATA_CPU(cpu) (&per_cpu(rcu_data, cpu))
per_cpu 421 kernel/rcupreempt.c if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
per_cpu 423 kernel/rcupreempt.c per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
per_cpu 446 kernel/rcupreempt.c struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
per_cpu 448 kernel/rcupreempt.c if (per_cpu(rcu_update_flag, cpu))
per_cpu 449 kernel/rcupreempt.c per_cpu(rcu_update_flag, cpu)++;
per_cpu 496 kernel/rcupreempt.c per_cpu(rcu_update_flag, cpu)++;
per_cpu 516 kernel/rcupreempt.c struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
per_cpu 527 kernel/rcupreempt.c if (per_cpu(rcu_update_flag, cpu)) {
per_cpu 528 kernel/rcupreempt.c if (--per_cpu(rcu_update_flag, cpu))
per_cpu 556 kernel/rcupreempt.c struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
per_cpu 566 kernel/rcupreempt.c struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
per_cpu 603 kernel/rcupreempt.c struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
per_cpu 635 kernel/rcupreempt.c struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
per_cpu 644 kernel/rcupreempt.c struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
per_cpu 689 kernel/rcupreempt.c struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
per_cpu 696 kernel/rcupreempt.c struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
per_cpu 752 kernel/rcupreempt.c per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
per_cpu 771 kernel/rcupreempt.c per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
per_cpu 817 kernel/rcupreempt.c per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
per_cpu 838 kernel/rcupreempt.c per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
per_cpu 904 kernel/rcupreempt.c if (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed) {
per_cpu 906 kernel/rcupreempt.c per_cpu(rcu_mb_flag, cpu) = rcu_mb_done;
per_cpu 1012 kernel/rcupreempt.c if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
per_cpu 1014 kernel/rcupreempt.c per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
per_cpu 1349 kernel/rcupreempt.c if ((per_cpu(rcu_flip_flag, cpu) == rcu_flipped) ||
per_cpu 1350 kernel/rcupreempt.c (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed))
per_cpu 1458 kernel/rcupreempt.c return per_cpu(rcu_flip_flag, cpu);
per_cpu 1464 kernel/rcupreempt.c return per_cpu(rcu_mb_flag, cpu);
per_cpu 756 kernel/rcutorture.c pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
per_cpu 757 kernel/rcutorture.c batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
per_cpu 1061 kernel/rcutorture.c per_cpu(rcu_torture_count, cpu)[i] = 0;
per_cpu 1062 kernel/rcutorture.c per_cpu(rcu_torture_batch, cpu)[i] = 0;
per_cpu 636 kernel/sched.c #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
per_cpu 7055 kernel/sched.c *sg = &per_cpu(sched_group_cpus, cpu);
per_cpu 7075 kernel/sched.c *mask = per_cpu(cpu_sibling_map, cpu);
per_cpu 7079 kernel/sched.c *sg = &per_cpu(sched_group_core, group);
per_cpu 7088 kernel/sched.c *sg = &per_cpu(sched_group_core, cpu);
per_cpu 7106 kernel/sched.c *mask = per_cpu(cpu_sibling_map, cpu);
per_cpu 7113 kernel/sched.c *sg = &per_cpu(sched_group_phys, group);
per_cpu 7139 kernel/sched.c *sg = &per_cpu(sched_group_allnodes, group);
per_cpu 7154 kernel/sched.c sd = &per_cpu(phys_domains, j);
per_cpu 7433 kernel/sched.c sd = &per_cpu(allnodes_domains, i);
per_cpu 7443 kernel/sched.c sd = &per_cpu(node_domains, i);
per_cpu 7454 kernel/sched.c sd = &per_cpu(phys_domains, i);
per_cpu 7465 kernel/sched.c sd = &per_cpu(core_domains, i);
per_cpu 7477 kernel/sched.c sd = &per_cpu(cpu_domains, i);
per_cpu 7480 kernel/sched.c sd->span = per_cpu(cpu_sibling_map, i);
per_cpu 7494 kernel/sched.c *this_sibling_map = per_cpu(cpu_sibling_map, i);
per_cpu 7577 kernel/sched.c sd = &per_cpu(node_domains, j);
per_cpu 7621 kernel/sched.c struct sched_domain *sd = &per_cpu(cpu_domains, i);
per_cpu 7628 kernel/sched.c struct sched_domain *sd = &per_cpu(core_domains, i);
per_cpu 7635 kernel/sched.c struct sched_domain *sd = &per_cpu(phys_domains, i);
per_cpu 7657 kernel/sched.c sd = &per_cpu(cpu_domains, i);
per_cpu 7659 kernel/sched.c sd = &per_cpu(core_domains, i);
per_cpu 7661 kernel/sched.c sd = &per_cpu(phys_domains, i);
per_cpu 8244 kernel/sched.c &per_cpu(init_cfs_rq, i),
per_cpu 8245 kernel/sched.c &per_cpu(init_sched_entity, i), i, 1,
per_cpu 8259 kernel/sched.c &per_cpu(init_rt_rq, i),
per_cpu 8260 kernel/sched.c &per_cpu(init_sched_rt_entity, i), i, 1,
per_cpu 69 kernel/sched_clock.c return &per_cpu(sched_clock_data, cpu);
per_cpu 118 kernel/sched_debug.c struct rq *rq = &per_cpu(runqueues, cpu);
per_cpu 148 kernel/sched_debug.c rq0_min_vruntime = per_cpu(runqueues, 0).cfs.min_vruntime;
per_cpu 227 kernel/sched_debug.c struct rq *rq = &per_cpu(runqueues, cpu);
per_cpu 41 kernel/smp.c struct call_single_queue *q = &per_cpu(call_single_queue, i);
per_cpu 70 kernel/smp.c struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
per_cpu 471 kernel/softirq.c per_cpu(tasklet_vec, cpu).tail =
per_cpu 472 kernel/softirq.c &per_cpu(tasklet_vec, cpu).head;
per_cpu 473 kernel/softirq.c per_cpu(tasklet_hi_vec, cpu).tail =
per_cpu 474 kernel/softirq.c &per_cpu(tasklet_hi_vec, cpu).head;
per_cpu 545 kernel/softirq.c for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
per_cpu 550 kernel/softirq.c per_cpu(tasklet_vec, cpu).tail = i;
per_cpu 563 kernel/softirq.c if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
per_cpu 564 kernel/softirq.c *(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
per_cpu 565 kernel/softirq.c __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
per_cpu 566 kernel/softirq.c per_cpu(tasklet_vec, cpu).head = NULL;
per_cpu 567 kernel/softirq.c per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
per_cpu 571 kernel/softirq.c if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
per_cpu 572 kernel/softirq.c *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
per_cpu 573 kernel/softirq.c __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
per_cpu 574 kernel/softirq.c per_cpu(tasklet_hi_vec, cpu).head = NULL;
per_cpu 575 kernel/softirq.c per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
per_cpu 599 kernel/softirq.c per_cpu(ksoftirqd, hotcpu) = p;
per_cpu 603 kernel/softirq.c wake_up_process(per_cpu(ksoftirqd, hotcpu));
per_cpu 608 kernel/softirq.c if (!per_cpu(ksoftirqd, hotcpu))
per_cpu 611 kernel/softirq.c kthread_bind(per_cpu(ksoftirqd, hotcpu),
per_cpu 617 kernel/softirq.c p = per_cpu(ksoftirqd, hotcpu);
per_cpu 618 kernel/softirq.c per_cpu(ksoftirqd, hotcpu) = NULL;
per_cpu 87 kernel/softlockup.c per_cpu(touch_timestamp, cpu) = 0;
per_cpu 98 kernel/softlockup.c unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
per_cpu 104 kernel/softlockup.c if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
per_cpu 107 kernel/softlockup.c per_cpu(touch_timestamp, this_cpu) = 0;
per_cpu 116 kernel/softlockup.c print_timestamp = per_cpu(print_timestamp, this_cpu);
per_cpu 135 kernel/softlockup.c wake_up_process(per_cpu(watchdog_task, this_cpu));
per_cpu 141 kernel/softlockup.c per_cpu(print_timestamp, this_cpu) = touch_timestamp;
per_cpu 294 kernel/softlockup.c BUG_ON(per_cpu(watchdog_task, hotcpu));
per_cpu 300 kernel/softlockup.c per_cpu(touch_timestamp, hotcpu) = 0;
per_cpu 301 kernel/softlockup.c per_cpu(watchdog_task, hotcpu) = p;
per_cpu 307 kernel/softlockup.c wake_up_process(per_cpu(watchdog_task, hotcpu));
per_cpu 322 kernel/softlockup.c if (!per_cpu(watchdog_task, hotcpu))
per_cpu 325 kernel/softlockup.c kthread_bind(per_cpu(watchdog_task, hotcpu),
per_cpu 329 kernel/softlockup.c p = per_cpu(watchdog_task, hotcpu);
per_cpu 330 kernel/softlockup.c per_cpu(watchdog_task, hotcpu) = NULL;
per_cpu 313 kernel/taskstats.c listeners = &per_cpu(listener_array, cpu);
per_cpu 324 kernel/taskstats.c listeners = &per_cpu(listener_array, cpu);
per_cpu 592 kernel/taskstats.c INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
per_cpu 593 kernel/taskstats.c init_rwsem(&(per_cpu(listener_array, i).sem));
per_cpu 140 kernel/time/tick-broadcast.c td = &per_cpu(tick_cpu_device, cpu);
per_cpu 152 kernel/time/tick-broadcast.c td = &per_cpu(tick_cpu_device, cpu);
per_cpu 218 kernel/time/tick-broadcast.c td = &per_cpu(tick_cpu_device, cpu);
per_cpu 404 kernel/time/tick-broadcast.c td = &per_cpu(tick_cpu_device, cpu);
per_cpu 459 kernel/time/tick-broadcast.c td = &per_cpu(tick_cpu_device, cpu);
per_cpu 501 kernel/time/tick-broadcast.c td = &per_cpu(tick_cpu_device, cpu);
per_cpu 44 kernel/time/tick-common.c return &per_cpu(tick_cpu_device, cpu);
per_cpu 208 kernel/time/tick-common.c td = &per_cpu(tick_cpu_device, cpu);
per_cpu 285 kernel/time/tick-common.c struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
per_cpu 41 kernel/time/tick-sched.c return &per_cpu(tick_cpu_sched, cpu);
per_cpu 140 kernel/time/tick-sched.c struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
per_cpu 160 kernel/time/tick-sched.c struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
per_cpu 192 kernel/time/tick-sched.c struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
per_cpu 224 kernel/time/tick-sched.c ts = &per_cpu(tick_cpu_sched, cpu);
per_cpu 388 kernel/time/tick-sched.c struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
per_cpu 666 kernel/time/tick-sched.c struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
per_cpu 685 kernel/time/tick-sched.c set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
per_cpu 129 kernel/time/timer_list.c struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
per_cpu 248 kernel/time/timer_stats.c lock = &per_cpu(lookup_lock, raw_smp_processor_id());
per_cpu 351 kernel/time/timer_stats.c spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags);
per_cpu 353 kernel/time/timer_stats.c spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags);
per_cpu 411 kernel/time/timer_stats.c spin_lock_init(&per_cpu(lookup_lock, cpu));
per_cpu 579 kernel/timer.c struct tvec_base *base = per_cpu(tvec_bases, cpu);
per_cpu 1385 kernel/timer.c per_cpu(tvec_bases, cpu) = base;
per_cpu 1398 kernel/timer.c base = per_cpu(tvec_bases, cpu);
per_cpu 1436 kernel/timer.c old_base = per_cpu(tvec_bases, cpu);
per_cpu 357 kernel/trace/ftrace.c per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
per_cpu 358 kernel/trace/ftrace.c if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
per_cpu 392 kernel/trace/ftrace.c per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
per_cpu 3064 kernel/trace/trace.c data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
per_cpu 3065 kernel/trace/trace.c max_tr.data[i] = &per_cpu(max_data, i);
per_cpu 86 kernel/trace/trace_irqsoff.c if (likely(!per_cpu(tracing_cpu, cpu)))
per_cpu 193 kernel/trace/trace_irqsoff.c if (per_cpu(tracing_cpu, cpu))
per_cpu 212 kernel/trace/trace_irqsoff.c per_cpu(tracing_cpu, cpu) = 1;
per_cpu 227 kernel/trace/trace_irqsoff.c if (unlikely(per_cpu(tracing_cpu, cpu)))
per_cpu 228 kernel/trace/trace_irqsoff.c per_cpu(tracing_cpu, cpu) = 0;
per_cpu 201 kernel/trace/trace_sysprof.c struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
per_cpu 224 kernel/trace/trace_sysprof.c struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
per_cpu 1220 lib/radix-tree.c rtp = &per_cpu(radix_tree_preloads, cpu);
per_cpu 98 lib/random32.c struct rnd_state *state = &per_cpu(net_rand_state, i);
per_cpu 113 lib/random32.c struct rnd_state *state = &per_cpu(net_rand_state,i);
per_cpu 141 lib/random32.c struct rnd_state *state = &per_cpu(net_rand_state,i);
per_cpu 97 mm/quicklist.c ql = per_cpu(quicklist, cpu);
per_cpu 908 mm/slab.c per_cpu(reap_node, cpu) = node;
per_cpu 935 mm/slab.c struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
per_cpu 1350 mm/slab.c cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
per_cpu 1352 mm/slab.c per_cpu(reap_work, cpu).work.func = NULL;
per_cpu 1967 mm/slub.c struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
per_cpu 1970 mm/slub.c per_cpu(kmem_cache_cpu_free, cpu) =
per_cpu 1987 mm/slub.c if (c < per_cpu(kmem_cache_cpu, cpu) ||
per_cpu 1988 mm/slub.c c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
per_cpu 1992 mm/slub.c c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
per_cpu 1993 mm/slub.c per_cpu(kmem_cache_cpu_free, cpu) = c;
per_cpu 2041 mm/slub.c free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
per_cpu 3666 mm/slub.c unsigned long *per_cpu;
per_cpu 3671 mm/slub.c per_cpu = nodes + nr_node_ids;
per_cpu 3693 mm/slub.c per_cpu[c->node]++;
per_cpu 222 mm/swap.c pvec = &per_cpu(lru_add_pvecs, cpu);
per_cpu 226 mm/swap.c pvec = &per_cpu(lru_add_active_pvecs, cpu);
per_cpu 230 mm/swap.c pvec = &per_cpu(lru_rotate_pvecs, cpu);
per_cpu 522 mm/swap.c committed = &per_cpu(committed_space, (long)hcpu);
per_cpu 31 mm/vmstat.c struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
per_cpu 60 mm/vmstat.c struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
per_cpu 838 mm/vmstat.c struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
per_cpu 861 mm/vmstat.c cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
per_cpu 862 mm/vmstat.c per_cpu(vmstat_work, cpu).work.func = NULL;
per_cpu 2659 net/core/dev.c rc = &per_cpu(netdev_rx_stat, *pos);
per_cpu 4556 net/core/dev.c sd = &per_cpu(softnet_data, cpu);
per_cpu 4557 net/core/dev.c oldsd = &per_cpu(softnet_data, oldcpu);
per_cpu 4601 net/core/dev.c rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
per_cpu 4616 net/core/dev.c per_cpu(softnet_data, cpu).net_dma = chan;
per_cpu 4892 net/core/dev.c queue = &per_cpu(softnet_data, i);
per_cpu 44 net/core/flow.c #define flow_table(cpu) (per_cpu(flow_tables, cpu))
per_cpu 58 net/core/flow.c (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
per_cpu 60 net/core/flow.c (per_cpu(flow_hash_info, cpu).hash_rnd)
per_cpu 62 net/core/flow.c (per_cpu(flow_hash_info, cpu).count)
per_cpu 74 net/core/flow.c #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
per_cpu 2004 net/core/sock.c res += per_cpu(prot_inuse, cpu).val[idx];
per_cpu 434 net/ipv4/route.c return &per_cpu(rt_cache_stat, cpu);
per_cpu 447 net/ipv4/route.c return &per_cpu(rt_cache_stat, cpu);
per_cpu 2310 net/socket.c counter += per_cpu(sockets_in_use, cpu);
per_cpu 77 security/selinux/avc.c per_cpu(avc_cache_stats, get_cpu()).field++; \
per_cpu 1213 security/selinux/selinuxfs.c return &per_cpu(avc_cache_stats, cpu);
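Every reference above uses the same small per-CPU API. As a rough, illustrative sketch only (the variable and function names below are hypothetical and are not taken from any of the files listed): a per-CPU variable is declared once with DEFINE_PER_CPU(), a specific CPU's instance is named with per_cpu(var, cpu), and the local CPU's instance is updated through get_cpu_var()/put_cpu_var(), which disable and re-enable preemption around the access.

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* One counter instance per possible CPU (hypothetical example). */
static DEFINE_PER_CPU(unsigned long, demo_event_count);

/* Bump this CPU's counter; get_cpu_var() pins us to the current CPU. */
static void demo_count_event(void)
{
	get_cpu_var(demo_event_count)++;
	put_cpu_var(demo_event_count);
}

/* Sum every CPU's counter by naming each instance explicitly. */
static unsigned long demo_sum_events(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(demo_event_count, cpu);
	return sum;
}

The cross-CPU read in demo_sum_events() mirrors the summation loops seen above (for example in kernel/fork.c and net/socket.c), where one CPU walks every other CPU's instance to produce a global total.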