NR_CPUS 1506 arch/x86/kernel/apic_32.c if (num_processors >= NR_CPUS) {
NR_CPUS 1508 arch/x86/kernel/apic_32.c " Processor ignored.\n", NR_CPUS);
NR_CPUS 1444 arch/x86/kernel/apic_64.c if (num_processors >= NR_CPUS) {
NR_CPUS 1446 arch/x86/kernel/apic_64.c " Processor ignored.\n", NR_CPUS);
NR_CPUS 1702 arch/x86/kernel/apic_64.c for (i = 0; i < NR_CPUS; i++) {
NR_CPUS 356 arch/x86/kernel/cpu/common.c if (smp_num_siblings > NR_CPUS) {
NR_CPUS 33 arch/x86/kernel/cpu/cpufreq/e_powersaver.c static struct eps_cpu_data *eps_cpu[NR_CPUS];
NR_CPUS 52 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c static int has_N44_O17_errata[NR_CPUS];
NR_CPUS 652 arch/x86/kernel/cpu/mcheck/mce_amd_64.c if (cpu >= NR_CPUS)
NR_CPUS 124 arch/x86/kernel/cpuid.c if (cpu >= NR_CPUS || !cpu_online(cpu)) {
NR_CPUS 101 arch/x86/kernel/genx2apic_cluster.c if ((unsigned)cpu < NR_CPUS)
NR_CPUS 99 arch/x86/kernel/genx2apic_phys.c if ((unsigned)cpu < NR_CPUS)
NR_CPUS 37 arch/x86/kernel/head64.c static struct x8664_pda *__cpu_pda[NR_CPUS] __initdata;
NR_CPUS 39 arch/x86/kernel/head64.c static struct x8664_pda *__cpu_pda[NR_CPUS] __read_mostly;
NR_CPUS 391 arch/x86/kernel/io_apic_32.c } irq_cpu_data[NR_CPUS];
NR_CPUS 427 arch/x86/kernel/io_apic_32.c if (cpu >= NR_CPUS)
NR_CPUS 432 arch/x86/kernel/io_apic_32.c cpu = NR_CPUS-1;
NR_CPUS 83 arch/x86/kernel/irq_32.c static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
NR_CPUS 84 arch/x86/kernel/irq_32.c static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
NR_CPUS 86 arch/x86/kernel/irq_32.c static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
NR_CPUS 87 arch/x86/kernel/irq_32.c static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
NR_CPUS 405 arch/x86/kernel/irq_32.c if (any_online_cpu(mask) == NR_CPUS) {
NR_CPUS 440 arch/x86/kernel/kgdb.c static int was_in_debug_nmi[NR_CPUS];
NR_CPUS 107 arch/x86/kernel/microcode_core.c struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
NR_CPUS 139 arch/x86/kernel/msr.c if (cpu >= NR_CPUS || !cpu_online(cpu)) {
NR_CPUS 149 arch/x86/kernel/numaq_32.c int quad_local_to_mp_bus_id [NR_CPUS/4][4];
NR_CPUS 424 arch/x86/kernel/reboot.c if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
NR_CPUS 88 arch/x86/kernel/setup_percpu.c unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
NR_CPUS 182 arch/x86/kernel/setup_percpu.c NR_CPUS, nr_cpu_ids, nr_node_ids);
NR_CPUS 92 arch/x86/kernel/smpboot.c static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
NR_CPUS 141 arch/x86/kernel/smpboot.c int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
NR_CPUS 170 arch/x86/kernel/smpboot.c u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
NR_CPUS 171 arch/x86/kernel/smpboot.c { [0 ... NR_CPUS-1] = BAD_APICID };
NR_CPUS 1159 arch/x86/kernel/smpboot.c c->cpu_index = NR_CPUS;
NR_CPUS 1290 arch/x86/kernel/smpboot.c if (possible > NR_CPUS)
NR_CPUS 1291 arch/x86/kernel/smpboot.c possible = NR_CPUS;
NR_CPUS 34 arch/x86/mach-voyager/voyager_smp.c static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned =
NR_CPUS 35 arch/x86/mach-voyager/voyager_smp.c {[0 ... NR_CPUS-1] = ~0UL };
NR_CPUS 46 arch/x86/mach-voyager/voyager_smp.c struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS] __cacheline_aligned;
NR_CPUS 223 arch/x86/mach-voyager/voyager_smp.c static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
NR_CPUS 226 arch/x86/mach-voyager/voyager_smp.c static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };
NR_CPUS 238 arch/x86/mach-voyager/voyager_smp.c static long vic_intr_count[NR_CPUS] __cacheline_aligned = { 0 };
NR_CPUS 239 arch/x86/mach-voyager/voyager_smp.c static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 };
NR_CPUS 242 arch/x86/mach-voyager/voyager_smp.c static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned;
NR_CPUS 359 arch/x86/mach-voyager/voyager_smp.c for (i = 0; i < NR_CPUS; i++) {
NR_CPUS 671 arch/x86/mach-voyager/voyager_smp.c for (i = 0; i < NR_CPUS; i++) {
NR_CPUS 1226 arch/x86/mach-voyager/voyager_smp.c for (i = 0; i < NR_CPUS; ++i)
NR_CPUS 1256 arch/x86/mach-voyager/voyager_smp.c for (i = 0; i < NR_CPUS; i++)
NR_CPUS 281 arch/x86/mm/numa_64.c for (i = 0; i < NR_CPUS; i++) {
NR_CPUS 552 arch/x86/mm/numa_64.c for (i = 0; i < NR_CPUS; i++)
NR_CPUS 385 arch/x86/mm/srat_64.c for (i = 0; i < NR_CPUS; i++) {
NR_CPUS 161 arch/x86/xen/smp.c for (i = 0; i < NR_CPUS; i++) {
NR_CPUS 199 arch/x86/xen/smp.c for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
NR_CPUS 237 crypto/async_tx/async_tx.c if (NR_CPUS > 1)
NR_CPUS 91 fs/gfs2/glock.c # if NR_CPUS >= 32
NR_CPUS 93 fs/gfs2/glock.c # elif NR_CPUS >= 16
NR_CPUS 95 fs/gfs2/glock.c # elif NR_CPUS >= 8
NR_CPUS 97 fs/gfs2/glock.c # elif NR_CPUS >= 4
NR_CPUS 22 include/asm-generic/percpu.h extern unsigned long __per_cpu_offset[NR_CPUS];
NR_CPUS 60 include/asm-m32r/smp.h extern volatile int cpu_2_physid[NR_CPUS];
NR_CPUS 15 include/asm-mn10300/mmu.h unsigned long tlbpid[NR_CPUS]; /* TLB PID for this process on
NR_CPUS 35 include/asm-mn10300/mmu_context.h extern unsigned long mmu_context_cache[NR_CPUS];
NR_CPUS 96 include/asm-mn10300/mmu_context.h int num_cpus = NR_CPUS, i;
NR_CPUS 112 include/asm-parisc/processor.h extern struct cpuinfo_parisc cpu_data[NR_CPUS];
NR_CPUS 25 include/asm-um/smp.h extern struct task_struct *idle_threads[NR_CPUS];
NR_CPUS 15 include/asm-x86/bigsmp/apic.h static unsigned long cpu = NR_CPUS;
NR_CPUS 17 include/asm-x86/bigsmp/apic.h if (cpu >= NR_CPUS)
NR_CPUS 21 include/asm-x86/bigsmp/apic.h } while (cpu >= NR_CPUS);
NR_CPUS 89 include/asm-x86/bigsmp/apic.h if (mps_cpu < NR_CPUS)
NR_CPUS 104 include/asm-x86/bigsmp/apic.h if (cpu >= NR_CPUS)
NR_CPUS 102 include/asm-x86/es7000/apic.h else if (mps_cpu < NR_CPUS)
NR_CPUS 122 include/asm-x86/es7000/apic.h if (cpu >= NR_CPUS)
NR_CPUS 157 include/asm-x86/es7000/apic.h if (num_bits_set == NR_CPUS)
NR_CPUS 114 include/asm-x86/irq_vectors.h # if NR_CPUS < MAX_IO_APICS
NR_CPUS 115 include/asm-x86/irq_vectors.h # define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
NR_CPUS 127 include/asm-x86/irq_vectors.h # if (224 >= 32 * NR_CPUS)
NR_CPUS 130 include/asm-x86/irq_vectors.h # define NR_IRQ_VECTORS (32 * NR_CPUS)
NR_CPUS 18 include/asm-x86/lguest.h #define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS)
NR_CPUS 118 include/asm-x86/mach-default/mach_apic.h if (mps_cpu < NR_CPUS && cpu_present(mps_cpu))
NR_CPUS 20 include/asm-x86/mpspec.h extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
NR_CPUS 20 include/asm-x86/mpspec_def.h # if NR_CPUS <= 255
NR_CPUS 68 include/asm-x86/numaq/apic.h if (cpu >= NR_CPUS)
NR_CPUS 183 include/asm-x86/percpu.h __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
NR_CPUS 184 include/asm-x86/percpu.h { [0 ... NR_CPUS-1] = _initvalue }; \
NR_CPUS 58 include/asm-x86/spinlock.h #if (NR_CPUS < 256)
NR_CPUS 56 include/asm-x86/summit/apic.h for (count = 0, i = NR_CPUS; --i >= 0; ) {
NR_CPUS 101 include/asm-x86/summit/apic.h if (cpu >= NR_CPUS)
NR_CPUS 111 include/asm-x86/summit/apic.h if (mps_cpu < NR_CPUS)
NR_CPUS 150 include/asm-x86/summit/apic.h if (num_bits_set == NR_CPUS)
NR_CPUS 497 include/asm-x86/voyager.h extern struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS];
NR_CPUS 18 include/linux/blockgroup_lock.h #if NR_CPUS >= 32
NR_CPUS 20 include/linux/blockgroup_lock.h #elif NR_CPUS >= 16
NR_CPUS 22 include/linux/blockgroup_lock.h #elif NR_CPUS >= 8
NR_CPUS 24 include/linux/blockgroup_lock.h #elif NR_CPUS >= 4
NR_CPUS 26 include/linux/blockgroup_lock.h #elif NR_CPUS >= 2
NR_CPUS 141 include/linux/cpumask.h typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
NR_CPUS 156 include/linux/cpumask.h #define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
NR_CPUS 162 include/linux/cpumask.h #define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
NR_CPUS 177 include/linux/cpumask.h #define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
NR_CPUS 184 include/linux/cpumask.h #define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
NR_CPUS 191 include/linux/cpumask.h #define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
NR_CPUS 199 include/linux/cpumask.h __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
NR_CPUS 206 include/linux/cpumask.h #define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
NR_CPUS 213 include/linux/cpumask.h #define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
NR_CPUS 220 include/linux/cpumask.h #define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
NR_CPUS 227 include/linux/cpumask.h #define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
NR_CPUS 234 include/linux/cpumask.h #define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
NR_CPUS 240 include/linux/cpumask.h #define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
NR_CPUS 246 include/linux/cpumask.h #define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
NR_CPUS 253 include/linux/cpumask.h __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
NR_CPUS 261 include/linux/cpumask.h __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
NR_CPUS 276 include/linux/cpumask.h cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
NR_CPUS 293 include/linux/cpumask.h #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
NR_CPUS 295 include/linux/cpumask.h #if NR_CPUS <= BITS_PER_LONG
NR_CPUS 299 include/linux/cpumask.h [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
NR_CPUS 308 include/linux/cpumask.h [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
NR_CPUS 309 include/linux/cpumask.h [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
NR_CPUS 320 include/linux/cpumask.h [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
NR_CPUS 330 include/linux/cpumask.h #if NR_CPUS > BITS_PER_LONG
NR_CPUS 340 include/linux/cpumask.h __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
NR_CPUS 348 include/linux/cpumask.h __cpumask_parse_user((ubuf), (ulen), &(dst), NR_CPUS)
NR_CPUS 356 include/linux/cpumask.h __cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
NR_CPUS 363 include/linux/cpumask.h #define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS)
NR_CPUS 370 include/linux/cpumask.h __cpu_remap((oldbit), &(old), &(new), NR_CPUS)
NR_CPUS 378 include/linux/cpumask.h __cpus_remap(&(dst), &(src), &(old), &(new), NR_CPUS)
NR_CPUS 386 include/linux/cpumask.h __cpus_onto(&(dst), &(orig), &(relmap), NR_CPUS)
NR_CPUS 394 include/linux/cpumask.h __cpus_fold(&(dst), &(orig), sz, NR_CPUS)
NR_CPUS 401 include/linux/cpumask.h #if NR_CPUS == 1
NR_CPUS 423 include/linux/cpumask.h (cpu) < NR_CPUS; )
NR_CPUS 426 include/linux/cpumask.h #if NR_CPUS <= 64
NR_CPUS 506 include/linux/cpumask.h #if NR_CPUS > 1
NR_CPUS 140 include/linux/init_task.h .nr_cpus_allowed = NR_CPUS, \
NR_CPUS 24 include/linux/mm_types.h #define USE_SPLIT_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
NR_CPUS 227 include/linux/mmzone.h struct per_cpu_pageset *pageset[NR_CPUS];
NR_CPUS 229 include/linux/mmzone.h struct per_cpu_pageset pageset[NR_CPUS];
NR_CPUS 346 include/linux/module.h struct module_ref ref[NR_CPUS];
NR_CPUS 27 include/linux/percpu_counter.h #if NR_CPUS >= 16
NR_CPUS 28 include/linux/percpu_counter.h #define FBC_BATCH (NR_CPUS*2)
NR_CPUS 30 include/linux/percpu_counter.h #define FBC_BATCH (NR_CPUS*4)
NR_CPUS 68 include/linux/relay.h struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
NR_CPUS 55 include/linux/seq_file.h return seq_bitmap(m, mask->bits, NR_CPUS);
NR_CPUS 106 include/linux/slub_def.h struct kmem_cache_cpu *cpu_slab[NR_CPUS];
NR_CPUS 131 init/main.c unsigned int __initdata setup_max_cpus = NR_CPUS;
NR_CPUS 167 init/main.c #define setup_max_cpus NR_CPUS
NR_CPUS 367 init/main.c #if NR_CPUS > BITS_PER_LONG
NR_CPUS 373 init/main.c int nr_cpu_ids __read_mostly = NR_CPUS;
NR_CPUS 388 init/main.c unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
NR_CPUS 145 kernel/audit.c #define AUDIT_MAXFREE (2*NR_CPUS)
NR_CPUS 459 kernel/compat.c if (NR_CPUS <= BITS_PER_COMPAT_LONG)
NR_CPUS 492 kernel/cpu.c const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
NR_CPUS 1587 kernel/cpuset.c .max_write_len = (100U + 6 * NR_CPUS),
NR_CPUS 1118 kernel/kexec.c if ((cpu < 0) || (cpu >= NR_CPUS))
NR_CPUS 75 kernel/kgdb.c } kgdb_info[NR_CPUS];
NR_CPUS 124 kernel/kgdb.c static atomic_t passive_cpu_wait[NR_CPUS];
NR_CPUS 125 kernel/kgdb.c static atomic_t cpu_in_kgdb[NR_CPUS];
NR_CPUS 1468 kernel/kgdb.c for (i = 0; i < NR_CPUS; i++)
NR_CPUS 1514 kernel/kgdb.c for (i = NR_CPUS-1; i >= 0; i--)
NR_CPUS 939 kernel/kprobes.c rp->maxactive = max(10, 2 * NR_CPUS);
NR_CPUS 941 kernel/kprobes.c rp->maxactive = NR_CPUS;
NR_CPUS 551 kernel/module.c for (i = 0; i < NR_CPUS; i++)
NR_CPUS 694 kernel/module.c for (i = 0; i < NR_CPUS; i++)
NR_CPUS 799 kernel/relay.c if (cpu >= NR_CPUS || !chan->buf[cpu])
NR_CPUS 3885 kernel/sched.c if (interval > HZ*NR_CPUS/10)
NR_CPUS 3886 kernel/sched.c interval = HZ*NR_CPUS/10;
NR_CPUS 6905 kernel/sched.c static int __initdata ints[NR_CPUS];
NR_CPUS 6911 kernel/sched.c if (ints[i] < NR_CPUS)
NR_CPUS 7318 kernel/sched.c #if NR_CPUS > 128
NR_CPUS 23 kernel/sched_cpupri.h int cpu_to_pri[NR_CPUS];
NR_CPUS 968 kernel/sched_rt.c if (first != NR_CPUS)
NR_CPUS 12 kernel/sched_stats.h int mask_len = NR_CPUS/32 * 9;
NR_CPUS 225 kernel/smp.c } else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) {
NR_CPUS 45 kernel/softirq.c irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
NR_CPUS 115 kernel/stop_machine.c threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
NR_CPUS 36 kernel/taskstats.c #define TASKSTATS_CPUMASK_MAXLEN (100+6*NR_CPUS)
NR_CPUS 304 kernel/time/tick-common.c tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu :
NR_CPUS 1364 kernel/timer.c static char __cpuinitdata tvec_base_done[NR_CPUS];
NR_CPUS 2085 kernel/trace/trace.c static char mask_str[NR_CPUS + 1];
NR_CPUS 2101 kernel/trace/trace.c count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
NR_CPUS 132 kernel/trace/trace.h struct trace_array_cpu *data[NR_CPUS];
NR_CPUS 174 kernel/trace/trace.h long last_overrun[NR_CPUS];
NR_CPUS 175 kernel/trace/trace.h long overrun[NR_CPUS];
NR_CPUS 187 kernel/trace/trace.h unsigned long next_idx[NR_CPUS];
NR_CPUS 188 kernel/trace/trace.h struct list_head *next_page[NR_CPUS];
NR_CPUS 189 kernel/trace/trace.h unsigned next_page_idx[NR_CPUS];
NR_CPUS 8 lib/cpumask.c return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS));
NR_CPUS 14 lib/cpumask.c return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1));
NR_CPUS 18 lib/cpumask.c #if NR_CPUS > 64
NR_CPUS 62 mm/memcontrol.c struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
NR_CPUS 2715 mm/page_alloc.c static struct per_cpu_pageset boot_pageset[NR_CPUS];
NR_CPUS 2863 mm/page_alloc.c for (cpu = 0; cpu < NR_CPUS; cpu++) {
NR_CPUS 382 mm/slab.c struct array_cache *array[NR_CPUS];
NR_CPUS 3895 mm/slab.c struct array_cache *new[NR_CPUS];
NR_CPUS 495 mm/swap.c #define ACCT_THRESHOLD max(16, NR_CPUS * 2)
NR_CPUS 979 net/bridge/netfilter/ebtables.c if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS -
NR_CPUS 2426 net/core/neighbour.c for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
NR_CPUS 2441 net/core/neighbour.c for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
NR_CPUS 294 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
NR_CPUS 309 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
NR_CPUS 214 net/ipv4/route.c # if NR_CPUS >= 32
NR_CPUS 216 net/ipv4/route.c # elif NR_CPUS >= 16
NR_CPUS 218 net/ipv4/route.c # elif NR_CPUS >= 8
NR_CPUS 220 net/ipv4/route.c # elif NR_CPUS >= 4
NR_CPUS 430 net/ipv4/route.c for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
NR_CPUS 443 net/ipv4/route.c for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
NR_CPUS 100 net/iucv/iucv.c static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
NR_CPUS 280 net/iucv/iucv.c static union iucv_param *iucv_param[NR_CPUS];
NR_CPUS 203 net/netfilter/nf_conntrack_standalone.c for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
NR_CPUS 218 net/netfilter/nf_conntrack_standalone.c for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
NR_CPUS 1209 security/selinux/selinuxfs.c for (cpu = *idx; cpu < NR_CPUS; ++cpu) {
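Three idioms account for most of the hits above: a static per-CPU array sized by NR_CPUS (e.g. eps_cpu, irq_stat, vic_tick), a range check such as "if (cpu >= NR_CPUS)" before using a CPU number as a subscript (cpuid.c, msr.c, relay.c), and a full "for (i = 0; i < NR_CPUS; i++)" scan over every possible slot. What follows is a minimal, self-contained C sketch of those patterns, not kernel code: it compiles in user space with a stand-in NR_CPUS value, and every name in it (cpu_tick, bump_tick) is illustrative rather than a kernel symbol.

#include <stdio.h>

/* Stand-in for the kernel's compile-time CPU limit (CONFIG_NR_CPUS). */
#define NR_CPUS 8

/* Idiom 1: per-CPU state as a static array sized by NR_CPUS,
 * mirroring e.g. vic_tick[NR_CPUS] in voyager_smp.c above. */
static unsigned long cpu_tick[NR_CPUS];

/* Idiom 2: range-check a CPU index before indexing,
 * mirroring the "if (cpu >= NR_CPUS)" tests in cpuid.c and msr.c. */
static int bump_tick(unsigned int cpu)
{
	if (cpu >= NR_CPUS)
		return -1;	/* reject out-of-range CPU numbers */
	cpu_tick[cpu]++;
	return 0;
}

int main(void)
{
	unsigned int i;

	bump_tick(3);
	bump_tick(NR_CPUS);	/* rejected: one past the last valid slot */

	/* Idiom 3: walk every possible slot, as the for-loops above do. */
	for (i = 0; i < NR_CPUS; i++)
		printf("cpu %u: %lu ticks\n", i, cpu_tick[i]);

	return 0;
}

The listing itself hints at why these patterns were already on their way out at this point in the tree: NR_CPUS-sized arrays cost memory proportional to the configured maximum rather than the CPUs actually present, and entries such as "int nr_cpu_ids __read_mostly = NR_CPUS" in init/main.c and the NR_CPUS/nr_cpu_ids comparison printed by setup_percpu.c show the runtime-bounded nr_cpu_ids replacing NR_CPUS as the preferred loop limit.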