nr_cpu_ids       1707 arch/x86/kernel/apic_64.c 		else if (i < nr_cpu_ids) {
nr_cpu_ids        583 arch/x86/kernel/cpu/mcheck/mce_64.c 	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
nr_cpu_ids        163 arch/x86/kernel/cpu/proc.c 	if ((*pos) < nr_cpu_ids && cpu_online(*pos))
nr_cpu_ids        226 arch/x86/kernel/genapic_flat_64.c 	if ((unsigned)cpu < nr_cpu_ids)
nr_cpu_ids        169 arch/x86/kernel/genx2apic_uv_x.c 	if ((unsigned)cpu < nr_cpu_ids)
nr_cpu_ids        142 arch/x86/kernel/nmi.c 	prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
nr_cpu_ids        111 arch/x86/kernel/setup_percpu.c 		unsigned long tsize = nr_cpu_ids * sizeof(void *);
nr_cpu_ids        112 arch/x86/kernel/setup_percpu.c 		unsigned long asize = size * (nr_cpu_ids - 1);
nr_cpu_ids        182 arch/x86/kernel/setup_percpu.c 		NR_CPUS, nr_cpu_ids, nr_node_ids);
nr_cpu_ids       1060 arch/x86/kernel/smpboot.c 	if (def_to_bigsmp && nr_cpu_ids > 8) {
nr_cpu_ids       1082 arch/x86/kernel/smpboot.c 		nr_cpu_ids = 8;
nr_cpu_ids       1299 arch/x86/kernel/smpboot.c 	nr_cpu_ids = possible;
nr_cpu_ids       1234 fs/ext4/ext4.h #define EXT4_FREEBLOCKS_WATERMARK (4 * (FBC_BATCH * nr_cpu_ids))
nr_cpu_ids         40 include/linux/backing-dev.h #define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
nr_cpu_ids        146 include/linux/backing-dev.h 	return nr_cpu_ids * BDI_STAT_BATCH;
nr_cpu_ids        412 include/linux/cpumask.h extern int nr_cpu_ids;
nr_cpu_ids        436 include/linux/cpumask.h #define cpus_weight_nr(cpumask)	__cpus_weight(&(cpumask), nr_cpu_ids)
nr_cpu_ids        440 include/linux/cpumask.h 		(cpu) < nr_cpu_ids; )
nr_cpu_ids        393 include/linux/netfilter/x_tables.h 			  + nr_cpu_ids * sizeof(char *))
nr_cpu_ids        373 init/main.c    int nr_cpu_ids __read_mostly = NR_CPUS;
nr_cpu_ids        384 init/main.c    	nr_cpu_ids = highest_cpu + 1;
nr_cpu_ids       4012 kernel/sched.c 			if (ilb < nr_cpu_ids)
nr_cpu_ids       6129 kernel/sched.c 		if (dest_cpu >= nr_cpu_ids)
nr_cpu_ids       6133 kernel/sched.c 		if (dest_cpu >= nr_cpu_ids) {
nr_cpu_ids       7974 kernel/sched.c 	sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
nr_cpu_ids       8123 kernel/sched.c 	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
nr_cpu_ids       8126 kernel/sched.c 	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
nr_cpu_ids       8140 kernel/sched.c 		ptr += nr_cpu_ids * sizeof(void **);
nr_cpu_ids       8143 kernel/sched.c 		ptr += nr_cpu_ids * sizeof(void **);
nr_cpu_ids       8147 kernel/sched.c 		ptr += nr_cpu_ids * sizeof(void **);
nr_cpu_ids       8150 kernel/sched.c 		ptr += nr_cpu_ids * sizeof(void **);
nr_cpu_ids       8155 kernel/sched.c 		ptr += nr_cpu_ids * sizeof(void **);
nr_cpu_ids       8158 kernel/sched.c 		ptr += nr_cpu_ids * sizeof(void **);
nr_cpu_ids       8162 kernel/sched.c 		ptr += nr_cpu_ids * sizeof(void **);
nr_cpu_ids       8165 kernel/sched.c 		ptr += nr_cpu_ids * sizeof(void **);
nr_cpu_ids       8477 kernel/sched.c 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
nr_cpu_ids       8480 kernel/sched.c 	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
nr_cpu_ids       8565 kernel/sched.c 	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
nr_cpu_ids       8568 kernel/sched.c 	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
nr_cpu_ids        150 kernel/time/clocksource.c 		if (next_cpu >= nr_cpu_ids)
nr_cpu_ids         21 lib/cpumask.c  	return min_t(int, nr_cpu_ids,
nr_cpu_ids         22 lib/cpumask.c  				find_next_bit(srcp->bits, nr_cpu_ids, n+1));
nr_cpu_ids        187 lib/proportions.c #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
nr_cpu_ids        239 lib/proportions.c 		if (val < (nr_cpu_ids * PROP_BATCH))
nr_cpu_ids        116 mm/allocpercpu.c 	size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
nr_cpu_ids       1847 mm/slub.c      		min_objects = 4 * (fls(nr_cpu_ids) + 1);
nr_cpu_ids       3022 mm/slub.c      				nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
nr_cpu_ids       3032 mm/slub.c      		nr_cpu_ids, nr_node_ids);
nr_cpu_ids       4076 mm/slub.c      	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
nr_cpu_ids       1929 mm/vmscan.c    			if (any_online_cpu(*mask) < nr_cpu_ids)
nr_cpu_ids        872 net/bridge/netfilter/ebtables.c 			vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
nr_cpu_ids        985 net/bridge/netfilter/ebtables.c 	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
nr_cpu_ids       1124 net/bridge/netfilter/ebtables.c 	countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
nr_cpu_ids       2657 net/core/dev.c 	while (*pos < nr_cpu_ids)
nr_cpu_ids       4642 net/core/dev.c 		for (i = 0; i < nr_cpu_ids; i++)
nr_cpu_ids       4657 net/core/dev.c 		for (i = 0; i < nr_cpu_ids; i++)
nr_cpu_ids       4684 net/core/dev.c 	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
nr_cpu_ids       1149 net/ipv4/icmp.c 		kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
nr_cpu_ids        808 net/ipv6/icmp.c 		kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
nr_cpu_ids        179 net/sunrpc/svc.c 	unsigned int maxpools = nr_cpu_ids;
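
The references above reduce to a handful of idioms: nr_cpu_ids (defined in init/main.c as NR_CPUS and trimmed to highest_cpu + 1 during boot) is the number of valid CPU IDs, so callers use it to size per-CPU allocations, to bound CPU-indexed loops, and to range-check a CPU number before indexing with it. Below is a minimal sketch of those idioms, assuming a kernel build context; the example_* names are illustrative only, not kernel APIs, and this is not taken from any file listed above.

/*
 * Minimal sketch of the recurring nr_cpu_ids idioms seen in the
 * listing: size by the runtime bound nr_cpu_ids rather than the
 * compile-time NR_CPUS, bound CPU-indexed loops, and range-check a
 * CPU number.  example_* names are hypothetical, not kernel APIs.
 */
#include <linux/cpumask.h>	/* nr_cpu_ids, cpu_online() */
#include <linux/slab.h>		/* kzalloc(), GFP_KERNEL */

static unsigned long *example_alloc_counters(void)
{
	/* One slot per possible CPU ID; valid IDs are 0..nr_cpu_ids-1. */
	return kzalloc(nr_cpu_ids * sizeof(unsigned long), GFP_KERNEL);
}

static unsigned long example_sum_online(const unsigned long *counters)
{
	unsigned long total = 0;
	int cpu;

	/* nr_cpu_ids, not NR_CPUS, is the correct runtime loop bound. */
	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		if (cpu_online(cpu))
			total += counters[cpu];
	return total;
}

static int example_cpu_id_valid(int cpu)
{
	/*
	 * The unsigned cast rejects negative IDs in a single compare,
	 * matching the "(unsigned)cpu < nr_cpu_ids" test seen above in
	 * genapic_flat_64.c and genx2apic_uv_x.c.
	 */
	return (unsigned int)cpu < nr_cpu_ids;
}

Sizing by nr_cpu_ids instead of NR_CPUS keeps allocations proportional to the CPUs that can actually appear on the running system, which is the common motivation behind most of the kmalloc/kzalloc/vmalloc call sites listed.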