cpus_weight 609 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) {
cpus_weight 18 arch/x86/kernel/cpu/proc.c cpus_weight(per_cpu(cpu_core_map, cpu)));
cpus_weight 60 arch/x86/kernel/cpu/proc.c cpus_weight(per_cpu(cpu_core_map, cpu)));
cpus_weight 1620 arch/x86/kernel/io_apic_64.c cfg->move_cleanup_count = cpus_weight(cleanup_mask);
cpus_weight 1745 arch/x86/kernel/io_apic_64.c cfg->move_cleanup_count = cpus_weight(cleanup_mask);
cpus_weight 2452 arch/x86/kernel/io_apic_64.c cfg->move_cleanup_count = cpus_weight(cleanup_mask);
cpus_weight 486 arch/x86/kernel/smpboot.c if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
cpus_weight 1314 arch/x86/kernel/smpboot.c if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
cpus_weight 440 arch/x86/mm/mmio-mod.c if (cpus_weight(downed_cpus) == 0)
cpus_weight 86 include/asm-m32r/smp.h return cpus_weight(cpu_callout_map);
cpus_weight 155 include/asm-x86/es7000/apic.h num_bits_set = cpus_weight(cpumask);
cpus_weight 152 include/asm-x86/smp.h return cpus_weight(cpu_callout_map);
cpus_weight 148 include/asm-x86/summit/apic.h num_bits_set = cpus_weight(cpumask);
cpus_weight 429 include/linux/cpumask.h #define cpus_weight_nr(cpumask) cpus_weight(cpumask)
cpus_weight 44 include/linux/topology.h cpus_weight(*__tmp__); \
cpus_weight 1004 kernel/cpuset.c fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */
cpus_weight 1527 kernel/sched.c rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
cpus_weight 2181 kernel/sched.c weight = cpus_weight(span);
cpus_weight 2183 kernel/sched.c if (weight <= cpus_weight(tmp->span))
cpus_weight 3827 kernel/sched.c if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
cpus_weight 4022 kernel/sched.c cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
cpus_weight 5970 kernel/sched.c p->rt.nr_cpus_allowed = cpus_weight(*new_mask);
cpus_weight 6691 kernel/sched.c if (!cpus_weight(group->cpumask)) {
cpus_weight 6755 kernel/sched.c if (cpus_weight(sd->span) == 1)
cpus_weight 7431 kernel/sched.c if (cpus_weight(*cpu_map) >
cpus_weight 7432 kernel/sched.c SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) {
cpus_weight 244 kernel/sched_rt.c weight = cpus_weight(rd->span);
cpus_weight 1311 kernel/sched_rt.c int weight = cpus_weight(*new_mask);
cpus_weight 338 kernel/smp.c num_cpus = cpus_weight(mask);
cpus_weight 4611 net/core/dev.c n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
cpus_weight 4613 net/core/dev.c cpus_weight(net_dma->channel_mask)) ? 1 : 0));
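
The callers above all use cpus_weight() the same way: to count how many CPUs are set in a cpumask, typically to test for "exactly one CPU", "no CPUs", or to scale a quantity by the number of CPUs in a scheduling domain span. Below is a minimal, self-contained userspace sketch of the idea, assuming only that cpus_weight() reduces to a population count over the cpumask bitmap; it is not the kernel implementation, and the names demo_cpumask_t, demo_cpu_set() and demo_cpus_weight() are invented for this example.

/*
 * Sketch of what cpus_weight() computes: the number of set bits
 * (i.e. CPUs) in a fixed-size cpumask bitmap.
 */
#include <stdio.h>

#define NR_CPUS_DEMO       64
#define BITS_PER_LONG_DEMO (8 * sizeof(unsigned long))
#define LONGS_NEEDED       ((NR_CPUS_DEMO + BITS_PER_LONG_DEMO - 1) / BITS_PER_LONG_DEMO)

typedef struct { unsigned long bits[LONGS_NEEDED]; } demo_cpumask_t;

/* Mark one CPU as present in the mask. */
static void demo_cpu_set(int cpu, demo_cpumask_t *mask)
{
	mask->bits[cpu / BITS_PER_LONG_DEMO] |= 1UL << (cpu % BITS_PER_LONG_DEMO);
}

/* Population count over the whole mask -- the core of cpus_weight(). */
static int demo_cpus_weight(const demo_cpumask_t *mask)
{
	size_t i;
	int weight = 0;

	for (i = 0; i < LONGS_NEEDED; i++)
		weight += __builtin_popcountl(mask->bits[i]);
	return weight;
}

int main(void)
{
	demo_cpumask_t mask = { { 0 } };

	demo_cpu_set(0, &mask);
	demo_cpu_set(3, &mask);

	printf("weight = %d\n", demo_cpus_weight(&mask));

	/* The common idiom from the listing: test for exactly one CPU. */
	if (demo_cpus_weight(&mask) == 1)
		printf("single CPU in mask\n");
	else
		printf("zero or multiple CPUs in mask\n");
	return 0;
}

The "== 1" and "== 0" tests in smpboot.c, acpi-cpufreq.c and mmio-mod.c, and the weight-scaled expressions in kernel/sched.c, follow this pattern directly.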