cpuid              98 arch/x86/kernel/acpi/cstate.c 	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
cpuid             206 arch/x86/kernel/alternative.c 		if (!boot_cpu_has(a->cpuid))
cpuid              45 arch/x86/kernel/cpu/addon_cpuid_features.c 		cpuid(cb->level, &regs[CR_EAX], &regs[CR_EBX],
cpuid             439 arch/x86/kernel/cpu/centaur.c 			cpuid(0x80000005, &aa, &bb, &cc, &dd);
cpuid             271 arch/x86/kernel/cpu/common.c 	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
cpuid             272 arch/x86/kernel/cpu/common.c 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
cpuid             273 arch/x86/kernel/cpu/common.c 	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
cpuid             296 arch/x86/kernel/cpu/common.c 		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
cpuid             309 arch/x86/kernel/cpu/common.c 	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
cpuid             348 arch/x86/kernel/cpu/common.c 	cpuid(1, &eax, &ebx, &ecx, &edx);
cpuid             427 arch/x86/kernel/cpu/common.c 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
cpuid             436 arch/x86/kernel/cpu/common.c 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
cpuid             459 arch/x86/kernel/cpu/common.c 		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
cpuid              81 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
cpuid             152 arch/x86/kernel/cpu/cpufreq/longrun.c 	cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
cpuid             219 arch/x86/kernel/cpu/cpufreq/longrun.c 	cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
cpuid             230 arch/x86/kernel/cpu/cpufreq/longrun.c 		cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
cpuid             199 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 	int cpuid = 0;
cpuid             207 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 	cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
cpuid             208 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c 	switch (cpuid) {
cpuid              50 arch/x86/kernel/cpu/cpufreq/powernow-k7.c 	u32 cpuid;
cpuid             141 arch/x86/kernel/cpu/cpufreq/powernow-k7.c 	cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
cpuid             478 arch/x86/kernel/cpu/cpufreq/powernow-k7.c 				if ((etuple == pst->cpuid) && check_fsb(pst->fsbspeed) &&
cpuid             483 arch/x86/kernel/cpu/cpufreq/powernow-k7.c 						 pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid);
cpuid             512 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 		cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
cpuid             518 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 		cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
cpuid             701 arch/x86/kernel/cpu/cpufreq/powernow-k8.c 		if ((psb->cpuid == 0x00000fc0) || (psb->cpuid == 0x00000fe0) ){
cpuid             189 arch/x86/kernel/cpu/cpufreq/powernow-k8.h 	u32 cpuid;
cpuid             200 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c {	.cpu_id		= cpuid,	\
cpuid             211 arch/x86/kernel/cpu/intel_cacheinfo.c 	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
cpuid             212 arch/x86/kernel/cpu/intel_cacheinfo.c 	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
cpuid             392 arch/x86/kernel/cpu/intel_cacheinfo.c 			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
cpuid             661 arch/x86/kernel/cpu/perfctr-watchdog.c 	cpuid(10, &(eax.full), &ebx, &unused, &unused);
cpuid              35 arch/x86/kernel/cpu/transmeta.c 		cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
cpuid              46 arch/x86/kernel/cpu/transmeta.c 		cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy);
cpuid              59 arch/x86/kernel/cpu/transmeta.c 		cpuid(0x80860003,
cpuid              64 arch/x86/kernel/cpu/transmeta.c 		cpuid(0x80860004,
cpuid              69 arch/x86/kernel/cpu/transmeta.c 		cpuid(0x80860005,
cpuid              74 arch/x86/kernel/cpu/transmeta.c 		cpuid(0x80860006,
cpuid             165 arch/x86/kernel/ipi.c 	int apicid, cpuid;
cpuid             174 arch/x86/kernel/ipi.c 	cpuid = convert_apicid_to_cpu(apicid);
cpuid             176 arch/x86/kernel/ipi.c 	return cpuid >= 0 ? cpuid : 0;
cpuid             307 arch/x86/kernel/paravirt.c 	.cpuid = native_cpuid,
cpuid             219 arch/x86/kernel/process.c 	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
cpuid             201 arch/x86/kernel/smpboot.c 	int cpuid, phys_id;
cpuid             216 arch/x86/kernel/smpboot.c 	cpuid = smp_processor_id();
cpuid             217 arch/x86/kernel/smpboot.c 	if (cpu_isset(cpuid, cpu_callin_map)) {
cpuid             219 arch/x86/kernel/smpboot.c 					phys_id, cpuid);
cpuid             221 arch/x86/kernel/smpboot.c 	pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
cpuid             239 arch/x86/kernel/smpboot.c 		if (cpu_isset(cpuid, cpu_callout_map))
cpuid             246 arch/x86/kernel/smpboot.c 		      __func__, cpuid);
cpuid             262 arch/x86/kernel/smpboot.c 	notify_cpu_starting(cpuid);
cpuid             272 arch/x86/kernel/smpboot.c 	pr_debug("Stack at about %p\n", &cpuid);
cpuid             277 arch/x86/kernel/smpboot.c 	smp_store_cpu_info(cpuid);
cpuid             282 arch/x86/kernel/smpboot.c 	cpu_set(cpuid, cpu_callin_map);
cpuid              63 arch/x86/kernel/vmi_32.c 	void (*cpuid)(void /* non-c */);
cpuid             175 arch/x86/kernel/vmi_32.c                       : "0" (*ax), "2" (*cx), "r" (vmi_ops.cpuid));
cpuid             779 arch/x86/kernel/vmi_32.c 	para_wrap(pv_cpu_ops.cpuid, vmi_cpuid, cpuid, CPUID);
cpuid             260 arch/x86/kvm/svm.c 	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
cpuid             266 arch/x86/kvm/svm.c 	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
cpuid             948 arch/x86/kvm/x86.c 		struct kvm_cpuid2 cpuid;
cpuid             951 arch/x86/kvm/x86.c 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
cpuid             953 arch/x86/kvm/x86.c 		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
cpuid             959 arch/x86/kvm/x86.c 		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
cpuid            1019 arch/x86/kvm/x86.c 	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
cpuid            1022 arch/x86/kvm/x86.c 	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
cpuid            1027 arch/x86/kvm/x86.c 			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
cpuid            1029 arch/x86/kvm/x86.c 	for (i = 0; i < cpuid->nent; i++) {
cpuid            1041 arch/x86/kvm/x86.c 	vcpu->arch.cpuid_nent = cpuid->nent;
cpuid            1058 arch/x86/kvm/x86.c 	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
cpuid            1062 arch/x86/kvm/x86.c 			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
cpuid            1064 arch/x86/kvm/x86.c 	vcpu->arch.cpuid_nent = cpuid->nent;
cpuid            1078 arch/x86/kvm/x86.c 	if (cpuid->nent < vcpu->arch.cpuid_nent)
cpuid            1087 arch/x86/kvm/x86.c 	cpuid->nent = vcpu->arch.cpuid_nent;
cpuid            1219 arch/x86/kvm/x86.c 	if (cpuid->nent < 1)
cpuid            1222 arch/x86/kvm/x86.c 	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
cpuid            1226 arch/x86/kvm/x86.c 	do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
cpuid            1228 arch/x86/kvm/x86.c 	for (func = 1; func <= limit && nent < cpuid->nent; ++func)
cpuid            1230 arch/x86/kvm/x86.c 				&nent, cpuid->nent);
cpuid            1232 arch/x86/kvm/x86.c 	if (nent >= cpuid->nent)
cpuid            1235 arch/x86/kvm/x86.c 	do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
cpuid            1237 arch/x86/kvm/x86.c 	for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
cpuid            1239 arch/x86/kvm/x86.c 			       &nent, cpuid->nent);
cpuid            1244 arch/x86/kvm/x86.c 	cpuid->nent = nent;
cpuid            1347 arch/x86/kvm/x86.c 		struct kvm_cpuid cpuid;
cpuid            1350 arch/x86/kvm/x86.c 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
cpuid            1352 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
cpuid            1359 arch/x86/kvm/x86.c 		struct kvm_cpuid2 cpuid;
cpuid            1362 arch/x86/kvm/x86.c 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
cpuid            1364 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
cpuid            1372 arch/x86/kvm/x86.c 		struct kvm_cpuid2 cpuid;
cpuid            1375 arch/x86/kvm/x86.c 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
cpuid            1377 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
cpuid            1382 arch/x86/kvm/x86.c 		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
cpuid             990 arch/x86/lguest/boot.c 	pv_cpu_ops.cpuid = lguest_cpuid;
cpuid             326 arch/x86/mach-voyager/voyager_smp.c 		__u8 cpuid = hard_smp_processor_id();
cpuid             332 arch/x86/mach-voyager/voyager_smp.c 			if (((cpuid >> 2) & 0x03) == i)
cpuid             423 arch/x86/mach-voyager/voyager_smp.c 	__u8 cpuid = hard_smp_processor_id();
cpuid             442 arch/x86/mach-voyager/voyager_smp.c 		    voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
cpuid             449 arch/x86/mach-voyager/voyager_smp.c 	VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid));
cpuid             451 arch/x86/mach-voyager/voyager_smp.c 	notify_cpu_starting(cpuid);
cpuid             460 arch/x86/mach-voyager/voyager_smp.c 	smp_store_cpu_info(cpuid);
cpuid             470 arch/x86/mach-voyager/voyager_smp.c 	cpu_set(cpuid, cpu_callin_map);
cpuid             475 arch/x86/mach-voyager/voyager_smp.c 	while (!cpu_isset(cpuid, smp_commenced_mask))
cpuid             481 arch/x86/mach-voyager/voyager_smp.c 	cpu_set(cpuid, cpu_online_map);
cpuid             601 arch/x86/mm/init_32.c 		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
cpuid            1202 arch/x86/xen/enlighten.c 	.cpuid = xen_cpuid,
cpuid             199 include/asm-m68k/bootinfo.h 	unsigned long cpuid;
cpuid             319 include/asm-m68k/bootinfo.h 	unsigned long cpuid;
cpuid              78 include/asm-parisc/processor.h 		unsigned long cpuid;
cpuid              94 include/asm-parisc/processor.h 	unsigned long cpuid;        /* aka slot_number or set to NO_PROC_ID */
cpuid              47 include/asm-x86/alternative.h 	u8  cpuid;		/* cpuid bit set for replacement */
cpuid             128 include/asm-x86/kvm_para.h 	cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
cpuid             138 include/asm-x86/paravirt.h 	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
cpuid             630 include/asm-x86/paravirt.h 	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
cpuid             638 include/asm-x86/processor.h 	cpuid(op, &eax, &ebx, &ecx, &edx);
cpuid             647 include/asm-x86/processor.h 	cpuid(op, &eax, &ebx, &ecx, &edx);
cpuid             656 include/asm-x86/processor.h 	cpuid(op, &eax, &ebx, &ecx, &edx);
cpuid             665 include/asm-x86/processor.h 	cpuid(op, &eax, &ebx, &ecx, &edx);
cpuid             169 include/asm-x86/xen/interface.h #define XEN_CPUID          XEN_EMULATE_PREFIX cpuid
cpuid             130 include/linux/smp.h 	WARN_ON(cpuid != 0);	\
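
Most of the call sites above share the calling convention declared in include/asm-x86/processor.h: cpuid(op, &eax, &ebx, &ecx, &edx) executes the CPUID instruction for leaf `op` and fills the four output registers. As a minimal userspace sketch (not part of the kernel tree; the inline-asm helper below is an assumption modeled on native_cpuid, not the kernel implementation), the pattern used at arch/x86/kernel/cpu/common.c lines 271-273 to read the processor brand string looks roughly like this:

/* Hypothetical standalone illustration of the cpuid() calling convention
 * seen in the listing above. Builds on x86/x86-64 with GCC or Clang only.
 */
#include <stdio.h>

static void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
                  unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	/* CPUID takes the leaf in EAX (and subleaf in ECX) and returns
	 * its results in EAX, EBX, ECX, EDX. */
	asm volatile("cpuid"
		     : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		     : "0" (*eax), "2" (*ecx));
}

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int v[13];	/* 48 brand-string bytes plus a NUL terminator */

	/* Check the maximum extended leaf first, as svm.c:260 does. */
	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
	if (eax < 0x80000004) {
		fprintf(stderr, "brand-string leaves not supported\n");
		return 1;
	}

	/* Same three-leaf sequence as common.c:271-273. */
	cpuid(0x80000002, &v[0], &v[1], &v[2],  &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6],  &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	v[12] = 0;

	printf("%s\n", (const char *)v);
	return 0;
}

The convenience wrappers cpuid_eax/cpuid_ebx/cpuid_ecx/cpuid_edx listed at include/asm-x86/processor.h lines 638-665 follow the same pattern, each returning a single register of interest for the requested leaf.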