kvm 71 arch/x86/kvm/i8254.c &kvm->arch.vpit->pit_state.channels[channel];
kvm 73 arch/x86/kvm/i8254.c WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
kvm 96 arch/x86/kvm/i8254.c WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
kvm 98 arch/x86/kvm/i8254.c return kvm->arch.vpit->pit_state.channels[channel].gate;
kvm 104 arch/x86/kvm/i8254.c &kvm->arch.vpit->pit_state.channels[channel];
kvm 108 arch/x86/kvm/i8254.c WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
kvm 134 arch/x86/kvm/i8254.c &kvm->arch.vpit->pit_state.channels[channel];
kvm 138 arch/x86/kvm/i8254.c WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
kvm 169 arch/x86/kvm/i8254.c &kvm->arch.vpit->pit_state.channels[channel];
kvm 171 arch/x86/kvm/i8254.c WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
kvm 174 arch/x86/kvm/i8254.c c->latched_count = pit_get_count(kvm, channel);
kvm 182 arch/x86/kvm/i8254.c &kvm->arch.vpit->pit_state.channels[channel];
kvm 184 arch/x86/kvm/i8254.c WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
kvm 188 arch/x86/kvm/i8254.c c->status = ((pit_get_out(kvm, channel) << 7) |
kvm 198 arch/x86/kvm/i8254.c struct kvm_vcpu *vcpu0 = ps->pit->kvm->vcpus[0];
kvm 216 arch/x86/kvm/i8254.c struct kvm_pit *pit = vcpu->kvm->arch.vpit;
kvm 241 arch/x86/kvm/i8254.c struct kvm_pit *pit = vcpu->kvm->arch.vpit;
kvm 278 arch/x86/kvm/i8254.c struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
kvm 318 arch/x86/kvm/i8254.c mutex_lock(&kvm->arch.vpit->pit_state.lock);
kvm 319 arch/x86/kvm/i8254.c pit_load_count(kvm, channel, val);
kvm 320 arch/x86/kvm/i8254.c mutex_unlock(&kvm->arch.vpit->pit_state.lock);
kvm 328 arch/x86/kvm/i8254.c struct kvm *kvm = pit->kvm;
kvm 350 arch/x86/kvm/i8254.c pit_latch_count(kvm, channel);
kvm 352 arch/x86/kvm/i8254.c pit_latch_status(kvm, channel);
kvm 360 arch/x86/kvm/i8254.c pit_latch_count(kvm, channel);
kvm 377 arch/x86/kvm/i8254.c pit_load_count(kvm, addr, val);
kvm 380 arch/x86/kvm/i8254.c pit_load_count(kvm, addr, val << 8);
kvm 387 arch/x86/kvm/i8254.c pit_load_count(kvm, addr, s->write_latch | (val << 8));
kvm 401 arch/x86/kvm/i8254.c struct kvm *kvm = pit->kvm;
kvm 433 arch/x86/kvm/i8254.c count = pit_get_count(kvm, addr);
kvm 437 arch/x86/kvm/i8254.c count = pit_get_count(kvm, addr);
kvm 441 arch/x86/kvm/i8254.c count = pit_get_count(kvm, addr);
kvm 446 arch/x86/kvm/i8254.c count = pit_get_count(kvm, addr);
kvm 472 arch/x86/kvm/i8254.c struct kvm *kvm = pit->kvm;
kvm 477 arch/x86/kvm/i8254.c pit_set_gate(kvm, 2, val & 1);
kvm 486 arch/x86/kvm/i8254.c struct kvm *kvm = pit->kvm;
kvm 494 arch/x86/kvm/i8254.c ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
kvm 495 arch/x86/kvm/i8254.c (pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
kvm 518 arch/x86/kvm/i8254.c pit_load_count(pit->kvm, i, 0);
kvm 543 arch/x86/kvm/i8254.c kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
kvm 549 arch/x86/kvm/i8254.c kvm_io_bus_register_dev(&kvm->pio_bus, &pit->speaker_dev);
kvm 551 arch/x86/kvm/i8254.c kvm->arch.vpit = pit;
kvm 552 arch/x86/kvm/i8254.c pit->kvm = kvm;
kvm 569 arch/x86/kvm/i8254.c if (kvm->arch.vpit) {
kvm 570 arch/x86/kvm/i8254.c mutex_lock(&kvm->arch.vpit->pit_state.lock);
kvm 571 arch/x86/kvm/i8254.c timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
kvm 573 arch/x86/kvm/i8254.c mutex_unlock(&kvm->arch.vpit->pit_state.lock);
kvm 574 arch/x86/kvm/i8254.c kfree(kvm->arch.vpit);
kvm 580 arch/x86/kvm/i8254.c mutex_lock(&kvm->lock);
kvm 581 arch/x86/kvm/i8254.c kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 1);
kvm 582 arch/x86/kvm/i8254.c kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 0);
kvm 583 arch/x86/kvm/i8254.c kvm_pic_set_irq(pic_irqchip(kvm), 0, 1);
kvm 584 arch/x86/kvm/i8254.c kvm_pic_set_irq(pic_irqchip(kvm), 0, 0);
kvm 585 arch/x86/kvm/i8254.c mutex_unlock(&kvm->lock);
kvm 590 arch/x86/kvm/i8254.c struct kvm_pit *pit = vcpu->kvm->arch.vpit;
kvm 591 arch/x86/kvm/i8254.c struct kvm *kvm = vcpu->kvm;
kvm 605 arch/x86/kvm/i8254.c __inject_pit_timer_intr(kvm);
kvm 613 arch/x86/kvm/i8254.c struct kvm_arch *arch = &vcpu->kvm->arch;
kvm 45 arch/x86/kvm/i8254.h struct kvm *kvm;
kvm 423 arch/x86/kvm/i8259.c struct kvm *kvm = opaque;
kvm 424 arch/x86/kvm/i8259.c struct kvm_vcpu *vcpu = kvm->vcpus[0];
kvm 426 arch/x86/kvm/i8259.c pic_irqchip(kvm)->output = level;
kvm 440 arch/x86/kvm/i8259.c s->irq_request_opaque = kvm;
kvm 451 arch/x86/kvm/i8259.c kvm_io_bus_register_dev(&kvm->pio_bus, &s->dev);
kvm 53 arch/x86/kvm/irq.c s = pic_irqchip(v->kvm); /* PIC */
kvm 73 arch/x86/kvm/irq.c s = pic_irqchip(v->kvm);
kvm 35 arch/x86/kvm/irq.h struct kvm;
kvm 75 arch/x86/kvm/irq.h return kvm->arch.vpic;
kvm 80 arch/x86/kvm/irq.h return pic_irqchip(kvm) != NULL;
kvm 406 arch/x86/kvm/lapic.c last = kvm->arch.round_robin_prev_vcpu;
kvm 412 arch/x86/kvm/lapic.c if (kvm->vcpus[next] == NULL || !test_bit(next, &bitmap))
kvm 414 arch/x86/kvm/lapic.c apic = kvm->vcpus[next]->arch.apic;
kvm 419 arch/x86/kvm/lapic.c kvm->arch.round_robin_prev_vcpu = next;
kvm 432 arch/x86/kvm/lapic.c apic = kvm_apic_round_robin(kvm, vector, bitmap);
kvm 453 arch/x86/kvm/lapic.c kvm_ioapic_update_eoi(apic->vcpu->kvm, vector);
kvm 481 arch/x86/kvm/lapic.c vcpu = apic->vcpu->kvm->vcpus[i];
kvm 496 arch/x86/kvm/lapic.c target = kvm_get_lowest_prio_vcpu(vcpu->kvm, vector, lpr_map);
kvm 1135 arch/x86/kvm/lapic.c if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
kvm 1152 arch/x86/kvm/lapic.c if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
kvm 1172 arch/x86/kvm/lapic.c if (!irqchip_in_kernel(vcpu->kvm))
kvm 378 arch/x86/kvm/mmu.c write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
kvm 386 arch/x86/kvm/mmu.c write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
kvm 393 arch/x86/kvm/mmu.c struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
kvm 409 arch/x86/kvm/mmu.c addr = gfn_to_hva(kvm, gfn);
kvm 424 arch/x86/kvm/mmu.c if (has_wrprotected_page(vcpu->kvm, large_gfn))
kvm 427 arch/x86/kvm/mmu.c if (!host_largepage_backed(vcpu->kvm, large_gfn))
kvm 430 arch/x86/kvm/mmu.c slot = gfn_to_memslot(vcpu->kvm, large_gfn);
kvm 447 arch/x86/kvm/mmu.c slot = gfn_to_memslot(kvm, gfn);
kvm 475 arch/x86/kvm/mmu.c gfn = unalias_gfn(vcpu->kvm, gfn);
kvm 478 arch/x86/kvm/mmu.c rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
kvm 545 arch/x86/kvm/mmu.c rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
kvm 610 arch/x86/kvm/mmu.c gfn = unalias_gfn(kvm, gfn);
kvm 611 arch/x86/kvm/mmu.c rmapp = gfn_to_rmap(kvm, gfn, 0);
kvm 613 arch/x86/kvm/mmu.c spte = rmap_next(kvm, rmapp, NULL);
kvm 622 arch/x86/kvm/mmu.c spte = rmap_next(kvm, rmapp, spte);
kvm 627 arch/x86/kvm/mmu.c spte = rmap_next(kvm, rmapp, NULL);
kvm 633 arch/x86/kvm/mmu.c rmapp = gfn_to_rmap(kvm, gfn, 1);
kvm 634 arch/x86/kvm/mmu.c spte = rmap_next(kvm, rmapp, NULL);
kvm 641 arch/x86/kvm/mmu.c rmap_remove(kvm, spte);
kvm 642 arch/x86/kvm/mmu.c --kvm->stat.lpages;
kvm 647 arch/x86/kvm/mmu.c spte = rmap_next(kvm, rmapp, spte);
kvm 651 arch/x86/kvm/mmu.c kvm_flush_remote_tlbs(kvm);
kvm 653 arch/x86/kvm/mmu.c account_shadowed(kvm, gfn);
kvm 661 arch/x86/kvm/mmu.c while ((spte = rmap_next(kvm, rmapp, NULL))) {
kvm 664 arch/x86/kvm/mmu.c rmap_remove(kvm, spte);
kvm 681 arch/x86/kvm/mmu.c for (i = 0; i < kvm->nmemslots; i++) {
kvm 682 arch/x86/kvm/mmu.c struct kvm_memory_slot *memslot = &kvm->memslots[i];
kvm 693 arch/x86/kvm/mmu.c retval |= handler(kvm, &memslot->rmap[gfn_offset]);
kvm 694 arch/x86/kvm/mmu.c retval |= handler(kvm,
kvm 706 arch/x86/kvm/mmu.c return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
kvm 718 arch/x86/kvm/mmu.c spte = rmap_next(kvm, rmapp, NULL);
kvm 728 arch/x86/kvm/mmu.c spte = rmap_next(kvm, rmapp, spte);
kvm 735 arch/x86/kvm/mmu.c return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
kvm 761 arch/x86/kvm/mmu.c ++kvm->arch.n_free_mmu_pages;
kvm 778 arch/x86/kvm/mmu.c list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
kvm 783 arch/x86/kvm/mmu.c --vcpu->kvm->arch.n_free_mmu_pages;
kvm 880 arch/x86/kvm/mmu.c bucket = &kvm->arch.mmu_page_hash[index];
kvm 919 arch/x86/kvm/mmu.c bucket = &vcpu->kvm->arch.mmu_page_hash[index];
kvm 926 arch/x86/kvm/mmu.c ++vcpu->kvm->stat.mmu_cache_miss;
kvm 935 arch/x86/kvm/mmu.c rmap_write_protect(vcpu->kvm, gfn);
kvm 955 arch/x86/kvm/mmu.c rmap_remove(kvm, &pt[i]);
kvm 958 arch/x86/kvm/mmu.c kvm_flush_remote_tlbs(kvm);
kvm 971 arch/x86/kvm/mmu.c --kvm->stat.lpages;
kvm 972 arch/x86/kvm/mmu.c rmap_remove(kvm, &pt[i]);
kvm 977 arch/x86/kvm/mmu.c kvm_flush_remote_tlbs(kvm);
kvm 990 arch/x86/kvm/mmu.c if (kvm->vcpus[i])
kvm 991 arch/x86/kvm/mmu.c kvm->vcpus[i]->arch.last_pte_updated = NULL;
kvm 998 arch/x86/kvm/mmu.c ++kvm->stat.mmu_shadow_zapped;
kvm 1013 arch/x86/kvm/mmu.c kvm_mmu_page_unlink_children(kvm, sp);
kvm 1016 arch/x86/kvm/mmu.c unaccount_shadowed(kvm, sp->gfn);
kvm 1018 arch/x86/kvm/mmu.c kvm_mmu_free_page(kvm, sp);
kvm 1021 arch/x86/kvm/mmu.c list_move(&sp->link, &kvm->arch.active_mmu_pages);
kvm 1023 arch/x86/kvm/mmu.c kvm_reload_remote_mmus(kvm);
kvm 1025 arch/x86/kvm/mmu.c unaccount_shadowed(kvm, sp->gfn);
kvm 1027 arch/x86/kvm/mmu.c kvm_mmu_reset_last_pte_updated(kvm);
kvm 1042 arch/x86/kvm/mmu.c if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
kvm 1044 arch/x86/kvm/mmu.c int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
kvm 1045 arch/x86/kvm/mmu.c - kvm->arch.n_free_mmu_pages;
kvm 1050 arch/x86/kvm/mmu.c page = container_of(kvm->arch.active_mmu_pages.prev,
kvm 1052 arch/x86/kvm/mmu.c kvm_mmu_zap_page(kvm, page);
kvm 1055 arch/x86/kvm/mmu.c kvm->arch.n_free_mmu_pages = 0;
kvm 1058 arch/x86/kvm/mmu.c kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
kvm 1059 arch/x86/kvm/mmu.c - kvm->arch.n_alloc_mmu_pages;
kvm 1061 arch/x86/kvm/mmu.c kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
kvm 1075 arch/x86/kvm/mmu.c bucket = &kvm->arch.mmu_page_hash[index];
kvm 1080 arch/x86/kvm/mmu.c kvm_mmu_zap_page(kvm, sp);
kvm 1090 arch/x86/kvm/mmu.c while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
kvm 1092 arch/x86/kvm/mmu.c kvm_mmu_zap_page(kvm, sp);
kvm 1098 arch/x86/kvm/mmu.c int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
kvm 1114 arch/x86/kvm/mmu.c page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
kvm 1149 arch/x86/kvm/mmu.c rmap_remove(vcpu->kvm, shadow_pte);
kvm 1185 arch/x86/kvm/mmu.c shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
kvm 1187 arch/x86/kvm/mmu.c (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
kvm 1201 arch/x86/kvm/mmu.c mark_page_dirty(vcpu->kvm, gfn);
kvm 1210 arch/x86/kvm/mmu.c ++vcpu->kvm->stat.lpages;
kvm 1212 arch/x86/kvm/mmu.c page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
kvm 1296 arch/x86/kvm/mmu.c mmu_seq = vcpu->kvm->mmu_notifier_seq;
kvm 1298 arch/x86/kvm/mmu.c pfn = gfn_to_pfn(vcpu->kvm, gfn);
kvm 1307 arch/x86/kvm/mmu.c spin_lock(&vcpu->kvm->mmu_lock);
kvm 1313 arch/x86/kvm/mmu.c spin_unlock(&vcpu->kvm->mmu_lock);
kvm 1319 arch/x86/kvm/mmu.c spin_unlock(&vcpu->kvm->mmu_lock);
kvm 1332 arch/x86/kvm/mmu.c spin_lock(&vcpu->kvm->mmu_lock);
kvm 1339 arch/x86/kvm/mmu.c kvm_mmu_zap_page(vcpu->kvm, sp);
kvm 1341 arch/x86/kvm/mmu.c spin_unlock(&vcpu->kvm->mmu_lock);
kvm 1352 arch/x86/kvm/mmu.c kvm_mmu_zap_page(vcpu->kvm, sp);
kvm 1356 arch/x86/kvm/mmu.c spin_unlock(&vcpu->kvm->mmu_lock);
kvm 1454 arch/x86/kvm/mmu.c mmu_seq = vcpu->kvm->mmu_notifier_seq;
kvm 1456 arch/x86/kvm/mmu.c pfn = gfn_to_pfn(vcpu->kvm, gfn);
kvm 1462 arch/x86/kvm/mmu.c spin_lock(&vcpu->kvm->mmu_lock);
kvm 1468 arch/x86/kvm/mmu.c spin_unlock(&vcpu->kvm->mmu_lock);
kvm 1473 arch/x86/kvm/mmu.c spin_unlock(&vcpu->kvm->mmu_lock);
kvm 1647 arch/x86/kvm/mmu.c spin_lock(&vcpu->kvm->mmu_lock);
kvm 1650 arch/x86/kvm/mmu.c spin_unlock(&vcpu->kvm->mmu_lock);
kvm 1674 arch/x86/kvm/mmu.c rmap_remove(vcpu->kvm, spte);
kvm 1682 arch/x86/kvm/mmu.c --vcpu->kvm->stat.lpages;
kvm 1693 arch/x86/kvm/mmu.c ++vcpu->kvm->stat.mmu_pde_zapped;
kvm 1698 arch/x86/kvm/mmu.c ++vcpu->kvm->stat.mmu_pte_updated;
kvm 1721 arch/x86/kvm/mmu.c kvm_flush_remote_tlbs(vcpu->kvm);
kvm 1755 arch/x86/kvm/mmu.c r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
kvm 1775 arch/x86/kvm/mmu.c vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
kvm 1777 arch/x86/kvm/mmu.c pfn = gfn_to_pfn(vcpu->kvm, gfn);
kvm 1822 arch/x86/kvm/mmu.c spin_lock(&vcpu->kvm->mmu_lock);
kvm 1825 arch/x86/kvm/mmu.c ++vcpu->kvm->stat.mmu_pte_write;
kvm 1838 arch/x86/kvm/mmu.c bucket = &vcpu->kvm->arch.mmu_page_hash[index];
kvm 1858 arch/x86/kvm/mmu.c kvm_mmu_zap_page(vcpu->kvm, sp);
kvm 1859 arch/x86/kvm/mmu.c ++vcpu->kvm->stat.mmu_flooded;
kvm 1885 arch/x86/kvm/mmu.c r = kvm_read_guest_atomic(vcpu->kvm,
kvm 1902 arch/x86/kvm/mmu.c spin_unlock(&vcpu->kvm->mmu_lock);
kvm 1916 arch/x86/kvm/mmu.c spin_lock(&vcpu->kvm->mmu_lock);
kvm 1917 arch/x86/kvm/mmu.c r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
kvm 1918 arch/x86/kvm/mmu.c spin_unlock(&vcpu->kvm->mmu_lock);
kvm 1925 arch/x86/kvm/mmu.c while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
kvm 1928 arch/x86/kvm/mmu.c sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
kvm 1930 arch/x86/kvm/mmu.c kvm_mmu_zap_page(vcpu->kvm, sp);
kvm 1931 arch/x86/kvm/mmu.c ++vcpu->kvm->stat.mmu_recycled;
kvm 1988 arch/x86/kvm/mmu.c while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
kvm 1989 arch/x86/kvm/mmu.c sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
kvm 1991 arch/x86/kvm/mmu.c kvm_mmu_zap_page(vcpu->kvm, sp);
kvm 2004 arch/x86/kvm/mmu.c if (vcpu->kvm->arch.n_requested_mmu_pages)
kvm 2005 arch/x86/kvm/mmu.c vcpu->kvm->arch.n_free_mmu_pages =
kvm 2006 arch/x86/kvm/mmu.c vcpu->kvm->arch.n_requested_mmu_pages;
kvm 2008 arch/x86/kvm/mmu.c vcpu->kvm->arch.n_free_mmu_pages =
kvm 2009 arch/x86/kvm/mmu.c vcpu->kvm->arch.n_alloc_mmu_pages;
kvm 2058 arch/x86/kvm/mmu.c list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
kvm 2077 arch/x86/kvm/mmu.c spin_lock(&kvm->mmu_lock);
kvm 2078 arch/x86/kvm/mmu.c list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
kvm 2079 arch/x86/kvm/mmu.c kvm_mmu_zap_page(kvm, sp);
kvm 2080 arch/x86/kvm/mmu.c spin_unlock(&kvm->mmu_lock);
kvm 2082 arch/x86/kvm/mmu.c kvm_flush_remote_tlbs(kvm);
kvm 2089 arch/x86/kvm/mmu.c page = container_of(kvm->arch.active_mmu_pages.prev,
kvm 2091 arch/x86/kvm/mmu.c kvm_mmu_zap_page(kvm, page);
kvm 2096 arch/x86/kvm/mmu.c struct kvm *kvm;
kvm 2097 arch/x86/kvm/mmu.c struct kvm *kvm_freed = NULL;
kvm 2102 arch/x86/kvm/mmu.c list_for_each_entry(kvm, &vm_list, vm_list) {
kvm 2105 arch/x86/kvm/mmu.c if (!down_read_trylock(&kvm->slots_lock))
kvm 2107 arch/x86/kvm/mmu.c spin_lock(&kvm->mmu_lock);
kvm 2108 arch/x86/kvm/mmu.c npages = kvm->arch.n_alloc_mmu_pages -
kvm 2109 arch/x86/kvm/mmu.c kvm->arch.n_free_mmu_pages;
kvm 2112 arch/x86/kvm/mmu.c kvm_mmu_remove_one_alloc_mmu_page(kvm);
kvm 2114 arch/x86/kvm/mmu.c kvm_freed = kvm;
kvm 2118 arch/x86/kvm/mmu.c spin_unlock(&kvm->mmu_lock);
kvm 2119 arch/x86/kvm/mmu.c up_read(&kvm->slots_lock);
kvm 2187 arch/x86/kvm/mmu.c for (i = 0; i < kvm->nmemslots; i++)
kvm 2188 arch/x86/kvm/mmu.c nr_pages += kvm->memslots[i].npages;
kvm 2246 arch/x86/kvm/mmu.c spin_lock(&vcpu->kvm->mmu_lock);
kvm 2247 arch/x86/kvm/mmu.c mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
kvm 2248 arch/x86/kvm/mmu.c spin_unlock(&vcpu->kvm->mmu_lock);
kvm 2300 arch/x86/kvm/mmu.c r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
kvm 2394 arch/x86/kvm/mmu.c struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
kvm 2426 arch/x86/kvm/mmu.c list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
kvm 2462 arch/x86/kvm/mmu.c list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
kvm 2466 arch/x86/kvm/mmu.c slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
kvm 2467 arch/x86/kvm/mmu.c gfn = unalias_gfn(vcpu->kvm, sp->gfn);
kvm 42 arch/x86/kvm/mmu.h if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
kvm 95 arch/x86/kvm/paging_tmpl.h page = gfn_to_page(kvm, table_gfn);
kvm 161 arch/x86/kvm/paging_tmpl.h kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
kvm 179 arch/x86/kvm/paging_tmpl.h mark_page_dirty(vcpu->kvm, table_gfn);
kvm 180 arch/x86/kvm/paging_tmpl.h if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
kvm 212 arch/x86/kvm/paging_tmpl.h mark_page_dirty(vcpu->kvm, table_gfn);
kvm 213 arch/x86/kvm/paging_tmpl.h ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
kvm 319 arch/x86/kvm/paging_tmpl.h rmap_remove(vcpu->kvm, shadow_ent);
kvm 337 arch/x86/kvm/paging_tmpl.h r = kvm_read_guest_atomic(vcpu->kvm,
kvm 419 arch/x86/kvm/paging_tmpl.h mmu_seq = vcpu->kvm->mmu_notifier_seq;
kvm 421 arch/x86/kvm/paging_tmpl.h pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
kvm 431 arch/x86/kvm/paging_tmpl.h spin_lock(&vcpu->kvm->mmu_lock);
kvm 446 arch/x86/kvm/paging_tmpl.h spin_unlock(&vcpu->kvm->mmu_lock);
kvm 451 arch/x86/kvm/paging_tmpl.h spin_unlock(&vcpu->kvm->mmu_lock);
kvm 492 arch/x86/kvm/paging_tmpl.h r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
kvm 639 arch/x86/kvm/svm.c err = kvm_vcpu_init(&svm->vcpu, kvm, id);
kvm 1013 arch/x86/kvm/svm.c struct kvm *kvm = svm->vcpu.kvm;
kvm 1018 arch/x86/kvm/svm.c if (!irqchip_in_kernel(kvm) &&
kvm 1197 arch/x86/kvm/svm.c if (irqchip_in_kernel(svm->vcpu.kvm))
kvm 1561 arch/x86/kvm/svm.c if (!irqchip_in_kernel(vcpu->kvm) || vcpu->arch.apic->vapic_addr)
kvm 1618 arch/x86/kvm/svm.c && !irqchip_in_kernel(svm->vcpu.kvm)) {
kvm 1711 arch/x86/kvm/svm.c if (!irqchip_in_kernel(vcpu->kvm))
kvm 214 arch/x86/kvm/vmx.c return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)));
kvm 259 arch/x86/kvm/vmx.c (irqchip_in_kernel(kvm)));
kvm 1323 arch/x86/kvm/vmx.c if (!kvm->arch.tss_addr) {
kvm 1324 arch/x86/kvm/vmx.c gfn_t base_gfn = kvm->memslots[0].base_gfn +
kvm 1325 arch/x86/kvm/vmx.c kvm->memslots[0].npages - 3;
kvm 1328 arch/x86/kvm/vmx.c return kvm->arch.tss_addr;
kvm 1352 arch/x86/kvm/vmx.c vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
kvm 1386 arch/x86/kvm/vmx.c init_rmode(vcpu->kvm);
kvm 1720 arch/x86/kvm/vmx.c gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
kvm 1725 arch/x86/kvm/vmx.c r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
kvm 1729 arch/x86/kvm/vmx.c r = kvm_write_guest_page(kvm, fn++, &data, 0x66, sizeof(u16));
kvm 1732 arch/x86/kvm/vmx.c r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
kvm 1735 arch/x86/kvm/vmx.c r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
kvm 1739 arch/x86/kvm/vmx.c r = kvm_write_guest_page(kvm, fn, &data,
kvm 1758 arch/x86/kvm/vmx.c if (unlikely(!kvm->arch.ept_identity_pagetable)) {
kvm 1763 arch/x86/kvm/vmx.c if (likely(kvm->arch.ept_identity_pagetable_done))
kvm 1767 arch/x86/kvm/vmx.c r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
kvm 1774 arch/x86/kvm/vmx.c r = kvm_write_guest_page(kvm, identity_map_pfn,
kvm 1779 arch/x86/kvm/vmx.c kvm->arch.ept_identity_pagetable_done = true;
kvm 1800 arch/x86/kvm/vmx.c down_write(&kvm->slots_lock);
kvm 1801 arch/x86/kvm/vmx.c if (kvm->arch.apic_access_page)
kvm 1807 arch/x86/kvm/vmx.c r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
kvm 1812 arch/x86/kvm/vmx.c kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
kvm 1815 arch/x86/kvm/vmx.c up_write(&kvm->slots_lock);
kvm 1824 arch/x86/kvm/vmx.c down_write(&kvm->slots_lock);
kvm 1825 arch/x86/kvm/vmx.c if (kvm->arch.ept_identity_pagetable)
kvm 1831 arch/x86/kvm/vmx.c r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
kvm 1836 arch/x86/kvm/vmx.c kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
kvm 1840 arch/x86/kvm/vmx.c up_write(&kvm->slots_lock);
kvm 1911 arch/x86/kvm/vmx.c if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
kvm 1925 arch/x86/kvm/vmx.c if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
kvm 2009 arch/x86/kvm/vmx.c if (!init_rmode_tss(kvm))
kvm 2011 arch/x86/kvm/vmx.c if (!init_rmode_identity_map(kvm))
kvm 2022 arch/x86/kvm/vmx.c down_read(&vcpu->kvm->slots_lock);
kvm 2023 arch/x86/kvm/vmx.c if (!init_rmode(vmx->vcpu.kvm)) {
kvm 2104 arch/x86/kvm/vmx.c if (vm_need_tpr_shadow(vmx->vcpu.kvm))
kvm 2110 arch/x86/kvm/vmx.c if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
kvm 2112 arch/x86/kvm/vmx.c page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
kvm 2129 arch/x86/kvm/vmx.c up_read(&vcpu->kvm->slots_lock);
kvm 2212 arch/x86/kvm/vmx.c ret = kvm_set_memory_region(kvm, &tss_mem, 0);
kvm 2215 arch/x86/kvm/vmx.c kvm->arch.tss_addr = addr;
kvm 2269 arch/x86/kvm/vmx.c if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
kvm 2411 arch/x86/kvm/vmx.c if (irqchip_in_kernel(vcpu->kvm))
kvm 2657 arch/x86/kvm/vmx.c hva = gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT);
kvm 2774 arch/x86/kvm/vmx.c if (!vm_need_tpr_shadow(vcpu->kvm))
kvm 3122 arch/x86/kvm/vmx.c err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
kvm 3149 arch/x86/kvm/vmx.c if (vm_need_virtualize_apic_accesses(kvm))
kvm 3150 arch/x86/kvm/vmx.c if (alloc_apic_access_page(kvm) != 0)
kvm 3154 arch/x86/kvm/vmx.c if (alloc_identity_pagetable(kvm) != 0)
kvm 57 arch/x86/kvm/x86.c #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
kvm 131 arch/x86/kvm/x86.c if (irqchip_in_kernel(vcpu->kvm))
kvm 141 arch/x86/kvm/x86.c if (irqchip_in_kernel(vcpu->kvm))
kvm 211 arch/x86/kvm/x86.c ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
kvm 241 arch/x86/kvm/x86.c r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
kvm 395 arch/x86/kvm/x86.c if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
kvm 411 arch/x86/kvm/x86.c if (irqchip_in_kernel(vcpu->kvm))
kvm 420 arch/x86/kvm/x86.c if (irqchip_in_kernel(vcpu->kvm))
kvm 510 arch/x86/kvm/x86.c kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
kvm 526 arch/x86/kvm/x86.c kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
kvm 529 arch/x86/kvm/x86.c kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
kvm 611 arch/x86/kvm/x86.c mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
kvm 677 arch/x86/kvm/x86.c vcpu->kvm->arch.wall_clock = data;
kvm 678 arch/x86/kvm/x86.c kvm_write_wall_clock(vcpu->kvm, data);
kvm 697 arch/x86/kvm/x86.c gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
kvm 783 arch/x86/kvm/x86.c data = vcpu->kvm->arch.wall_clock;
kvm 811 arch/x86/kvm/x86.c down_read(&vcpu->kvm->slots_lock);
kvm 815 arch/x86/kvm/x86.c up_read(&vcpu->kvm->slots_lock);
kvm 1279 arch/x86/kvm/x86.c if (irqchip_in_kernel(vcpu->kvm))
kvm 1412 arch/x86/kvm/x86.c if (!irqchip_in_kernel(vcpu->kvm))
kvm 1434 arch/x86/kvm/x86.c ret = kvm_x86_ops->set_tss_addr(kvm, addr);
kvm 1444 arch/x86/kvm/x86.c down_write(&kvm->slots_lock);
kvm 1446 arch/x86/kvm/x86.c kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
kvm 1447 arch/x86/kvm/x86.c kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
kvm 1449 arch/x86/kvm/x86.c up_write(&kvm->slots_lock);
kvm 1455 arch/x86/kvm/x86.c return kvm->arch.n_alloc_mmu_pages;
kvm 1463 arch/x86/kvm/x86.c for (i = 0; i < kvm->arch.naliases; ++i) {
kvm 1464 arch/x86/kvm/x86.c alias = &kvm->arch.aliases[i];
kvm 1498 arch/x86/kvm/x86.c down_write(&kvm->slots_lock);
kvm 1499 arch/x86/kvm/x86.c spin_lock(&kvm->mmu_lock);
kvm 1501 arch/x86/kvm/x86.c p = &kvm->arch.aliases[alias->slot];
kvm 1507 arch/x86/kvm/x86.c if (kvm->arch.aliases[n - 1].npages)
kvm 1509 arch/x86/kvm/x86.c kvm->arch.naliases = n;
kvm 1511 arch/x86/kvm/x86.c spin_unlock(&kvm->mmu_lock);
kvm 1512 arch/x86/kvm/x86.c kvm_mmu_zap_all(kvm);
kvm 1514 arch/x86/kvm/x86.c up_write(&kvm->slots_lock);
kvm 1530 arch/x86/kvm/x86.c &pic_irqchip(kvm)->pics[0],
kvm 1535 arch/x86/kvm/x86.c &pic_irqchip(kvm)->pics[1],
kvm 1540 arch/x86/kvm/x86.c ioapic_irqchip(kvm),
kvm 1557 arch/x86/kvm/x86.c memcpy(&pic_irqchip(kvm)->pics[0],
kvm 1562 arch/x86/kvm/x86.c memcpy(&pic_irqchip(kvm)->pics[1],
kvm 1567 arch/x86/kvm/x86.c memcpy(ioapic_irqchip(kvm),
kvm 1575 arch/x86/kvm/x86.c kvm_pic_update_irq(pic_irqchip(kvm));
kvm 1583 arch/x86/kvm/x86.c memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
kvm 1591 arch/x86/kvm/x86.c memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
kvm 1592 arch/x86/kvm/x86.c kvm_pit_load_count(kvm, 0, ps->channels[0].count);
kvm 1607 arch/x86/kvm/x86.c down_write(&kvm->slots_lock);
kvm 1609 arch/x86/kvm/x86.c r = kvm_get_dirty_log(kvm, log, &is_dirty);
kvm 1615 arch/x86/kvm/x86.c kvm_mmu_slot_remove_write_access(kvm, log->slot);
kvm 1616 arch/x86/kvm/x86.c kvm_flush_remote_tlbs(kvm);
kvm 1617 arch/x86/kvm/x86.c memslot = &kvm->memslots[log->slot];
kvm 1623 arch/x86/kvm/x86.c up_write(&kvm->slots_lock);
kvm 1630 arch/x86/kvm/x86.c struct kvm *kvm = filp->private_data;
kvm 1636 arch/x86/kvm/x86.c r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
kvm 1651 arch/x86/kvm/x86.c r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
kvm 1657 arch/x86/kvm/x86.c r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
kvm 1662 arch/x86/kvm/x86.c r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
kvm 1670 arch/x86/kvm/x86.c r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
kvm 1677 arch/x86/kvm/x86.c kvm->arch.vpic = kvm_create_pic(kvm);
kvm 1678 arch/x86/kvm/x86.c if (kvm->arch.vpic) {
kvm 1679 arch/x86/kvm/x86.c r = kvm_ioapic_init(kvm);
kvm 1681 arch/x86/kvm/x86.c kfree(kvm->arch.vpic);
kvm 1682 arch/x86/kvm/x86.c kvm->arch.vpic = NULL;
kvm 1690 arch/x86/kvm/x86.c kvm->arch.vpit = kvm_create_pit(kvm);
kvm 1691 arch/x86/kvm/x86.c if (kvm->arch.vpit)
kvm 1700 arch/x86/kvm/x86.c if (irqchip_in_kernel(kvm)) {
kvm 1701 arch/x86/kvm/x86.c mutex_lock(&kvm->lock);
kvm 1703 arch/x86/kvm/x86.c kvm_pic_set_irq(pic_irqchip(kvm),
kvm 1706 arch/x86/kvm/x86.c kvm_ioapic_set_irq(kvm->arch.vioapic,
kvm 1709 arch/x86/kvm/x86.c mutex_unlock(&kvm->lock);
kvm 1722 arch/x86/kvm/x86.c if (!irqchip_in_kernel(kvm))
kvm 1724 arch/x86/kvm/x86.c r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
kvm 1741 arch/x86/kvm/x86.c if (!irqchip_in_kernel(kvm))
kvm 1743 arch/x86/kvm/x86.c r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
kvm 1755 arch/x86/kvm/x86.c if (!kvm->arch.vpit)
kvm 1757 arch/x86/kvm/x86.c r = kvm_vm_ioctl_get_pit(kvm, &ps);
kvm 1772 arch/x86/kvm/x86.c if (!kvm->arch.vpit)
kvm 1774 arch/x86/kvm/x86.c r = kvm_vm_ioctl_set_pit(kvm, &ps);
kvm 1828 arch/x86/kvm/x86.c dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
kvm 1851 arch/x86/kvm/x86.c ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
kvm 1896 arch/x86/kvm/x86.c mutex_lock(&vcpu->kvm->lock);
kvm 1900 arch/x86/kvm/x86.c mutex_unlock(&vcpu->kvm->lock);
kvm 1903 arch/x86/kvm/x86.c mutex_unlock(&vcpu->kvm->lock);
kvm 1918 arch/x86/kvm/x86.c ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
kvm 1951 arch/x86/kvm/x86.c mutex_lock(&vcpu->kvm->lock);
kvm 1955 arch/x86/kvm/x86.c mutex_unlock(&vcpu->kvm->lock);
kvm 1958 arch/x86/kvm/x86.c mutex_unlock(&vcpu->kvm->lock);
kvm 2022 arch/x86/kvm/x86.c page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
kvm 2276 arch/x86/kvm/x86.c mutex_lock(&vcpu->kvm->lock);
kvm 2285 arch/x86/kvm/x86.c mutex_unlock(&vcpu->kvm->lock);
kvm 2295 arch/x86/kvm/x86.c mutex_lock(&vcpu->kvm->lock);
kvm 2302 arch/x86/kvm/x86.c mutex_unlock(&vcpu->kvm->lock);
kvm 2309 arch/x86/kvm/x86.c return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
kvm 2493 arch/x86/kvm/x86.c if (irqchip_in_kernel(vcpu->kvm)) {
kvm 2495 arch/x86/kvm/x86.c up_read(&vcpu->kvm->slots_lock);
kvm 2497 arch/x86/kvm/x86.c down_read(&vcpu->kvm->slots_lock);
kvm 2569 arch/x86/kvm/x86.c kvm_mmu_zap_all(vcpu->kvm);
kvm 2763 arch/x86/kvm/x86.c if (irqchip_in_kernel(vcpu->kvm))
kvm 2780 arch/x86/kvm/x86.c page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
kvm 2793 arch/x86/kvm/x86.c down_read(&vcpu->kvm->slots_lock);
kvm 2795 arch/x86/kvm/x86.c mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
kvm 2796 arch/x86/kvm/x86.c up_read(&vcpu->kvm->slots_lock);
kvm 2813 arch/x86/kvm/x86.c down_read(&vcpu->kvm->slots_lock);
kvm 2882 arch/x86/kvm/x86.c else if (irqchip_in_kernel(vcpu->kvm))
kvm 2889 arch/x86/kvm/x86.c up_read(&vcpu->kvm->slots_lock);
kvm 2914 arch/x86/kvm/x86.c down_read(&vcpu->kvm->slots_lock);
kvm 2943 arch/x86/kvm/x86.c up_read(&vcpu->kvm->slots_lock);
kvm 2946 arch/x86/kvm/x86.c down_read(&vcpu->kvm->slots_lock);
kvm 2974 arch/x86/kvm/x86.c if (!irqchip_in_kernel(vcpu->kvm))
kvm 2988 arch/x86/kvm/x86.c down_read(&vcpu->kvm->slots_lock);
kvm 2992 arch/x86/kvm/x86.c up_read(&vcpu->kvm->slots_lock);
kvm 3142 arch/x86/kvm/x86.c if (irqchip_in_kernel(vcpu->kvm)) {
kvm 3245 arch/x86/kvm/x86.c return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
kvm 3262 arch/x86/kvm/x86.c return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
kvm 3440 arch/x86/kvm/x86.c if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
kvm 3446 arch/x86/kvm/x86.c if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
kvm 3450 arch/x86/kvm/x86.c if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
kvm 3469 arch/x86/kvm/x86.c if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
kvm 3475 arch/x86/kvm/x86.c if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
kvm 3479 arch/x86/kvm/x86.c if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
kvm 3607 arch/x86/kvm/x86.c if (!irqchip_in_kernel(vcpu->kvm)) {
kvm 3687 arch/x86/kvm/x86.c down_read(&vcpu->kvm->slots_lock);
kvm 3689 arch/x86/kvm/x86.c up_read(&vcpu->kvm->slots_lock);
kvm 3799 arch/x86/kvm/x86.c return kvm_x86_ops->vcpu_create(kvm, id);
kvm 3865 arch/x86/kvm/x86.c struct kvm *kvm;
kvm 3868 arch/x86/kvm/x86.c BUG_ON(vcpu->kvm == NULL);
kvm 3869 arch/x86/kvm/x86.c kvm = vcpu->kvm;
kvm 3872 arch/x86/kvm/x86.c if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
kvm 3888 arch/x86/kvm/x86.c if (irqchip_in_kernel(kvm)) {
kvm 3907 arch/x86/kvm/x86.c down_read(&vcpu->kvm->slots_lock);
kvm 3909 arch/x86/kvm/x86.c up_read(&vcpu->kvm->slots_lock);
kvm 3913 arch/x86/kvm/x86.c struct kvm *kvm_arch_create_vm(void)
kvm 3915 arch/x86/kvm/x86.c struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
kvm 3917 arch/x86/kvm/x86.c if (!kvm)
kvm 3920 arch/x86/kvm/x86.c INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
kvm 3922 arch/x86/kvm/x86.c return kvm;
kvm 3940 arch/x86/kvm/x86.c if (kvm->vcpus[i])
kvm 3941 arch/x86/kvm/x86.c kvm_unload_vcpu_mmu(kvm->vcpus[i]);
kvm 3943 arch/x86/kvm/x86.c if (kvm->vcpus[i]) {
kvm 3944 arch/x86/kvm/x86.c kvm_arch_vcpu_free(kvm->vcpus[i]);
kvm 3945 arch/x86/kvm/x86.c kvm->vcpus[i] = NULL;
kvm 3953 arch/x86/kvm/x86.c kvm_free_pit(kvm);
kvm 3954 arch/x86/kvm/x86.c kfree(kvm->arch.vpic);
kvm 3955 arch/x86/kvm/x86.c kfree(kvm->arch.vioapic);
kvm 3956 arch/x86/kvm/x86.c kvm_free_vcpus(kvm);
kvm 3957 arch/x86/kvm/x86.c kvm_free_physmem(kvm);
kvm 3958 arch/x86/kvm/x86.c if (kvm->arch.apic_access_page)
kvm 3959 arch/x86/kvm/x86.c put_page(kvm->arch.apic_access_page);
kvm 3960 arch/x86/kvm/x86.c if (kvm->arch.ept_identity_pagetable)
kvm 3961 arch/x86/kvm/x86.c put_page(kvm->arch.ept_identity_pagetable);
kvm 3962 arch/x86/kvm/x86.c kfree(kvm);
kvm 3971 arch/x86/kvm/x86.c struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
kvm 3992 arch/x86/kvm/x86.c spin_lock(&kvm->mmu_lock);
kvm 3994 arch/x86/kvm/x86.c spin_unlock(&kvm->mmu_lock);
kvm 4011 arch/x86/kvm/x86.c if (!kvm->arch.n_requested_mmu_pages) {
kvm 4012 arch/x86/kvm/x86.c unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
kvm 4013 arch/x86/kvm/x86.c kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
kvm 4016 arch/x86/kvm/x86.c kvm_mmu_slot_remove_write_access(kvm, mem->slot);
kvm 4017 arch/x86/kvm/x86.c kvm_flush_remote_tlbs(kvm);
kvm 4024 arch/x86/kvm/x86.c kvm_mmu_zap_all(kvm);
kvm 90 include/asm-x86/kvm_host.h struct kvm;
kvm 385 include/asm-x86/kvm_host.h struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
kvm 438 include/asm-x86/kvm_host.h int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
kvm 61 include/linux/kvm_host.h struct kvm *kvm;
kvm 141 include/linux/kvm_host.h #define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
kvm 280 include/linux/kvm_host.h struct kvm *kvm_arch_create_vm(void);
kvm 302 include/linux/kvm_host.h return slot - kvm->memslots;
kvm 344 include/linux/kvm_host.h if (unlikely(vcpu->kvm->mmu_notifier_count))
kvm 353 include/linux/kvm_host.h if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
kvm 40 virt/kvm/coalesced_mmio.c next = (dev->kvm->coalesced_mmio_ring->last + 1) %
kvm 42 virt/kvm/coalesced_mmio.c if (next == dev->kvm->coalesced_mmio_ring->first) {
kvm 68 virt/kvm/coalesced_mmio.c struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
kvm 97 virt/kvm/coalesced_mmio.c dev->kvm = kvm;
kvm 98 virt/kvm/coalesced_mmio.c kvm->coalesced_mmio_dev = dev;
kvm 99 virt/kvm/coalesced_mmio.c kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->dev);
kvm 107 virt/kvm/coalesced_mmio.c struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
kvm 112 virt/kvm/coalesced_mmio.c mutex_lock(&kvm->lock);
kvm 114 virt/kvm/coalesced_mmio.c mutex_unlock(&kvm->lock);
kvm 121 virt/kvm/coalesced_mmio.c mutex_unlock(&kvm->lock);
kvm 129 virt/kvm/coalesced_mmio.c struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
kvm 135 virt/kvm/coalesced_mmio.c mutex_lock(&kvm->lock);
kvm 153 virt/kvm/coalesced_mmio.c mutex_unlock(&kvm->lock);
kvm 14 virt/kvm/coalesced_mmio.h struct kvm *kvm;
kvm 159 virt/kvm/ioapic.c struct kvm *kvm = ioapic->kvm;
kvm 167 virt/kvm/ioapic.c if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
kvm 172 virt/kvm/ioapic.c vcpu = kvm->vcpus[i];
kvm 183 virt/kvm/ioapic.c vcpu = kvm->vcpus[i];
kvm 217 virt/kvm/ioapic.c vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
kvm 221 virt/kvm/ioapic.c vcpu = ioapic->kvm->vcpus[0];
kvm 240 virt/kvm/ioapic.c vcpu = ioapic->kvm->vcpus[vcpu_id];
kvm 252 virt/kvm/ioapic.c vcpu = ioapic->kvm->vcpus[vcpu_id];
kvm 302 virt/kvm/ioapic.c struct kvm_ioapic *ioapic = kvm->arch.vioapic;
kvm 383 virt/kvm/ioapic.c kvm_ioapic_update_eoi(ioapic->kvm, data);
kvm 411 virt/kvm/ioapic.c kvm->arch.vioapic = ioapic;
kvm 417 virt/kvm/ioapic.c ioapic->kvm = kvm;
kvm 418 virt/kvm/ioapic.c kvm_io_bus_register_dev(&kvm->mmio_bus, &ioapic->dev);
kvm 8 virt/kvm/ioapic.h struct kvm;
kvm 60 virt/kvm/ioapic.h struct kvm *kvm;
kvm 78 virt/kvm/ioapic.h return kvm->arch.vioapic;
kvm 115 virt/kvm/kvm_main.c vcpu = kvm->vcpus[i];
kvm 126 virt/kvm/kvm_main.c ++kvm->stat.remote_tlb_flush;
kvm 141 virt/kvm/kvm_main.c vcpu = kvm->vcpus[i];
kvm 165 virt/kvm/kvm_main.c vcpu->kvm = kvm;
kvm 196 virt/kvm/kvm_main.c static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
kvm 198 virt/kvm/kvm_main.c return container_of(mn, struct kvm, mmu_notifier);
kvm 205 virt/kvm/kvm_main.c struct kvm *kvm = mmu_notifier_to_kvm(mn);
kvm 226 virt/kvm/kvm_main.c spin_lock(&kvm->mmu_lock);
kvm 227 virt/kvm/kvm_main.c kvm->mmu_notifier_seq++;
kvm 228 virt/kvm/kvm_main.c need_tlb_flush = kvm_unmap_hva(kvm, address);
kvm 229 virt/kvm/kvm_main.c spin_unlock(&kvm->mmu_lock);
kvm 233 virt/kvm/kvm_main.c kvm_flush_remote_tlbs(kvm);
kvm 242 virt/kvm/kvm_main.c struct kvm *kvm = mmu_notifier_to_kvm(mn);
kvm 245 virt/kvm/kvm_main.c spin_lock(&kvm->mmu_lock);
kvm 251 virt/kvm/kvm_main.c kvm->mmu_notifier_count++;
kvm 253 virt/kvm/kvm_main.c need_tlb_flush |= kvm_unmap_hva(kvm, start);
kvm 254 virt/kvm/kvm_main.c spin_unlock(&kvm->mmu_lock);
kvm 258 virt/kvm/kvm_main.c kvm_flush_remote_tlbs(kvm);
kvm 266 virt/kvm/kvm_main.c struct kvm *kvm = mmu_notifier_to_kvm(mn);
kvm 268 virt/kvm/kvm_main.c spin_lock(&kvm->mmu_lock);
kvm 274 virt/kvm/kvm_main.c kvm->mmu_notifier_seq++;
kvm 281 virt/kvm/kvm_main.c kvm->mmu_notifier_count--;
kvm 282 virt/kvm/kvm_main.c spin_unlock(&kvm->mmu_lock);
kvm 284 virt/kvm/kvm_main.c BUG_ON(kvm->mmu_notifier_count < 0);
kvm 291 virt/kvm/kvm_main.c struct kvm *kvm = mmu_notifier_to_kvm(mn);
kvm 294 virt/kvm/kvm_main.c spin_lock(&kvm->mmu_lock);
kvm 295 virt/kvm/kvm_main.c young = kvm_age_hva(kvm, address);
kvm 296 virt/kvm/kvm_main.c spin_unlock(&kvm->mmu_lock);
kvm 299 virt/kvm/kvm_main.c kvm_flush_remote_tlbs(kvm);
kvm 312 virt/kvm/kvm_main.c static struct kvm *kvm_create_vm(void)
kvm 314 virt/kvm/kvm_main.c struct kvm *kvm = kvm_arch_create_vm();
kvm 319 virt/kvm/kvm_main.c if (IS_ERR(kvm))
kvm 325 virt/kvm/kvm_main.c kfree(kvm);
kvm 328 virt/kvm/kvm_main.c kvm->coalesced_mmio_ring =
kvm 335 virt/kvm/kvm_main.c kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
kvm 336 virt/kvm/kvm_main.c err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
kvm 341 virt/kvm/kvm_main.c kfree(kvm);
kvm 347 virt/kvm/kvm_main.c kvm->mm = current->mm;
kvm 348 virt/kvm/kvm_main.c atomic_inc(&kvm->mm->mm_count);
kvm 349 virt/kvm/kvm_main.c spin_lock_init(&kvm->mmu_lock);
kvm 350 virt/kvm/kvm_main.c kvm_io_bus_init(&kvm->pio_bus);
kvm 351 virt/kvm/kvm_main.c mutex_init(&kvm->lock);
kvm 352 virt/kvm/kvm_main.c kvm_io_bus_init(&kvm->mmio_bus);
kvm 353 virt/kvm/kvm_main.c init_rwsem(&kvm->slots_lock);
kvm 354 virt/kvm/kvm_main.c atomic_set(&kvm->users_count, 1);
kvm 356 virt/kvm/kvm_main.c list_add(&kvm->vm_list, &vm_list);
kvm 359 virt/kvm/kvm_main.c kvm_coalesced_mmio_init(kvm);
kvm 362 virt/kvm/kvm_main.c return kvm;
kvm 390 virt/kvm/kvm_main.c for (i = 0; i < kvm->nmemslots; ++i)
kvm 391 virt/kvm/kvm_main.c kvm_free_physmem_slot(&kvm->memslots[i], NULL);
kvm 396 virt/kvm/kvm_main.c struct mm_struct *mm = kvm->mm;
kvm 399 virt/kvm/kvm_main.c list_del(&kvm->vm_list);
kvm 401 virt/kvm/kvm_main.c kvm_io_bus_destroy(&kvm->pio_bus);
kvm 402 virt/kvm/kvm_main.c kvm_io_bus_destroy(&kvm->mmio_bus);
kvm 404 virt/kvm/kvm_main.c if (kvm->coalesced_mmio_ring != NULL)
kvm 405 virt/kvm/kvm_main.c free_page((unsigned long)kvm->coalesced_mmio_ring);
kvm 408 virt/kvm/kvm_main.c mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
kvm 410 virt/kvm/kvm_main.c kvm_arch_destroy_vm(kvm);
kvm 416 virt/kvm/kvm_main.c atomic_inc(&kvm->users_count);
kvm 422 virt/kvm/kvm_main.c if (atomic_dec_and_test(&kvm->users_count))
kvm 423 virt/kvm/kvm_main.c kvm_destroy_vm(kvm);
kvm 430 virt/kvm/kvm_main.c struct kvm *kvm = filp->private_data;
kvm 432 virt/kvm/kvm_main.c kvm_put_kvm(kvm);
kvm 466 virt/kvm/kvm_main.c memslot = &kvm->memslots[mem->slot];
kvm 487 virt/kvm/kvm_main.c struct kvm_memory_slot *s = &kvm->memslots[i];
kvm 555 virt/kvm/kvm_main.c kvm_arch_flush_shadow(kvm);
kvm 557 virt/kvm/kvm_main.c spin_lock(&kvm->mmu_lock);
kvm 558 virt/kvm/kvm_main.c if (mem->slot >= kvm->nmemslots)
kvm 559 virt/kvm/kvm_main.c kvm->nmemslots = mem->slot + 1;
kvm 562 virt/kvm/kvm_main.c spin_unlock(&kvm->mmu_lock);
kvm 564 virt/kvm/kvm_main.c r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
kvm 566 virt/kvm/kvm_main.c spin_lock(&kvm->mmu_lock);
kvm 568 virt/kvm/kvm_main.c spin_unlock(&kvm->mmu_lock);
kvm 589 virt/kvm/kvm_main.c down_write(&kvm->slots_lock);
kvm 590 virt/kvm/kvm_main.c r = __kvm_set_memory_region(kvm, mem, user_alloc);
kvm 591 virt/kvm/kvm_main.c up_write(&kvm->slots_lock);
kvm 603 virt/kvm/kvm_main.c return kvm_set_memory_region(kvm, mem, user_alloc);
kvm 618 virt/kvm/kvm_main.c memslot = &kvm->memslots[log->slot];
kvm 667 virt/kvm/kvm_main.c for (i = 0; i < kvm->nmemslots; ++i) {
kvm 668 virt/kvm/kvm_main.c struct kvm_memory_slot *memslot = &kvm->memslots[i];
kvm 679 virt/kvm/kvm_main.c gfn = unalias_gfn(kvm, gfn);
kvm 680 virt/kvm/kvm_main.c return __gfn_to_memslot(kvm, gfn);
kvm 687 virt/kvm/kvm_main.c gfn = unalias_gfn(kvm, gfn);
kvm 689 virt/kvm/kvm_main.c struct kvm_memory_slot *memslot = &kvm->memslots[i];
kvm 703 virt/kvm/kvm_main.c gfn = unalias_gfn(kvm, gfn);
kvm 704 virt/kvm/kvm_main.c slot = __gfn_to_memslot(kvm, gfn);
kvm 723 virt/kvm/kvm_main.c addr = gfn_to_hva(kvm, gfn);
kvm 756 virt/kvm/kvm_main.c pfn = gfn_to_pfn(kvm, gfn);
kvm 838 virt/kvm/kvm_main.c addr = gfn_to_hva(kvm, gfn);
kvm 856 virt/kvm/kvm_main.c ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
kvm 876 virt/kvm/kvm_main.c addr = gfn_to_hva(kvm, gfn);
kvm 894 virt/kvm/kvm_main.c addr = gfn_to_hva(kvm, gfn);
kvm 900 virt/kvm/kvm_main.c mark_page_dirty(kvm, gfn);
kvm 914 virt/kvm/kvm_main.c ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
kvm 927 virt/kvm/kvm_main.c return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
kvm 939 virt/kvm/kvm_main.c ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
kvm 954 virt/kvm/kvm_main.c gfn = unalias_gfn(kvm, gfn);
kvm 955 virt/kvm/kvm_main.c memslot = __gfn_to_memslot(kvm, gfn);
kvm 1013 virt/kvm/kvm_main.c page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
kvm 1036 virt/kvm/kvm_main.c kvm_put_kvm(vcpu->kvm);
kvm 1054 virt/kvm/kvm_main.c kvm_put_kvm(vcpu->kvm);
kvm 1069 virt/kvm/kvm_main.c vcpu = kvm_arch_vcpu_create(kvm, n);
kvm 1079 virt/kvm/kvm_main.c mutex_lock(&kvm->lock);
kvm 1080 virt/kvm/kvm_main.c if (kvm->vcpus[n]) {
kvm 1082 virt/kvm/kvm_main.c mutex_unlock(&kvm->lock);
kvm 1085 virt/kvm/kvm_main.c kvm->vcpus[n] = vcpu;
kvm 1086 virt/kvm/kvm_main.c mutex_unlock(&kvm->lock);
kvm 1089 virt/kvm/kvm_main.c kvm_get_kvm(kvm);
kvm 1096 virt/kvm/kvm_main.c mutex_lock(&kvm->lock);
kvm 1097 virt/kvm/kvm_main.c kvm->vcpus[n] = NULL;
kvm 1098 virt/kvm/kvm_main.c mutex_unlock(&kvm->lock);
kvm 1122 virt/kvm/kvm_main.c if (vcpu->kvm->mm != current->mm)
kvm 1301 virt/kvm/kvm_main.c struct kvm *kvm = filp->private_data;
kvm 1305 virt/kvm/kvm_main.c if (kvm->mm != current->mm)
kvm 1309 virt/kvm/kvm_main.c r = kvm_vm_ioctl_create_vcpu(kvm, arg);
kvm 1321 virt/kvm/kvm_main.c r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
kvm 1332 virt/kvm/kvm_main.c r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
kvm 1344 virt/kvm/kvm_main.c r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
kvm 1356 virt/kvm/kvm_main.c r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
kvm 1372 virt/kvm/kvm_main.c struct kvm *kvm = vma->vm_file->private_data;
kvm 1375 virt/kvm/kvm_main.c if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
kvm 1377 virt/kvm/kvm_main.c page = gfn_to_page(kvm, vmf->pgoff);
kvm 1406 virt/kvm/kvm_main.c struct kvm *kvm;
kvm 1408 virt/kvm/kvm_main.c kvm = kvm_create_vm();
kvm 1409 virt/kvm/kvm_main.c if (IS_ERR(kvm))
kvm 1410 virt/kvm/kvm_main.c return PTR_ERR(kvm);
kvm 1411 virt/kvm/kvm_main.c fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
kvm 1413 virt/kvm/kvm_main.c kvm_put_kvm(kvm);
kvm 1598 virt/kvm/kvm_main.c struct kvm *kvm;
kvm 1602 virt/kvm/kvm_main.c list_for_each_entry(kvm, &vm_list, vm_list)
kvm 1603 virt/kvm/kvm_main.c *val += *(u32 *)((void *)kvm + offset);
kvm 1613 virt/kvm/kvm_main.c struct kvm *kvm;
kvm 1619 virt/kvm/kvm_main.c list_for_each_entry(kvm, &vm_list, vm_list)
kvm 1621 virt/kvm/kvm_main.c vcpu = kvm->vcpus[i];
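One pattern recurs throughout the listing: struct kvm is the per-VM handle, and nearly everything else (each kvm_vcpu via kvm_host.h:61, the PIT via i8254.h:45, the ioapic via ioapic.h:60, the coalesced-MMIO device via coalesced_mmio.h:14) keeps a back-pointer to it, which is why expressions such as vcpu->kvm->arch.vpit dominate the results. The toy userspace program below is a minimal sketch of that ownership shape only; the structs are simplified stand-ins for the real kernel definitions, and kvm_get_vcpu here is a hypothetical helper added for illustration.

    /* Toy model of the struct kvm back-pointer pattern seen above.
     * Compiles standalone; not kernel code. */
    #include <stdio.h>

    #define KVM_MAX_VCPUS 4

    struct kvm;                      /* forward declaration, as in irq.h:35 */

    struct kvm_vcpu {
        struct kvm *kvm;             /* back-pointer, as in kvm_host.h:61 */
        int vcpu_id;
    };

    struct kvm_pit {
        struct kvm *kvm;             /* device back-pointer, as in i8254.h:45 */
    };

    struct kvm {
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        struct kvm_pit *pit;
    };

    /* Hypothetical helper: VM-wide operations take the handle first,
     * mirroring calls like gfn_to_memslot(kvm, gfn) in the listing. */
    static struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
    {
        return (i >= 0 && i < KVM_MAX_VCPUS) ? kvm->vcpus[i] : NULL;
    }

    int main(void)
    {
        struct kvm vm = { { 0 }, 0 };
        struct kvm_vcpu vcpu0 = { &vm, 0 };
        struct kvm_pit pit = { &vm };

        vm.vcpus[0] = &vcpu0;
        vm.pit = &pit;

        /* From any vcpu or device, the whole VM is reachable via ->kvm. */
        printf("vcpu0 and the PIT share one VM: %d\n",
               kvm_get_vcpu(&vm, 0)->kvm == pit.kvm);
        return 0;
    }

The same shape explains the locking calls in the results: kvm->mmu_lock (a spinlock) guards the rmap and shadow-page work in mmu.c, kvm->slots_lock (an rwsem) guards memslot access, and kvm->lock (a mutex) serializes irqchip and vcpu-table updates, all reached through that one per-VM handle.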