__pa 96 arch/x86/kernel/aperture_64.c if (!p || __pa(p)+aper_size > 0xffffffff) {
__pa 101 arch/x86/kernel/aperture_64.c free_bootmem(__pa(p), aper_size);
__pa 105 arch/x86/kernel/aperture_64.c aper_size >> 10, __pa(p));
__pa 106 arch/x86/kernel/aperture_64.c insert_aperture_resource((u32)__pa(p), aper_size);
__pa 107 arch/x86/kernel/aperture_64.c register_nosave_region((u32)__pa(p) >> PAGE_SHIFT,
__pa 108 arch/x86/kernel/aperture_64.c (u32)__pa(p+aper_size) >> PAGE_SHIFT);
__pa 110 arch/x86/kernel/aperture_64.c return (u32)__pa(p);
__pa 1271 arch/x86/kernel/apic_32.c apic_phys = __pa(apic_phys);
__pa 1217 arch/x86/kernel/apic_64.c apic_phys = __pa(apic_phys);
__pa 69 arch/x86/kernel/cpu/intel.c __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
__pa 80 arch/x86/kernel/efi_32.c gdt_descr.address = __pa(get_cpu_gdt_table(0));
__pa 335 arch/x86/kernel/es7000_32.c vect = ((unsigned long)__pa(eip)/0x1000) << 16;
__pa 2900 arch/x86/kernel/io_apic_32.c ioapic_phys = __pa(ioapic_phys);
__pa 2936 arch/x86/kernel/io_apic_64.c ioapic_phys = __pa(ioapic_phys);
__pa 59 arch/x86/kernel/kvm.c a1 = __pa(buffer);
__pa 103 arch/x86/kernel/kvm.c pte_phys = (unsigned long)__pa(dest);
__pa 54 arch/x86/kernel/kvmclock.c low = (int)__pa(&wall_clock);
__pa 55 arch/x86/kernel/kvmclock.c high = ((u64)__pa(&wall_clock) >> 32);
__pa 95 arch/x86/kernel/kvmclock.c low = (int)__pa(&per_cpu(hv_clock, cpu)) | 1;
__pa 96 arch/x86/kernel/kvmclock.c high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32);
__pa 151 arch/x86/kernel/machine_kexec_32.c page_list[PA_CONTROL_PAGE] = __pa(control_page);
__pa 153 arch/x86/kernel/machine_kexec_32.c page_list[PA_PGD] = __pa(kexec_pgd);
__pa 156 arch/x86/kernel/machine_kexec_32.c page_list[PA_PMD_0] = __pa(kexec_pmd0);
__pa 158 arch/x86/kernel/machine_kexec_32.c page_list[PA_PMD_1] = __pa(kexec_pmd1);
__pa 161 arch/x86/kernel/machine_kexec_32.c page_list[PA_PTE_0] = __pa(kexec_pte0);
__pa 163 arch/x86/kernel/machine_kexec_32.c page_list[PA_PTE_1] = __pa(kexec_pte1);
__pa 62 arch/x86/kernel/machine_kexec_64.c set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
__pa 98 arch/x86/kernel/machine_kexec_64.c set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
__pa 215 arch/x86/kernel/machine_kexec_64.c (unsigned long)__pa(page_address(image->control_code_page));
__pa 839 arch/x86/kernel/pci-calgary_64.c table_phys = (u64)__pa(tbl->it_base);
__pa 103 arch/x86/kernel/pci-dma.c free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);
__pa 578 arch/x86/kernel/pci-gart_64.c enable_gart_translation(dev, __pa(agp_gatt_table));
__pa 854 arch/x86/kernel/pci-gart_64.c gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
__pa 706 arch/x86/kernel/setup.c addr, __pa(addr), *addr);
__pa 167 arch/x86/kernel/setup_percpu.c cpu, __pa(ptr));
__pa 173 arch/x86/kernel/setup_percpu.c cpu, node, __pa(ptr));
__pa 188 arch/x86/kernel/tce_64.c free_bootmem(__pa(tbl), size);
__pa 659 arch/x86/kernel/tlb_uv.c pa = __pa((unsigned long)adp);
__pa 453 arch/x86/kernel/vmi_32.c vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
__pa 459 arch/x86/kernel/vmi_32.c vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
__pa 466 arch/x86/kernel/vmi_32.c vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE | VMI_PAGE_PD);
__pa 472 arch/x86/kernel/vmi_32.c vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
__pa 480 arch/x86/kernel/vmi_32.c vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PMD);
__pa 483 arch/x86/kernel/vmi_32.c vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PGD);
__pa 505 arch/x86/kernel/vmi_32.c vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
__pa 513 arch/x86/kernel/vmi_32.c vmi_check_page_type(__pa(pudp) >> PAGE_SHIFT, VMI_PAGE_PGD);
__pa 520 arch/x86/kernel/vmi_32.c vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
__pa 527 arch/x86/kernel/vmi_32.c vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD);
__pa 571 arch/x86/kernel/vmi_32.c ap.cr3 = __pa(swapper_pg_dir);
__pa 476 arch/x86/kvm/mmu.c sp = page_header(__pa(spte));
__pa 537 arch/x86/kvm/mmu.c sp = page_header(__pa(spte));
__pa 1099 arch/x86/kvm/mmu.c struct kvm_mmu_page *sp = page_header(__pa(pte));
__pa 1275 arch/x86/kvm/mmu.c __pa(new_table->spt)
__pa 1378 arch/x86/kvm/mmu.c root = __pa(sp->spt);
__pa 1401 arch/x86/kvm/mmu.c root = __pa(sp->spt);
__pa 1405 arch/x86/kvm/mmu.c vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
__pa 345 arch/x86/kvm/paging_tmpl.h shadow_addr = __pa(shadow_page->spt);
__pa 550 arch/x86/kvm/svm.c control->msrpm_base_pa = __pa(svm->msrpm);
__pa 321 arch/x86/kvm/vmx.c u64 phys_addr = __pa(vmcs);
__pa 623 arch/x86/kvm/vmx.c u64 phys_addr = __pa(vmx->vmcs);
__pa 1043 arch/x86/kvm/vmx.c u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
__pa 260 arch/x86/lguest/boot.c hcall(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES, 0);
__pa 270 arch/x86/lguest/boot.c hcall(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES, 0);
__pa 282 arch/x86/lguest/boot.c lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0);
__pa 482 arch/x86/lguest/boot.c lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, pteval.pte_low);
__pa 491 arch/x86/lguest/boot.c lazy_hcall(LHCALL_SET_PMD, __pa(pmdp)&PAGE_MASK,
__pa 492 arch/x86/lguest/boot.c (__pa(pmdp)&(PAGE_SIZE-1))/4, 0);
__pa 840 arch/x86/lguest/boot.c hcall(LHCALL_SHUTDOWN, __pa("Power down"), LGUEST_SHUTDOWN_POWEROFF, 0);
__pa 850 arch/x86/lguest/boot.c hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0);
__pa 891 arch/x86/lguest/boot.c hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0);
__pa 901 arch/x86/lguest/boot.c hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0);
__pa 1044 arch/x86/lguest/boot.c init_pg_tables_start = __pa(pg0);
__pa 1045 arch/x86/lguest/boot.c init_pg_tables_end = __pa(pg0);
__pa 101 arch/x86/mm/init_32.c paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
__pa 102 arch/x86/mm/init_32.c set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
__pa 134 arch/x86/mm/init_32.c paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
__pa 135 arch/x86/mm/init_32.c set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
__pa 473 arch/x86/mm/init_32.c paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
__pa 551 arch/x86/mm/init_32.c set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
__pa 951 arch/x86/mm/init_32.c __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
__pa 241 arch/x86/mm/init_64.c set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
__pa 247 arch/x86/mm/init_64.c set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
__pa 305 arch/x86/mm/init_64.c *phys = __pa(adr);
__pa 613 arch/x86/mm/init_64.c last_map_addr = phys_pud_update(pgd, __pa(start),
__pa 614 arch/x86/mm/init_64.c __pa(end), page_size_mask);
__pa 619 arch/x86/mm/init_64.c last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
__pa 1188 arch/x86/mm/init_64.c entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
__pa 562 arch/x86/mm/ioremap.c paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
__pa 163 arch/x86/mm/numa_64.c ptr = __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
__pa 194 arch/x86/mm/numa_64.c nodedata_phys = __pa(node_data[nodeid]);
__pa 228 arch/x86/mm/numa_64.c bootmap_start = __pa(bootmap);
__pa 94 arch/x86/mm/pageattr.c return __pa(_text) >> PAGE_SHIFT;
__pa 99 arch/x86/mm/pageattr.c return __pa(roundup((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
__pa 270 arch/x86/mm/pageattr.c if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
__pa 271 arch/x86/mm/pageattr.c __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
__pa 869 arch/x86/mm/pageattr.c if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
__pa 886 arch/x86/mm/pageattr.c start = __pa(addr[i]);
__pa 888 arch/x86/mm/pageattr.c if (end != __pa(addr[i + 1]))
__pa 900 arch/x86/mm/pageattr.c unsigned long tmp = __pa(addr[i]);
__pa 905 arch/x86/mm/pageattr.c if (end != __pa(addr[i + 1]))
__pa 926 arch/x86/mm/pageattr.c if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
__pa 942 arch/x86/mm/pageattr.c free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
__pa 953 arch/x86/mm/pageattr.c unsigned long start = __pa(addr[i]);
__pa 957 arch/x86/mm/pageattr.c if (end != __pa(addr[i + 1]))
__pa 532 arch/x86/mm/pat.c (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
__pa 36 arch/x86/mm/pgtable.c paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
__pa 43 arch/x86/mm/pgtable.c paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
__pa 77 arch/x86/mm/pgtable.c paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
__pa 78 arch/x86/mm/pgtable.c __pa(swapper_pg_dir) >> PAGE_SHIFT,
__pa 127 arch/x86/mm/pgtable.c paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
__pa 131 arch/x86/mm/pgtable.c set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
__pa 44 arch/x86/power/hibernate_32.c set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
__pa 67 arch/x86/power/hibernate_32.c set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
__pa 141 arch/x86/power/hibernate_32.c __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
__pa 57 arch/x86/power/hibernate_64.c set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
__pa 95 arch/x86/power/hibernate_64.c if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
__pa 98 arch/x86/power/hibernate_64.c mk_kernel_pgd(__pa(pud)));
__pa 781 arch/x86/xen/enlighten.c __xen_write_cr3(false, __pa(user_pgd));
__pa 908 arch/x86/xen/enlighten.c __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
__pa 1439 arch/x86/xen/enlighten.c unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
__pa 1468 arch/x86/xen/enlighten.c pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
__pa 1554 arch/x86/xen/enlighten.c pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
__pa 1565 arch/x86/xen/enlighten.c __xen_write_cr3(true, __pa(pgd));
__pa 1568 arch/x86/xen/enlighten.c reserve_early(__pa(xen_start_info->pt_base),
__pa 1569 arch/x86/xen/enlighten.c __pa(xen_start_info->pt_base +
__pa 1582 arch/x86/xen/enlighten.c init_pg_tables_start = __pa(pgd);
__pa 1583 arch/x86/xen/enlighten.c init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
__pa 1593 arch/x86/xen/enlighten.c __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
__pa 1599 arch/x86/xen/enlighten.c pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
__pa 1601 arch/x86/xen/enlighten.c xen_write_cr3(__pa(swapper_pg_dir));
__pa 1603 arch/x86/xen/enlighten.c pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
__pa 1694 arch/x86/xen/enlighten.c ? __pa(xen_start_info->mod_start) : 0;
__pa 1696 arch/x86/xen/enlighten.c boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);
__pa 856 arch/x86/xen/mmu.c xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
__pa 860 arch/x86/xen/mmu.c xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(user_pgd)));
__pa 869 arch/x86/xen/mmu.c xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
__pa 968 arch/x86/xen/mmu.c xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
__pa 975 arch/x86/xen/mmu.c xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(user_pgd)));
__pa 1053 arch/x86/xen/mmu.c if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
__pa 1081 arch/x86/xen/mmu.c if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
__pa 61 arch/x86/xen/setup.c e820_add_region(__pa(xen_start_info->mfn_list),
__pa 268 arch/x86/xen/smp.c per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
__pa 28 include/asm-cris/io.h return __pa(address);
__pa 61 include/asm-cris/page.h #define page_to_phys(page) __pa((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
__pa 183 include/asm-cris/pgtable.h pte_val(pte) = __pa(page) | pgprot_val(pgprot);
__pa 62 include/asm-frv/page.h #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
__pa 63 include/asm-frv/page.h #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
__pa 23 include/asm-frv/pgalloc.h #define pmd_populate_kernel(mm, pmd, pte) __set_pmd(pmd, __pa(pte) | _PAGE_TABLE)
__pa 27 include/asm-m32r/io.h return __pa(address);
__pa 76 include/asm-m32r/page.h #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
__pa 77 include/asm-m32r/page.h #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
__pa 9 include/asm-m32r/pgalloc.h set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
__pa 120 include/asm-m68k/cacheflush.h : : "a" (__pa(vaddr)));
__pa 121 include/asm-m68k/motorola_pgtable.h pgd_val(*pgdp) = _PAGE_TABLE | _PAGE_ACCESSED | __pa(pmdp);
__pa 173 include/asm-m68k/page.h #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
__pa 69 include/asm-m68k/sun3_pgalloc.h pmd_val(*pmd) = __pa((unsigned long)pte);
__pa 74 include/asm-m68k/sun3_pgalloc.h pmd_val(*pmd) = __pa((unsigned long)page_address(page));
__pa 17 include/asm-m68k/sun3_pgtable.h #define VTOP(addr) __pa(addr)
__pa 20 include/asm-m68k/virtconvert.h return __pa(address);
__pa 31 include/asm-m68k/virtconvert.h __pa(PAGE_OFFSET + (((page) - pg_data_map[0].node_mem_map) << PAGE_SHIFT))
__pa 230 include/asm-mn10300/io.h return __pa(address);
__pa 117 include/asm-mn10300/page.h #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
__pa 118 include/asm-mn10300/page.h #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
__pa 24 include/asm-mn10300/pgalloc.h set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE))
__pa 13 include/asm-parisc/io.h #define virt_to_phys(a) ((unsigned long)__pa(a))
__pa 51 include/asm-parisc/mmu_context.h mtctl(__pa(next->pgd), 25);
__pa 162 include/asm-parisc/page.h #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
__pa 165 include/asm-parisc/page.h #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
__pa 37 include/asm-parisc/pgalloc.h + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
__pa 61 include/asm-parisc/pgalloc.h (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
__pa 109 include/asm-parisc/pgalloc.h + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
__pa 113 include/asm-parisc/pgalloc.h + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
__pa 17 include/asm-um/io.h return __pa((void *) address);
__pa 116 include/asm-um/page.h #define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
__pa 15 include/asm-um/pgalloc.h set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
__pa 60 include/asm-um/pgtable-3level.h set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
__pa 274 include/asm-um/pgtable.h #define __virt_to_page(virt) phys_to_page(__pa(virt))
__pa 71 include/asm-x86/io_32.h return __pa(address);
__pa 152 include/asm-x86/io_64.h return __pa(address);
__pa 99 include/asm-x86/mmzone_32.h __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
__pa 102 include/asm-x86/mmzone_32.h __pa(MAX_DMA_ADDRESS))
__pa 106 include/asm-x86/mmzone_32.h __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
__pa 109 include/asm-x86/mmzone_32.h __pa(MAX_DMA_ADDRESS))
__pa 117 include/asm-x86/mmzone_32.h __pa(MAX_DMA_ADDRESS)); \
__pa 124 include/asm-x86/mmzone_32.h __pa(MAX_DMA_ADDRESS)); \
__pa 185 include/asm-x86/page.h #define __pa_symbol(x) __pa(__phys_reloc_hide((unsigned long)(x)))
__pa 190 include/asm-x86/page.h #define __boot_pa(x) __pa(x)
__pa 196 include/asm-x86/page.h #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
__pa 53 include/asm-x86/pgalloc.h paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
__pa 54 include/asm-x86/pgalloc.h set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
__pa 87 include/asm-x86/pgalloc.h paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
__pa 88 include/asm-x86/pgalloc.h set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
__pa 95 include/asm-x86/pgalloc.h paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
__pa 96 include/asm-x86/pgalloc.h set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
__pa 118 include/asm-x86/pgtable-3level.h if (__pa(pudp) >= pgd && __pa(pudp) <
__pa 186 include/asm-x86/processor.h write_cr3(__pa(pgdir));
__pa 47 include/asm-x86/uv/uv_bau.h #define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
__pa 184 include/asm-x86/uv/uv_hub.h return __pa(v) | uv_hub_info->gnode_upper;
__pa 121 include/asm-x86/xen/page.h #define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
__pa 122 include/asm-x86/xen/page.h #define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v))))
__pa 48 include/asm-xtensa/io.h return __pa(address);
__pa 160 include/asm-xtensa/page.h #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
__pa 162 include/asm-xtensa/page.h #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
__pa 99 include/linux/bootmem.h __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
__pa 101 include/linux/bootmem.h __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
__pa 105 include/linux/bootmem.h __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
__pa 107 include/linux/bootmem.h __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
__pa 111 include/linux/bootmem.h __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
__pa 113 include/linux/bootmem.h __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
__pa 317 include/linux/efi.h unsigned long paddr = __pa(start + i);
__pa 1362 kernel/kexec.c return __pa((unsigned long)(char *)&vmcoreinfo_note);
__pa 4084 mm/page_alloc.c __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
__pa 58 mm/sparse-vmemmap.c __pa(MAX_DMA_ADDRESS));
__pa 80 mm/sparse-vmemmap.c entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
__pa 287 mm/sparse.c section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
__pa 299 mm/sparse.c usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
__pa 300 mm/sparse.c pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
__pa 450 mm/sparse.c free_bootmem(__pa(usemap_map), size);
__pa 588 sound/parisc/harmony.c ss->runtime->dma_addr = __pa(ss->runtime->dma_area);
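The recurring pattern in the entries above is converting a direct-mapped kernel virtual address to a physical address with __pa() and then deriving a page frame number or struct page from it, as the virt_to_page()/virt_to_pfn() macros in the asm-<arch>/page.h hits do. The snippet below is a minimal sketch of that pattern only, not code taken from any file listed here; the helper name example_kvaddr_to_page is hypothetical, and it assumes a lowmem (direct-mapped) address on a 2.6-era kernel, since __pa() is not valid for vmalloc or highmem mappings.

/* Hypothetical helper, for illustration only. */
#include <linux/mm.h>
#include <asm/page.h>

static struct page *example_kvaddr_to_page(void *kaddr)
{
	unsigned long phys = __pa(kaddr);        /* kernel virtual -> physical */
	unsigned long pfn  = phys >> PAGE_SHIFT; /* physical -> page frame number */

	return pfn_to_page(pfn);                 /* PFN -> struct page */
}

This is the same transformation spelled out by the one-line macros above (for example, virt_to_page(kaddr) as pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)); the page-table setup hits simply OR the resulting physical address with table flags such as _PAGE_TABLE or _KERNPG_TABLE instead of converting it to a struct page.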