page_to_pfn        99 arch/x86/kernel/kvm.c 	pte_phys = page_to_pfn(page);
page_to_pfn       165 arch/x86/kernel/machine_kexec_32.c 	page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) << PAGE_SHIFT);
page_to_pfn       165 arch/x86/kernel/machine_kexec_64.c 	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
page_to_pfn       390 arch/x86/kernel/vmi_32.c 	vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));
page_to_pfn       316 arch/x86/kvm/svm.c 	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
page_to_pfn       433 arch/x86/kvm/svm.c 	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
page_to_pfn       658 arch/x86/kvm/svm.c 	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
page_to_pfn       484 arch/x86/mm/pageattr.c 	paravirt_alloc_pte(&init_mm, page_to_pfn(base));
page_to_pfn        29 arch/x86/mm/pgtable.c 	paravirt_release_pte(page_to_pfn(pte));
page_to_pfn       274 arch/x86/vdso/vdso32-setup.c 	__set_fixmap(FIX_VDSO, page_to_pfn(vdso32_pages[0]) << PAGE_SHIFT,
page_to_pfn       976 arch/x86/xen/enlighten.c 		       page_to_pfn(page), type,
page_to_pfn       792 arch/x86/xen/mmu.c 		unsigned long pfn = page_to_pfn(page);
page_to_pfn       930 arch/x86/xen/mmu.c 		unsigned long pfn = page_to_pfn(page);
page_to_pfn        64 block/blk-merge.c 		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
page_to_pfn      2026 fs/binfmt_elf.c 							 page_to_pfn(page));
page_to_pfn      1502 fs/binfmt_elf_fdpic.c 				flush_cache_page(vma, addr, page_to_pfn(page));
page_to_pfn      1326 fs/compat.c    						 page_to_pfn(kmapped_page));
page_to_pfn       233 fs/exec.c      	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
page_to_pfn        26 include/asm-frv/pgalloc.h 	__set_pmd((PMD), page_to_pfn(PAGE) << PAGE_SHIFT | _PAGE_TABLE);	\
page_to_pfn       423 include/asm-frv/pgtable.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
page_to_pfn        76 include/asm-generic/memory_model.h extern unsigned long page_to_pfn(struct page *page);
page_to_pfn        74 include/asm-m32r/io.h #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
page_to_pfn       290 include/asm-m32r/pgtable.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), pgprot)
page_to_pfn       144 include/asm-m68k/cacheflush.h 	flush_cache_page(vma, vaddr, page_to_pfn(page));
page_to_pfn       152 include/asm-m68k/cacheflush.h 	flush_cache_page(vma, vaddr, page_to_pfn(page));
page_to_pfn       100 include/asm-m68k/motorola_pgtable.h #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
page_to_pfn       202 include/asm-m68k/page.h 	pfn_to_virt(page_to_pfn(page));					\
page_to_pfn       104 include/asm-m68k/sun3_pgtable.h #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
page_to_pfn        37 include/asm-m68k/virtconvert.h 	page_to_pfn(__page) << PAGE_SHIFT;				\
page_to_pfn       119 include/asm-mn10300/page.h #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
page_to_pfn        29 include/asm-mn10300/pgalloc.h 	set_pmd(pmd, __pmd((page_to_pfn(pte) << PAGE_SHIFT) | _PAGE_TABLE));
page_to_pfn       384 include/asm-mn10300/pgtable.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
page_to_pfn        64 include/asm-parisc/cacheflush.h 	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
page_to_pfn        71 include/asm-parisc/cacheflush.h 	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
page_to_pfn       164 include/asm-parisc/page.h #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
page_to_pfn       349 include/asm-parisc/pgtable.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
page_to_pfn        19 include/asm-um/pgalloc.h 		((unsigned long long)page_to_pfn(pte) <<	\
page_to_pfn       275 include/asm-um/pgtable.h #define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
page_to_pfn        95 include/asm-x86/io_32.h #define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
page_to_pfn       164 include/asm-x86/io_64.h #define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
page_to_pfn        60 include/asm-x86/pgalloc.h 	unsigned long pfn = page_to_pfn(pte);
page_to_pfn       116 include/asm-x86/pgtable_32.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
page_to_pfn       236 include/asm-x86/pgtable_64.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn((page)), (pgprot))
page_to_pfn        75 include/asm-xtensa/dma-mapping.h 	return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
page_to_pfn       161 include/asm-xtensa/page.h #define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
page_to_pfn       163 include/asm-xtensa/page.h #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
page_to_pfn       239 include/asm-xtensa/pgtable.h #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
page_to_pfn        43 include/linux/mm.h #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
page_to_pfn       586 include/linux/mm.h 	return __va(page_to_pfn(page) << PAGE_SHIFT);
page_to_pfn        84 include/linux/swapops.h 			page_to_pfn(page));
page_to_pfn       637 init/main.c    	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
page_to_pfn       640 init/main.c    		    page_to_pfn(virt_to_page((void *)initrd_start)),
page_to_pfn       427 kernel/kexec.c 		pfn   = page_to_pfn(pages);
page_to_pfn       708 kernel/kexec.c 		addr = page_to_pfn(page) << PAGE_SHIFT;
page_to_pfn       723 kernel/kexec.c 		if (page_to_pfn(page) >
page_to_pfn       728 kernel/kexec.c 		addr = page_to_pfn(page) << PAGE_SHIFT;
page_to_pfn       807 kernel/kexec.c 		result = kimage_add_page(image, page_to_pfn(page)
page_to_pfn       633 kernel/power/snapshot.c 		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
page_to_pfn       639 kernel/power/snapshot.c 		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
page_to_pfn       645 kernel/power/snapshot.c 		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
page_to_pfn       651 kernel/power/snapshot.c 		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
page_to_pfn       657 kernel/power/snapshot.c 		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
page_to_pfn       663 kernel/power/snapshot.c 		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
page_to_pfn      1125 kernel/power/snapshot.c 		memory_bm_set_bit(bm, page_to_pfn(page));
page_to_pfn      1175 kernel/power/snapshot.c 		memory_bm_set_bit(copy_bm, page_to_pfn(page));
page_to_pfn      1561 kernel/power/snapshot.c 			memory_bm_set_bit(bm, page_to_pfn(page));
page_to_pfn       192 mm/bounce.c    		if (page_to_pfn(page) <= q->bounce_pfn)
page_to_pfn       350 mm/memory_hotplug.c 	max_mapnr = max(page_to_pfn(page), max_mapnr);
page_to_pfn       543 mm/memory_hotplug.c 	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
page_to_pfn       129 mm/mm_init.c   	BUG_ON(page_to_pfn(page) != pfn);
page_to_pfn       226 mm/nommu.c     	return page_to_pfn(virt_to_page(addr));
page_to_pfn       183 mm/page_alloc.c 	unsigned long pfn = page_to_pfn(page);
page_to_pfn       198 mm/page_alloc.c 	if (!pfn_valid_within(page_to_pfn(page)))
page_to_pfn       278 mm/page_alloc.c 			p = pfn_to_page(page_to_pfn(page) + i);
page_to_pfn       298 mm/page_alloc.c 			p = pfn_to_page(page_to_pfn(page) + i);
page_to_pfn       379 mm/page_alloc.c 	if (!pfn_valid_within(page_to_pfn(buddy)))
page_to_pfn       426 mm/page_alloc.c 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
page_to_pfn       703 mm/page_alloc.c 		if (!pfn_valid_within(page_to_pfn(page))) {
page_to_pfn       730 mm/page_alloc.c 	start_pfn = page_to_pfn(page);
page_to_pfn       959 mm/page_alloc.c 			pfn = page_to_pfn(list_entry(curr, struct page, lru));
page_to_pfn      3520 mm/page_alloc.c 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
page_to_pfn      4543 mm/page_alloc.c 	pfn = page_to_pfn(page);
page_to_pfn      4570 mm/page_alloc.c 	pfn = page_to_pfn(page);
page_to_pfn       263 mm/rmap.c      	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
page_to_pfn       652 mm/rmap.c      			printk (KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
page_to_pfn       727 mm/rmap.c      	flush_cache_page(vma, address, page_to_pfn(page));
page_to_pfn       484 mm/sparse.c    	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
page_to_pfn       516 mm/sparse.c    		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
page_to_pfn       212 mm/vmalloc.c   	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
page_to_pfn       776 mm/vmscan.c    		page_pfn = page_to_pfn(page);
page_to_pfn      1276 sound/oss/au1550_ac97.c 	if (remap_pfn_range(vma, vma->vm_start, page_to_pfn(virt_to_page(db->rawbuf)),
page_to_pfn       465 sound/pci/emu10k1/memory.c 		if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) {
page_to_pfn       726 virt/kvm/kvm_main.c 		return page_to_pfn(bad_page);
page_to_pfn       739 virt/kvm/kvm_main.c 			return page_to_pfn(bad_page);
page_to_pfn       745 virt/kvm/kvm_main.c 		pfn = page_to_pfn(page[0]);
page_to_pfn       770 virt/kvm/kvm_main.c 	kvm_release_pfn_clean(page_to_pfn(page));
page_to_pfn       783 virt/kvm/kvm_main.c 	kvm_release_pfn_dirty(page_to_pfn(page));
page_to_pfn       796 virt/kvm/kvm_main.c 	kvm_set_pfn_dirty(page_to_pfn(page));
page_to_pfn      1722 virt/kvm/kvm_main.c 	bad_pfn = page_to_pfn(bad_page);
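
The hits above reduce to a few recurring idioms: shifting the returned pfn left by PAGE_SHIFT to form a physical address (the page_to_phys() and page_to_virt() definitions), passing the pfn to pfn_pte() inside the mk_pte() macros, and round-tripping through pfn_to_page() as nth_page() does. The fragment below is a minimal, illustrative module sketch of those idioms only; it is not taken from any file in the listing, and the function and module names are made up for the example.

/*
 * Illustrative only -- not from any file listed above.  Shows the
 * common page_to_pfn() idioms: pfn -> physical address (cf. the
 * page_to_phys() definitions) and pfn -> pte (cf. the mk_pte()
 * definitions).
 */
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/pgtable.h>

static int __init page_to_pfn_demo_init(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	unsigned long pfn;
	dma_addr_t phys;
	pte_t pte;

	if (!page)
		return -ENOMEM;

	pfn  = page_to_pfn(page);               /* struct page -> page frame number */
	phys = (dma_addr_t)pfn << PAGE_SHIFT;   /* pfn -> physical address, cf. page_to_phys() */
	pte  = pfn_pte(pfn, PAGE_KERNEL);       /* pfn + protection bits -> pte, cf. mk_pte() */

	printk(KERN_INFO "page_to_pfn demo: pfn=%#lx phys=%#llx pte=%#llx\n",
	       pfn, (unsigned long long)phys,
	       (unsigned long long)pte_val(pte));

	__free_page(page);
	return 0;
}

static void __exit page_to_pfn_demo_exit(void)
{
}

module_init(page_to_pfn_demo_init);
module_exit(page_to_pfn_demo_exit);
MODULE_LICENSE("GPL");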