page               36 arch/x86/boot/video.c 	u16 ax, page;
page               42 arch/x86/boot/video.c 	    : "+a" (ax), "=b" (page)
page               47 arch/x86/boot/video.c 	boot_params.screen_info.orig_video_page = page >> 8;
page              504 arch/x86/kernel/alternative.c 	struct page *pages[2];
page              343 arch/x86/kernel/amd_iommu.c 	u64 __pte, *pte, *page;
page              355 arch/x86/kernel/amd_iommu.c 		page = (u64 *)get_zeroed_page(GFP_KERNEL);
page              356 arch/x86/kernel/amd_iommu.c 		if (!page)
page              358 arch/x86/kernel/amd_iommu.c 		*pte = IOMMU_L2_PDE(virt_to_phys(page));
page              365 arch/x86/kernel/amd_iommu.c 		page = (u64 *)get_zeroed_page(GFP_KERNEL);
page              366 arch/x86/kernel/amd_iommu.c 		if (!page)
page              368 arch/x86/kernel/amd_iommu.c 		*pte = IOMMU_L1_PDE(virt_to_phys(page));
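
The amd_iommu.c hits above are lazy page-table growth: when a directory entry is empty, the driver allocates one zeroed page and stores its physical address in the entry. A minimal sketch of the idiom, assuming a hypothetical make_pde() in place of the driver-internal IOMMU_L1_PDE()/IOMMU_L2_PDE() encodings:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/io.h>

/* Hypothetical PDE encoding; the real driver uses IOMMU_L1_PDE()/IOMMU_L2_PDE(). */
#define make_pde(phys)	((phys) | 1ULL)		/* bit 0 as "present", for illustration */

static int grow_directory(u64 *pde_slot)
{
	u64 *page;

	if (*pde_slot & 1ULL)			/* slot already populated */
		return 0;

	page = (u64 *)get_zeroed_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* The entry stores the *physical* address of the next level. */
	*pde_slot = make_pde(virt_to_phys(page));
	return 0;
}
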
page               48 arch/x86/kernel/cpu/mtrr/if.c 	if (!page) {
page               67 arch/x86/kernel/cpu/mtrr/if.c 	if (!page) {
page               36 arch/x86/kernel/kdebugfs.c 	struct page *pg;
page              121 arch/x86/kernel/kdebugfs.c 	struct page *pg;
page               95 arch/x86/kernel/kvm.c 	struct page *page;
page               98 arch/x86/kernel/kvm.c 	page = kmap_atomic_to_page(dest);
page               99 arch/x86/kernel/kvm.c 	pte_phys = page_to_pfn(page);
page               52 arch/x86/kernel/machine_kexec_64.c 		struct page *page;
page               55 arch/x86/kernel/machine_kexec_64.c 		page = kimage_alloc_control_pages(image, 0);
page               56 arch/x86/kernel/machine_kexec_64.c 		if (!page) {
page               60 arch/x86/kernel/machine_kexec_64.c 		level2p = (pmd_t *)page_address(page);
page               85 arch/x86/kernel/machine_kexec_64.c 		struct page *page;
page               88 arch/x86/kernel/machine_kexec_64.c 		page = kimage_alloc_control_pages(image, 0);
page               89 arch/x86/kernel/machine_kexec_64.c 		if (!page) {
page               93 arch/x86/kernel/machine_kexec_64.c 		level3p = (pud_t *)page_address(page);
page              141 arch/x86/kernel/pci-dma.c 	struct page *page;
page              148 arch/x86/kernel/pci-dma.c 	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
page              149 arch/x86/kernel/pci-dma.c 	if (!page)
page              152 arch/x86/kernel/pci-dma.c 	addr = page_to_phys(page);
page              154 arch/x86/kernel/pci-dma.c 		__free_pages(page, get_order(size));
page              165 arch/x86/kernel/pci-dma.c 	return page_address(page);
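
pci-dma.c (lines 141-165) shows the generic coherent-allocation pattern: allocate node-local pages of the right order, convert the struct page to a physical address, verify it fits the device's DMA mask (freeing and failing otherwise), and hand the caller the kernel virtual address. A sketch of the same flow, with the mask check simplified to a plain comparison:

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/device.h>

static void *coherent_alloc_sketch(struct device *dev, size_t size, gfp_t flag)
{
	struct page *page;
	u64 addr;

	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > *dev->dma_mask) {	/* unreachable by this device */
		__free_pages(page, get_order(size));
		return NULL;
	}
	return page_address(page);		/* lowmem, so the linear mapping works */
}
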
page              493 arch/x86/kernel/pci-gart_64.c 	struct page *page;
page              497 arch/x86/kernel/pci-gart_64.c 		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
page              498 arch/x86/kernel/pci-gart_64.c 		if (!page)
page              502 arch/x86/kernel/pci-gart_64.c 		paddr = dma_map_area(dev, page_to_phys(page), size,
page              508 arch/x86/kernel/pci-gart_64.c 			return page_address(page);
page              510 arch/x86/kernel/pci-gart_64.c 		__free_pages(page, get_order(size));
page              293 arch/x86/kernel/vmi_32.c 		struct page *page = pfn_to_page(boot_page_allocations[i].pfn);
page              294 arch/x86/kernel/vmi_32.c 		page->type = boot_page_allocations[i].type;
page              295 arch/x86/kernel/vmi_32.c 		page->type = boot_page_allocations[i].type &
page              315 arch/x86/kernel/vmi_32.c 	if (page_address(page))
page              316 arch/x86/kernel/vmi_32.c 		ptr = (u32 *)page_address(page);
page              340 arch/x86/kernel/vmi_32.c 		struct page *page = pfn_to_page(pfn);
page              342 arch/x86/kernel/vmi_32.c 			BUG_ON(page->type);
page              344 arch/x86/kernel/vmi_32.c 			BUG_ON(page->type == VMI_PAGE_NORMAL);
page              345 arch/x86/kernel/vmi_32.c 		page->type = type & ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
page              347 arch/x86/kernel/vmi_32.c 			check_zeroed_page(pfn, type, page);
page              361 arch/x86/kernel/vmi_32.c 		struct page *page = pfn_to_page(pfn);
page              362 arch/x86/kernel/vmi_32.c 		BUG_ON((page->type ^ type) & VMI_PAGE_PAE);
page              363 arch/x86/kernel/vmi_32.c 		BUG_ON(type == VMI_PAGE_NORMAL && page->type);
page              364 arch/x86/kernel/vmi_32.c 		BUG_ON((type & page->type) == 0);
page              375 arch/x86/kernel/vmi_32.c 	void *va = kmap_atomic(page, type);
page              390 arch/x86/kernel/vmi_32.c 	vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));
page               19 arch/x86/kvm/lapic.h 	struct page *regs_page;
page               22 arch/x86/kvm/lapic.h 	struct page *vapic_page;
page              279 arch/x86/kvm/mmu.c 	struct page *page;
page              284 arch/x86/kvm/mmu.c 		page = alloc_page(GFP_KERNEL);
page              285 arch/x86/kvm/mmu.c 		if (!page)
page              287 arch/x86/kvm/mmu.c 		set_page_private(page, 0);
page              288 arch/x86/kvm/mmu.c 		cache->objects[cache->nobjs++] = page_address(page);
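
The mmu.c hits at lines 279-288 are a pre-allocation cache being topped up: whole pages are allocated with GFP_KERNEL ahead of time so a later fault path can consume them without sleeping. A sketch under assumed types (the real structure is kvm_mmu_memory_cache):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

struct obj_cache_sketch {		/* stand-in for the real kvm_mmu_memory_cache */
	int nobjs;
	void *objects[4];
};

/* Pre-fill the cache with whole pages so a later, non-sleeping
 * consumer can pop page_address() pointers off objects[]. */
static int topup_cache(struct obj_cache_sketch *cache, int min)
{
	struct page *page;

	while (cache->nobjs < min) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);	/* start with a clean private word */
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}
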
page             1048 arch/x86/kvm/mmu.c 			struct kvm_mmu_page *page;
page             1050 arch/x86/kvm/mmu.c 			page = container_of(kvm->arch.active_mmu_pages.prev,
page             1052 arch/x86/kvm/mmu.c 			kvm_mmu_zap_page(kvm, page);
page             1104 arch/x86/kvm/mmu.c struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
page             1106 arch/x86/kvm/mmu.c 	struct page *page;
page             1114 arch/x86/kvm/mmu.c 	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
page             1117 arch/x86/kvm/mmu.c 	return page;
page             1999 arch/x86/kvm/mmu.c 	struct page *page;
page             2015 arch/x86/kvm/mmu.c 	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
page             2016 arch/x86/kvm/mmu.c 	if (!page)
page             2018 arch/x86/kvm/mmu.c 	vcpu->arch.mmu.pae_root = page_address(page);
page             2087 arch/x86/kvm/mmu.c 	struct kvm_mmu_page *page;
page             2089 arch/x86/kvm/mmu.c 	page = container_of(kvm->arch.active_mmu_pages.prev,
page             2091 arch/x86/kvm/mmu.c 	kvm_mmu_zap_page(kvm, page);
page               92 arch/x86/kvm/paging_tmpl.h 	struct page *page;
page               95 arch/x86/kvm/paging_tmpl.h 	page = gfn_to_page(kvm, table_gfn);
page               98 arch/x86/kvm/paging_tmpl.h 	table = kmap_atomic(page, KM_USER0);
page              104 arch/x86/kvm/paging_tmpl.h 	kvm_release_page_dirty(page);
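
paging_tmpl.h (lines 92-104) is the map/modify/release cycle on a guest frame: gfn_to_page() pins the page, kmap_atomic() maps it, and kvm_release_page_dirty() marks it dirty and drops the pin. A sketch with error handling elided (note the era's two-argument kmap_atomic(); the KM_USER0 slot argument was removed in later kernels):

#include <linux/kvm_host.h>
#include <linux/highmem.h>

/* Write one 64-bit word into a guest frame; error handling elided. */
static void write_guest_word(struct kvm *kvm, gfn_t table_gfn,
			     unsigned int index, u64 val)
{
	struct page *page = gfn_to_page(kvm, table_gfn);
	u64 *table = kmap_atomic(page, KM_USER0);

	table[index] = val;

	kunmap_atomic(table, KM_USER0);
	kvm_release_page_dirty(page);	/* mark dirty, drop the reference */
}
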
page              245 arch/x86/kvm/paging_tmpl.h static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
page              260 arch/x86/kvm/paging_tmpl.h 	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
page              269 arch/x86/kvm/paging_tmpl.h 	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
page               91 arch/x86/kvm/svm.c 	struct page *save_area;
page              421 arch/x86/kvm/svm.c 	struct page *iopm_pages;
page              629 arch/x86/kvm/svm.c 	struct page *page;
page              630 arch/x86/kvm/svm.c 	struct page *msrpm_pages;
page              643 arch/x86/kvm/svm.c 	page = alloc_page(GFP_KERNEL);
page              644 arch/x86/kvm/svm.c 	if (!page) {
page              656 arch/x86/kvm/svm.c 	svm->vmcb = page_address(page);
page              658 arch/x86/kvm/svm.c 	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
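
svm.c keeps both views of the freshly allocated VMCB page: page_address() gives the CPU's virtual pointer, and page_to_pfn() << PAGE_SHIFT gives the physical address the hardware is programmed with. The two-view idiom in isolation, as a hedged sketch:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/gfp.h>

/* Allocate one page and report both views of it: the CPU's virtual
 * pointer and the physical address a device would be programmed with. */
static int alloc_hw_block(void **va, unsigned long *pa)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;

	*va = page_address(page);
	*pa = page_to_pfn(page) << PAGE_SHIFT;
	return 0;
}
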
page              100 arch/x86/kvm/vmx.c static struct page *vmx_io_bitmap_a;
page              101 arch/x86/kvm/vmx.c static struct page *vmx_io_bitmap_b;
page              102 arch/x86/kvm/vmx.c static struct page *vmx_msr_bitmap;
page             1208 arch/x86/kvm/vmx.c 	struct page *pages;
page             2006 arch/x86/kvm/x86.c 		struct page *page;
page             2022 arch/x86/kvm/x86.c 		page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
page             2025 arch/x86/kvm/x86.c 		kaddr = kmap_atomic(page, KM_USER0);
page             2028 arch/x86/kvm/x86.c 		kvm_release_page_dirty(page);
page             2358 arch/x86/kvm/x86.c 	struct page *page;
page             2414 arch/x86/kvm/x86.c 		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
page             2415 arch/x86/kvm/x86.c 		vcpu->arch.pio.guest_pages[i] = page;
page             2416 arch/x86/kvm/x86.c 		if (!page) {
page             2774 arch/x86/kvm/x86.c 	struct page *page;
page             2780 arch/x86/kvm/x86.c 	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
page             2783 arch/x86/kvm/x86.c 	vcpu->arch.apic->vapic_page = page;
page             3864 arch/x86/kvm/x86.c 	struct page *page;
page             3877 arch/x86/kvm/x86.c 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page             3878 arch/x86/kvm/x86.c 	if (!page) {
page             3882 arch/x86/kvm/x86.c 	vcpu->arch.pio_data = page_address(page);
page              145 arch/x86/lib/mmx_32.c 		: : "r" (page) : "memory");
page              146 arch/x86/lib/mmx_32.c 		page += 64;
page              273 arch/x86/lib/mmx_32.c 			: : "r" (page) : "memory");
page              274 arch/x86/lib/mmx_32.c 		page += 128;
page              345 arch/x86/lib/mmx_32.c 			:"a" (0), "1" (page), "0" (1024)
page              352 arch/x86/lib/mmx_32.c 		slow_zero_page(page);
page              354 arch/x86/lib/mmx_32.c 		fast_clear_page(page);
page              741 arch/x86/lib/usercopy_32.c 			struct page *pg;
page              198 arch/x86/mm/fault.c 	__typeof__(pte_val(__pte(0))) page;
page              200 arch/x86/mm/fault.c 	page = read_cr3();
page              201 arch/x86/mm/fault.c 	page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
page              203 arch/x86/mm/fault.c 	printk("*pdpt = %016Lx ", page);
page              204 arch/x86/mm/fault.c 	if ((page >> PAGE_SHIFT) < max_low_pfn
page              205 arch/x86/mm/fault.c 	    && page & _PAGE_PRESENT) {
page              206 arch/x86/mm/fault.c 		page &= PAGE_MASK;
page              207 arch/x86/mm/fault.c 		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
page              209 arch/x86/mm/fault.c 		printk(KERN_CONT "*pde = %016Lx ", page);
page              210 arch/x86/mm/fault.c 		page &= ~_PAGE_NX;
page              213 arch/x86/mm/fault.c 	printk("*pde = %08lx ", page);
page              222 arch/x86/mm/fault.c 	if ((page >> PAGE_SHIFT) < max_low_pfn
page              223 arch/x86/mm/fault.c 	    && (page & _PAGE_PRESENT)
page              224 arch/x86/mm/fault.c 	    && !(page & _PAGE_PSE)) {
page              225 arch/x86/mm/fault.c 		page &= PAGE_MASK;
page              226 arch/x86/mm/fault.c 		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
page              228 arch/x86/mm/fault.c 		printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
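
The fault.c hits are dump_pagetable() walking the page tables by hand: each step masks the entry down to a frame with PAGE_MASK, maps it through the linear mapping with __va(), and indexes the next level with a slice of the faulting address. Worked example on non-PAE 32-bit x86 (PGDIR_SHIFT = 22, PAGE_SHIFT = 12, PTRS_PER_PTE = 1024): address 0xc0102345 gives pgd index 0xc0102345 >> 22 = 0x300 and pte index (0xc0102345 >> 12) & 0x3ff = 0x102. One step of the walk as a hypothetical helper:

#include <asm/page.h>

/* Hypothetical single step of the manual walk above: mask the table
 * entry to a frame address, map it via the linear mapping, and index
 * it with a slice of the faulting address. */
static unsigned long walk_one_level(unsigned long entry, unsigned long address,
				    unsigned int shift, unsigned long idx_mask)
{
	unsigned long *table = (unsigned long *)__va(entry & PAGE_MASK);

	return table[(address >> shift) & idx_mask];
}
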
page              922 arch/x86/mm/fault.c 		struct page *page;
page              925 arch/x86/mm/fault.c 		list_for_each_entry(page, &pgd_list, lru) {
page              926 arch/x86/mm/fault.c 			if (!vmalloc_sync_one(page_address(page),
page              937 arch/x86/mm/fault.c 		struct page *page;
page              942 arch/x86/mm/fault.c 		list_for_each_entry(page, &pgd_list, lru) {
page              944 arch/x86/mm/fault.c 			pgd = (pgd_t *)page_address(page) + pgd_index(address);
page               83 arch/x86/mm/gup.c 		struct page *page;
page               90 arch/x86/mm/gup.c 		page = pte_page(pte);
page               91 arch/x86/mm/gup.c 		get_page(page);
page               92 arch/x86/mm/gup.c 		pages[*nr] = page;
page              103 arch/x86/mm/gup.c 	VM_BUG_ON(page != compound_head(page));
page              104 arch/x86/mm/gup.c 	VM_BUG_ON(page_count(page) == 0);
page              105 arch/x86/mm/gup.c 	atomic_add(nr, &page->_count);
page              113 arch/x86/mm/gup.c 	struct page *head, *page;
page              127 arch/x86/mm/gup.c 	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
page              129 arch/x86/mm/gup.c 		VM_BUG_ON(compound_head(page) != head);
page              130 arch/x86/mm/gup.c 		pages[*nr] = page;
page              132 arch/x86/mm/gup.c 		page++;
page              170 arch/x86/mm/gup.c 	struct page *head, *page;
page              184 arch/x86/mm/gup.c 	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
page              186 arch/x86/mm/gup.c 		VM_BUG_ON(compound_head(page) != head);
page              187 arch/x86/mm/gup.c 		pages[*nr] = page;
page              189 arch/x86/mm/gup.c 		page++;
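
Two gup.c idioms meet in lines 103-157: the head-page helper takes nr references on a compound head with a single atomic_add() on page->_count (renamed _refcount in later kernels), and the huge-page walkers find the first relevant subpage as head + ((addr & ~PMD_MASK) >> PAGE_SHIFT). Worked example with 2 MB huge pages: addr = 0x40123000 is offset 0x123000 into its huge page, i.e. subpage index 0x123000 >> 12 = 0x123. A combined sketch:

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Take nr references on a compound head in one shot, then record the
 * subpages starting at addr's offset within the huge region. */
static void record_huge_range(struct page *head, unsigned long addr,
			      int nr, struct page **pages, int *idx)
{
	struct page *page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	int i;

	atomic_add(nr, &head->_count);	/* one atomic op, not nr get_page() calls */
	for (i = 0; i < nr; i++)
		pages[(*idx)++] = page++;
}
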
page                7 arch/x86/mm/highmem_32.c 	if (!PageHighMem(page))
page                8 arch/x86/mm/highmem_32.c 		return page_address(page);
page                9 arch/x86/mm/highmem_32.c 	return kmap_high(page);
page               16 arch/x86/mm/highmem_32.c 	if (!PageHighMem(page))
page               18 arch/x86/mm/highmem_32.c 	kunmap_high(page);
page               80 arch/x86/mm/highmem_32.c 	if (!PageHighMem(page))
page               81 arch/x86/mm/highmem_32.c 		return page_address(page);
page               88 arch/x86/mm/highmem_32.c 	set_pte(kmap_pte-idx, mk_pte(page, prot));
page               96 arch/x86/mm/highmem_32.c 	return kmap_atomic_prot(page, type, kmap_prot);
page              141 arch/x86/mm/highmem_32.c struct page *kmap_atomic_to_page(void *ptr)
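
highmem_32.c shows both halves of the highmem story: kmap()/kmap_atomic() return page_address() directly for lowmem pages and only install a temporary fixmap pte (mk_pte() into kmap_pte-idx) for genuine highmem pages. The caller-side pattern, as a sketch using the era's two-argument kmap_atomic():

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy out of a page that may or may not live in highmem. */
static void copy_from_any_page(struct page *page, void *dst, size_t len)
{
	void *src = kmap_atomic(page, KM_USER0);	/* lowmem: just page_address() */

	memcpy(dst, src, len);
	kunmap_atomic(src, KM_USER0);			/* tears down the fixmap pte, if any */
}
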
page              170 arch/x86/mm/hugetlbpage.c struct page *
page              176 arch/x86/mm/hugetlbpage.c 	struct page *page;
page              188 arch/x86/mm/hugetlbpage.c 	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
page              190 arch/x86/mm/hugetlbpage.c 	WARN_ON(!PageHead(page));
page              192 arch/x86/mm/hugetlbpage.c 	return page;
page              205 arch/x86/mm/hugetlbpage.c struct page *
page              214 arch/x86/mm/hugetlbpage.c struct page *
page              230 arch/x86/mm/hugetlbpage.c struct page *
page              234 arch/x86/mm/hugetlbpage.c 	struct page *page;
page              236 arch/x86/mm/hugetlbpage.c 	page = pte_page(*(pte_t *)pmd);
page              237 arch/x86/mm/hugetlbpage.c 	if (page)
page              238 arch/x86/mm/hugetlbpage.c 		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
page              239 arch/x86/mm/hugetlbpage.c 	return page;
page              242 arch/x86/mm/hugetlbpage.c struct page *
page              246 arch/x86/mm/hugetlbpage.c 	struct page *page;
page              248 arch/x86/mm/hugetlbpage.c 	page = pte_page(*(pte_t *)pud);
page              249 arch/x86/mm/hugetlbpage.c 	if (page)
page              250 arch/x86/mm/hugetlbpage.c 		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
page              251 arch/x86/mm/hugetlbpage.c 	return page;
page              380 arch/x86/mm/init_32.c 	ClearPageReserved(page);
page              381 arch/x86/mm/init_32.c 	init_page_count(page);
page              382 arch/x86/mm/init_32.c 	__free_page(page);
page              395 arch/x86/mm/init_32.c 	struct page *page;
page              410 arch/x86/mm/init_32.c 		page = pfn_to_page(node_pfn);
page              411 arch/x86/mm/init_32.c 		add_one_highpage_init(page, node_pfn);
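
add_one_highpage_init() (init_32.c lines 380-382) is the canonical hand-off of a boot-reserved page to the buddy allocator: clear PG_reserved, reset the refcount to one, and let __free_page() drop that reference into the free lists. In isolation:

#include <linux/mm.h>

/* Hand one formerly-reserved boot page to the buddy allocator. */
static void release_boot_page(struct page *page)
{
	ClearPageReserved(page);	/* the allocator rejects reserved pages */
	init_page_count(page);		/* refcount = 1, as if freshly allocated */
	__free_page(page);		/* dropping that reference frees it */
}
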
page               34 arch/x86/mm/kmmio.c 	unsigned long page; /* location of the fault page */
page               68 arch/x86/mm/kmmio.c 	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
page               99 arch/x86/mm/kmmio.c 	page &= PAGE_MASK;
page              100 arch/x86/mm/kmmio.c 	head = kmmio_page_list(page);
page              102 arch/x86/mm/kmmio.c 		if (p->page == page)
page              152 arch/x86/mm/kmmio.c 	set_page_present(page & PAGE_MASK, false, pglevel);
page              158 arch/x86/mm/kmmio.c 	set_page_present(page & PAGE_MASK, true, pglevel);
page              205 arch/x86/mm/kmmio.c 		disarm_kmmio_fault_page(faultpage->page, NULL);
page              247 arch/x86/mm/kmmio.c 	disarm_kmmio_fault_page(ctx->fpage->page, NULL);
page              286 arch/x86/mm/kmmio.c 	arm_kmmio_fault_page(ctx->fpage->page, NULL);
page              314 arch/x86/mm/kmmio.c 	page &= PAGE_MASK;
page              315 arch/x86/mm/kmmio.c 	f = get_kmmio_fault_page(page);
page              318 arch/x86/mm/kmmio.c 			arm_kmmio_fault_page(f->page, NULL);
page              328 arch/x86/mm/kmmio.c 	f->page = page;
page              329 arch/x86/mm/kmmio.c 	list_add_rcu(&f->list, kmmio_page_list(f->page));
page              331 arch/x86/mm/kmmio.c 	arm_kmmio_fault_page(f->page, NULL);
page              342 arch/x86/mm/kmmio.c 	page &= PAGE_MASK;
page              343 arch/x86/mm/kmmio.c 	f = get_kmmio_fault_page(page);
page              350 arch/x86/mm/kmmio.c 		disarm_kmmio_fault_page(f->page, NULL);
page               97 arch/x86/mm/numa_32.c 	return (nr_pages + 1) * sizeof(struct page);
page               70 arch/x86/mm/pageattr.c 	int n = sprintf(page, "DirectMap4k:  %8lu kB\n",
page               73 arch/x86/mm/pageattr.c 	n += sprintf(page + n, "DirectMap2M:  %8lu kB\n",
page               76 arch/x86/mm/pageattr.c 	n += sprintf(page + n, "DirectMap4M:  %8lu kB\n",
page               81 arch/x86/mm/pageattr.c 		n += sprintf(page + n, "DirectMap1G:  %8lu kB\n",
page              329 arch/x86/mm/pageattr.c 		struct page *page;
page              331 arch/x86/mm/pageattr.c 		list_for_each_entry(page, &pgd_list, lru) {
page              336 arch/x86/mm/pageattr.c 			pgd = (pgd_t *)page_address(page) + pgd_index(address);
page              464 arch/x86/mm/pageattr.c 	struct page *base;
page             1005 arch/x86/mm/pageattr.c 	unsigned long addr = (unsigned long)page_address(page);
page             1013 arch/x86/mm/pageattr.c 	unsigned long addr = (unsigned long)page_address(page);
page             1021 arch/x86/mm/pageattr.c 	unsigned long addr = (unsigned long)page_address(page);
page             1029 arch/x86/mm/pageattr.c 	unsigned long addr = (unsigned long)page_address(page);
page             1037 arch/x86/mm/pageattr.c 	unsigned long addr = (unsigned long)page_address(page);
page             1044 arch/x86/mm/pageattr.c 	unsigned long addr = (unsigned long)page_address(page);
page             1053 arch/x86/mm/pageattr.c 	unsigned long tempaddr = (unsigned long) page_address(page);
page             1071 arch/x86/mm/pageattr.c 	unsigned long tempaddr = (unsigned long) page_address(page);
page             1089 arch/x86/mm/pageattr.c 	if (PageHighMem(page))
page             1092 arch/x86/mm/pageattr.c 		debug_check_no_locks_freed(page_address(page),
page             1108 arch/x86/mm/pageattr.c 		__set_pages_p(page, numpages);
page             1110 arch/x86/mm/pageattr.c 		__set_pages_np(page, numpages);
page             1126 arch/x86/mm/pageattr.c 	if (PageHighMem(page))
page             1129 arch/x86/mm/pageattr.c 	pte = lookup_address((unsigned long)page_address(page), &level);
page              229 arch/x86/mm/pat.c 	struct page *page;
page              233 arch/x86/mm/pat.c 		page = pfn_to_page(pfn);
page              234 arch/x86/mm/pat.c 		if (page_mapped(page) || PageNonWB(page))
page              237 arch/x86/mm/pat.c 		SetPageNonWB(page);
page              244 arch/x86/mm/pat.c 		page = pfn_to_page(pfn);
page              245 arch/x86/mm/pat.c 		ClearPageNonWB(page);
page              253 arch/x86/mm/pat.c 	struct page *page;
page              257 arch/x86/mm/pat.c 		page = pfn_to_page(pfn);
page              258 arch/x86/mm/pat.c 		if (page_mapped(page) || !PageNonWB(page))
page              261 arch/x86/mm/pat.c 		ClearPageNonWB(page);
page              268 arch/x86/mm/pat.c 		page = pfn_to_page(pfn);
page              269 arch/x86/mm/pat.c 		SetPageNonWB(page);
page               14 arch/x86/mm/pgtable.c 	struct page *pte;
page               51 arch/x86/mm/pgtable.c 	struct page *page = virt_to_page(pgd);
page               53 arch/x86/mm/pgtable.c 	list_add(&page->lru, &pgd_list);
page               58 arch/x86/mm/pgtable.c 	struct page *page = virt_to_page(pgd);
page               60 arch/x86/mm/pgtable.c 	list_del(&page->lru);
page              487 arch/x86/mm/srat_64.c 		total_mb *= sizeof(struct page);
page              359 arch/x86/pci/pcbios.c 	unsigned long page;
page              363 arch/x86/pci/pcbios.c 	page = __get_free_page(GFP_KERNEL);
page              364 arch/x86/pci/pcbios.c 	if (!page)
page              366 arch/x86/pci/pcbios.c 	opt.table = (struct irq_info *) page;
page              397 arch/x86/pci/pcbios.c 			memcpy(rt->slots, (void *) page, opt.size);
page              401 arch/x86/pci/pcbios.c 	free_page(page);
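
pcbios.c uses the whole-page temporary buffer idiom: __get_free_page() returns a kernel virtual address as an unsigned long (not a struct page *), the BIOS call fills the page, the useful bytes are copied out, and free_page() releases it. A contrived sketch of the shape, with a hypothetical fill() callback standing in for the BIOS call:

#include <linux/errno.h>
#include <linux/gfp.h>

/* Whole-page scratch buffer: note the unsigned long, not struct page *. */
static int with_page_buffer(int (*fill)(void *buf, size_t len))
{
	unsigned long page = __get_free_page(GFP_KERNEL);
	int ret;

	if (!page)
		return -ENOMEM;

	ret = fill((void *)page, PAGE_SIZE);	/* stand-in for the BIOS call */
	/* ... consume the buffer contents here ... */
	free_page(page);
	return ret;
}
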
page              196 arch/x86/vdso/vdso32-setup.c static struct page *vdso32_pages[1];
page               24 arch/x86/vdso/vma.c static struct page **vdso_pages;
page               43 arch/x86/vdso/vma.c 	vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
page               47 arch/x86/vdso/vma.c 		struct page *p;
page              262 arch/x86/xen/enlighten.c 	struct page *page;
page              268 arch/x86/xen/enlighten.c 	page = pfn_to_page(pfn);
page              275 arch/x86/xen/enlighten.c 	if (!PageHighMem(page)) {
page              861 arch/x86/xen/enlighten.c 	struct page *page = pfn_to_page(pfn);
page              864 arch/x86/xen/enlighten.c 		SetPagePinned(page);
page              866 arch/x86/xen/enlighten.c 		if (!PageHighMem(page)) {
page              896 arch/x86/xen/enlighten.c 		struct page *page = virt_to_page(pgd);
page              899 arch/x86/xen/enlighten.c 		BUG_ON(page->private != 0);
page              904 arch/x86/xen/enlighten.c 		page->private = (unsigned long)user_pgd;
page              932 arch/x86/xen/enlighten.c 	struct page *page = pfn_to_page(pfn);
page              934 arch/x86/xen/enlighten.c 	if (PagePinned(page)) {
page              935 arch/x86/xen/enlighten.c 		if (!PageHighMem(page)) {
page              940 arch/x86/xen/enlighten.c 		ClearPagePinned(page);
page              971 arch/x86/xen/enlighten.c 	if (PagePinned(page))
page              974 arch/x86/xen/enlighten.c 	if (0 && PageHighMem(page))
page              976 arch/x86/xen/enlighten.c 		       page_to_pfn(page), type,
page              979 arch/x86/xen/enlighten.c 	return kmap_atomic_prot(page, type, prot);
page              290 arch/x86/xen/mmu.c 	struct page *page = virt_to_page(ptr);
page              292 arch/x86/xen/mmu.c 	return PagePinned(page);
page              569 arch/x86/xen/mmu.c 		struct page *page = virt_to_page(pgd_page);
page              570 arch/x86/xen/mmu.c 		user_ptr = (pgd_t *)page->private;
page              722 arch/x86/xen/mmu.c 				struct page *pte;
page              753 arch/x86/xen/mmu.c 	ptl = __pte_lockptr(page);
page              781 arch/x86/xen/mmu.c 	unsigned pgfl = TestSetPagePinned(page);
page              786 arch/x86/xen/mmu.c 	else if (PageHighMem(page))
page              791 arch/x86/xen/mmu.c 		void *pt = lowmem_page_address(page);
page              792 arch/x86/xen/mmu.c 		unsigned long pfn = page_to_pfn(page);
page              820 arch/x86/xen/mmu.c 			ptl = xen_pte_lock(page, mm);
page              892 arch/x86/xen/mmu.c 	struct page *page;
page              896 arch/x86/xen/mmu.c 	list_for_each_entry(page, &pgd_list, lru) {
page              897 arch/x86/xen/mmu.c 		if (!PagePinned(page)) {
page              898 arch/x86/xen/mmu.c 			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
page              899 arch/x86/xen/mmu.c 			SetPageSavePinned(page);
page              914 arch/x86/xen/mmu.c 	SetPagePinned(page);
page              926 arch/x86/xen/mmu.c 	unsigned pgfl = TestClearPagePinned(page);
page              928 arch/x86/xen/mmu.c 	if (pgfl && !PageHighMem(page)) {
page              929 arch/x86/xen/mmu.c 		void *pt = lowmem_page_address(page);
page              930 arch/x86/xen/mmu.c 		unsigned long pfn = page_to_pfn(page);
page              942 arch/x86/xen/mmu.c 			ptl = xen_pte_lock(page, mm);
page             1004 arch/x86/xen/mmu.c 	struct page *page;
page             1008 arch/x86/xen/mmu.c 	list_for_each_entry(page, &pgd_list, lru) {
page             1009 arch/x86/xen/mmu.c 		if (PageSavePinned(page)) {
page             1010 arch/x86/xen/mmu.c 			BUG_ON(!PagePinned(page));
page             1011 arch/x86/xen/mmu.c 			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
page             1012 arch/x86/xen/mmu.c 			ClearPageSavePinned(page);
page             1400 block/as-iosched.c 	return sprintf(page, "%d\n", var);
page             1406 block/as-iosched.c 	char *p = (char *) page;
page             1417 block/as-iosched.c 	pos += sprintf(page+pos, "%lu %% exit probability\n",
page             1419 block/as-iosched.c 	pos += sprintf(page+pos, "%lu %% probability of exiting without a "
page             1422 block/as-iosched.c 	pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean);
page             1423 block/as-iosched.c 	pos += sprintf(page+pos, "%llu sectors new seek distance\n",
page             1430 block/as-iosched.c static ssize_t __FUNC(elevator_t *e, char *page)		\
page             1433 block/as-iosched.c 	return as_var_show(jiffies_to_msecs((__VAR)), (page));	\
page             1443 block/as-iosched.c static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
page             1446 block/as-iosched.c 	int ret = as_var_store(__PTR, (page), count);			\
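
as-iosched.c (and cfq-iosched.c and deadline-iosched.c further down) stamp out their sysfs attributes from a macro pair around two tiny helpers: a show side that sprintf()s one value into the PAGE_SIZE sysfs buffer, and a store side that parses it back with simple_strtoul(). The helpers, written out once:

#include <linux/kernel.h>

/* The show side: print one value into the PAGE_SIZE sysfs buffer. */
static ssize_t var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

/* The store side: parse the value back; returning count consumes the write. */
static ssize_t var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *)page;		/* simple_strtoul() wants a char **endp */

	*var = simple_strtoul(p, &p, 10);
	return count;
}
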
page              178 block/blk-integrity.c 	return entry->show(bi, page);
page              192 block/blk-integrity.c 		ret = entry->store(bi, page, count);
page              200 block/blk-integrity.c 		return sprintf(page, "%s\n", bi->name);
page              202 block/blk-integrity.c 		return sprintf(page, "none\n");
page              208 block/blk-integrity.c 		return sprintf(page, "%u\n", bi->tag_size);
page              210 block/blk-integrity.c 		return sprintf(page, "0\n");
page              216 block/blk-integrity.c 	char *p = (char *) page;
page              229 block/blk-integrity.c 	return sprintf(page, "%d\n", (bi->flags & INTEGRITY_FLAG_READ) != 0);
page              235 block/blk-integrity.c 	char *p = (char *) page;
page              248 block/blk-integrity.c 	return sprintf(page, "%d\n", (bi->flags & INTEGRITY_FLAG_WRITE) != 0);
page               21 block/blk-sysfs.c 	return sprintf(page, "%d\n", var);
page               27 block/blk-sysfs.c 	char *p = (char *) page;
page               35 block/blk-sysfs.c 	return queue_var_show(q->nr_requests, (page));
page               43 block/blk-sysfs.c 	int ret = queue_var_store(&nr, page, count);
page               82 block/blk-sysfs.c 	return queue_var_show(ra_kb, (page));
page               89 block/blk-sysfs.c 	ssize_t ret = queue_var_store(&ra_kb, page, count);
page              102 block/blk-sysfs.c 	return queue_var_show(max_sectors_kb, (page));
page              107 block/blk-sysfs.c 	return queue_var_show(q->hardsect_size, page);
page              116 block/blk-sysfs.c 	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
page              135 block/blk-sysfs.c 	return queue_var_show(max_hw_sectors_kb, (page));
page              140 block/blk-sysfs.c 	return queue_var_show(blk_queue_nomerges(q), page);
page              147 block/blk-sysfs.c 	ssize_t ret = queue_var_store(&nm, page, count);
page              163 block/blk-sysfs.c 	return queue_var_show(set != 0, page);
page              173 block/blk-sysfs.c 	ret = queue_var_store(&val, page, count);
page              259 block/blk-sysfs.c 	res = entry->show(q, page);
page              281 block/blk-sysfs.c 	res = entry->store(q, page, length);
page             2278 block/cfq-iosched.c 	return sprintf(page, "%d\n", var);
page             2284 block/cfq-iosched.c 	char *p = (char *) page;
page             2291 block/cfq-iosched.c static ssize_t __FUNC(elevator_t *e, char *page)			\
page             2297 block/cfq-iosched.c 	return cfq_var_show(__data, (page));				\
page             2311 block/cfq-iosched.c static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
page             2315 block/cfq-iosched.c 	int ret = cfq_var_store(&__data, (page), count);		\
page               57 block/cmd-filter.c 	char *npage = page;
page               74 block/cmd-filter.c 	if (npage != page)
page               77 block/cmd-filter.c 	return npage - page;
page               82 block/cmd-filter.c 	return rcf_cmds_show(filter, page, READ);
page               88 block/cmd-filter.c 	return rcf_cmds_show(filter, page, WRITE);
page              106 block/cmd-filter.c 	while ((p = strsep((char **)&page, " ")) != NULL) {
page              135 block/cmd-filter.c 	return rcf_cmds_store(filter, page, count, READ);
page              141 block/cmd-filter.c 	return rcf_cmds_store(filter, page, count, WRITE);
page              178 block/cmd-filter.c 		return entry->show(filter, page);
page              197 block/cmd-filter.c 	return entry->store(filter, page, length);
page              377 block/deadline-iosched.c 	return sprintf(page, "%d\n", var);
page              383 block/deadline-iosched.c 	char *p = (char *) page;
page              390 block/deadline-iosched.c static ssize_t __FUNC(elevator_t *e, char *page)			\
page              396 block/deadline-iosched.c 	return deadline_var_show(__data, (page));			\
page              406 block/deadline-iosched.c static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
page              410 block/deadline-iosched.c 	int ret = deadline_var_store(&__data, (page), count);		\
page              971 block/elevator.c 	error = e->ops ? entry->show(e, page) : -ENOENT;
page              988 block/elevator.c 	error = e->ops ? entry->store(e, page, length) : -ENOENT;
page              306 crypto/async_tx/async_xor.c 	BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));
page              151 crypto/authenc.c 	struct page *dstp;
page              280 crypto/authenc.c 	struct page *srcp;
page               89 crypto/blkcipher.c 		memcpy(walk->dst.virt.addr, walk->page, n);
page              138 crypto/blkcipher.c 	if (walk->buffer != walk->page)
page              140 crypto/blkcipher.c 	if (walk->page)
page              141 crypto/blkcipher.c 		free_page((unsigned long)walk->page);
page              158 crypto/blkcipher.c 	walk->buffer = walk->page;
page              185 crypto/blkcipher.c 	u8 *tmp = walk->page;
page              202 crypto/blkcipher.c 	walk->src.phys.page = scatterwalk_page(&walk->in);
page              204 crypto/blkcipher.c 	walk->dst.phys.page = scatterwalk_page(&walk->out);
page              211 crypto/blkcipher.c 	diff |= walk->src.virt.page - walk->dst.virt.page;
page              244 crypto/blkcipher.c 		if (!walk->page) {
page              245 crypto/blkcipher.c 			walk->page = (void *)__get_free_page(GFP_ATOMIC);
page              246 crypto/blkcipher.c 			if (!walk->page)
page              270 crypto/blkcipher.c 		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
page              271 crypto/blkcipher.c 		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
page              344 crypto/blkcipher.c 	walk->page = NULL;
page               45 crypto/digest.c 		struct page *pg = sg_page(sg);
page               89 crypto/eseqiv.c 	struct page *srcp;
page               90 crypto/eseqiv.c 	struct page *dstp;
page               54 crypto/scatterwalk.c 		struct page *page;
page               56 crypto/scatterwalk.c 		page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
page               57 crypto/scatterwalk.c 		flush_dcache_page(page);
page              121 crypto/xcbc.c  		struct page *pg = sg_page(sg);
page               60 fs/9p/vfs_addr.c 	buffer = kmap(page);
page               61 fs/9p/vfs_addr.c 	offset = page_offset(page);
page               68 fs/9p/vfs_addr.c 	flush_dcache_page(page);
page               69 fs/9p/vfs_addr.c 	SetPageUptodate(page);
page               73 fs/9p/vfs_addr.c 	kunmap(page);
page               74 fs/9p/vfs_addr.c 	unlock_page(page);
page               56 fs/adfs/inode.c 	return block_write_full_page(page, adfs_get_block, wbc);
page               61 fs/adfs/inode.c 	return block_read_full_page(page, adfs_get_block);
page              397 fs/affs/file.c 	return block_write_full_page(page, affs_get_block, wbc);
page              402 fs/affs/file.c 	return block_read_full_page(page, affs_get_block);
page              489 fs/affs/file.c 	struct inode *inode = page->mapping->host;
page              496 fs/affs/file.c 	pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
page              498 fs/affs/file.c 	kmap(page);
page              499 fs/affs/file.c 	data = page_address(page);
page              501 fs/affs/file.c 	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
page              517 fs/affs/file.c 	flush_dcache_page(page);
page              518 fs/affs/file.c 	kunmap(page);
page              593 fs/affs/file.c 	struct inode *inode = page->mapping->host;
page              597 fs/affs/file.c 	pr_debug("AFFS: read_page(%u, %ld)\n", (u32)inode->i_ino, page->index);
page              599 fs/affs/file.c 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
page              601 fs/affs/file.c 		memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
page              604 fs/affs/file.c 	err = affs_do_readpage_ofs(file, page, 0, to);
page              606 fs/affs/file.c 		SetPageUptodate(page);
page              607 fs/affs/file.c 	unlock_page(page);
page              616 fs/affs/file.c 	struct page *page;
page              631 fs/affs/file.c 	page = __grab_cache_page(mapping, index);
page              632 fs/affs/file.c 	if (!page)
page              634 fs/affs/file.c 	*pagep = page;
page              636 fs/affs/file.c 	if (PageUptodate(page))
page              640 fs/affs/file.c 	err = affs_do_readpage_ofs(file, page, 0, PAGE_CACHE_SIZE);
page              642 fs/affs/file.c 		unlock_page(page);
page              643 fs/affs/file.c 		page_cache_release(page);
page              671 fs/affs/file.c 	data = page_address(page);
page              675 fs/affs/file.c 	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
page              757 fs/affs/file.c 	SetPageUptodate(page);
page              761 fs/affs/file.c 	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
page              765 fs/affs/file.c 	unlock_page(page);
page              766 fs/affs/file.c 	page_cache_release(page);
page              824 fs/affs/file.c 		struct page *page;
page              829 fs/affs/file.c 		res = mapping->a_ops->write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);
page              831 fs/affs/file.c 			res = mapping->a_ops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
page               16 fs/affs/symlink.c 	struct inode *inode = page->mapping->host;
page               17 fs/affs/symlink.c 	char *link = kmap(page);
page               58 fs/affs/symlink.c 	SetPageUptodate(page);
page               59 fs/affs/symlink.c 	kunmap(page);
page               60 fs/affs/symlink.c 	unlock_page(page);
page               63 fs/affs/symlink.c 	SetPageError(page);
page               64 fs/affs/symlink.c 	kunmap(page);
page               65 fs/affs/symlink.c 	unlock_page(page);
page              141 fs/afs/dir.c   	if (page->index == 0 && qty != ntohs(dbuf->blocks[0].pagehdr.npages)) {
page              150 fs/afs/dir.c   	latter = dir->i_size - page_offset(page);
page              158 fs/afs/dir.c   	dbuf = page_address(page);
page              168 fs/afs/dir.c   	SetPageChecked(page);
page              172 fs/afs/dir.c   	SetPageChecked(page);
page              173 fs/afs/dir.c   	SetPageError(page);
page              181 fs/afs/dir.c   	kunmap(page);
page              182 fs/afs/dir.c   	page_cache_release(page);
page              188 fs/afs/dir.c   static struct page *afs_dir_get_page(struct inode *dir, unsigned long index,
page              191 fs/afs/dir.c   	struct page *page;
page              198 fs/afs/dir.c   	page = read_mapping_page(dir->i_mapping, index, &file);
page              199 fs/afs/dir.c   	if (!IS_ERR(page)) {
page              200 fs/afs/dir.c   		kmap(page);
page              201 fs/afs/dir.c   		if (!PageChecked(page))
page              202 fs/afs/dir.c   			afs_dir_check_page(dir, page);
page              203 fs/afs/dir.c   		if (PageError(page))
page              206 fs/afs/dir.c   	return page;
page              209 fs/afs/dir.c   	afs_dir_put_page(page);
page              334 fs/afs/dir.c   	struct page *page;
page              355 fs/afs/dir.c   		page = afs_dir_get_page(dir, blkoff / PAGE_SIZE, key);
page              356 fs/afs/dir.c   		if (IS_ERR(page)) {
page              357 fs/afs/dir.c   			ret = PTR_ERR(page);
page              363 fs/afs/dir.c   		dbuf = page_address(page);
page              372 fs/afs/dir.c   				afs_dir_put_page(page);
page              380 fs/afs/dir.c   		afs_dir_put_page(page);
page              110 fs/afs/file.c  	_enter("%p,%p,%p,%d", cookie_data, page, data, error);
page              113 fs/afs/file.c  		SetPageError(page);
page              115 fs/afs/file.c  		SetPageUptodate(page);
page              116 fs/afs/file.c  	unlock_page(page);
page              130 fs/afs/file.c  	_enter("%p,%p,%p,%d", cookie_data, page, data, error);
page              132 fs/afs/file.c  	unlock_page(page);
page              148 fs/afs/file.c  	inode = page->mapping->host;
page              154 fs/afs/file.c  	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);
page              158 fs/afs/file.c  	BUG_ON(!PageLocked(page));
page              167 fs/afs/file.c  					 page,
page              188 fs/afs/file.c  		offset = page->index << PAGE_CACHE_SHIFT;
page              193 fs/afs/file.c  		ret = afs_vnode_fetch_data(vnode, key, offset, len, page);
page              202 fs/afs/file.c  			cachefs_uncache_page(vnode->cache, page);
page              207 fs/afs/file.c  		SetPageUptodate(page);
page              211 fs/afs/file.c  				       page,
page              216 fs/afs/file.c  			cachefs_uncache_page(vnode->cache, page);
page              217 fs/afs/file.c  			unlock_page(page);
page              220 fs/afs/file.c  		unlock_page(page);
page              228 fs/afs/file.c  	SetPageError(page);
page              229 fs/afs/file.c  	unlock_page(page);
page              241 fs/afs/file.c  	_enter("{%lu},%lu", page->index, offset);
page              243 fs/afs/file.c  	BUG_ON(!PageLocked(page));
page              245 fs/afs/file.c  	if (PagePrivate(page)) {
page              252 fs/afs/file.c  			BUG_ON(!PageLocked(page));
page              255 fs/afs/file.c  			if (!PageWriteback(page))
page              256 fs/afs/file.c  				ret = page->mapping->a_ops->releasepage(page,
page              270 fs/afs/file.c  	_enter("{%lu}", page->index);
page              280 fs/afs/file.c  	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
page              284 fs/afs/file.c  	       vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
page              287 fs/afs/file.c  	if (PagePrivate(page)) {
page              288 fs/afs/file.c  		wb = (struct afs_writeback *) page_private(page);
page              290 fs/afs/file.c  		set_page_private(page, 0);
page              291 fs/afs/file.c  		ClearPagePrivate(page);
page              309 fs/afs/fsclient.c 	struct page *page;
page              365 fs/afs/fsclient.c 			page = call->reply3;
page              366 fs/afs/fsclient.c 			buffer = kmap_atomic(page, KM_USER0);
page              411 fs/afs/fsclient.c 		page = call->reply3;
page              412 fs/afs/fsclient.c 		buffer = kmap_atomic(page, KM_USER0);
page               56 fs/afs/mntpt.c 	struct page *page;
page               65 fs/afs/mntpt.c 	page = read_mapping_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, &file);
page               66 fs/afs/mntpt.c 	if (IS_ERR(page)) {
page               67 fs/afs/mntpt.c 		ret = PTR_ERR(page);
page               72 fs/afs/mntpt.c 	if (PageError(page))
page               75 fs/afs/mntpt.c 	buf = kmap(page);
page               93 fs/afs/mntpt.c 	kunmap(page);
page               95 fs/afs/mntpt.c 	page_cache_release(page);
page              142 fs/afs/mntpt.c 	struct page *page = NULL;
page              166 fs/afs/mntpt.c 	page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
page              167 fs/afs/mntpt.c 	if (IS_ERR(page)) {
page              168 fs/afs/mntpt.c 		ret = PTR_ERR(page);
page              173 fs/afs/mntpt.c 	if (PageError(page))
page              176 fs/afs/mntpt.c 	buf = kmap(page);
page              178 fs/afs/mntpt.c 	kunmap(page);
page              179 fs/afs/mntpt.c 	page_cache_release(page);
page              180 fs/afs/mntpt.c 	page = NULL;
page              200 fs/afs/mntpt.c 	if (page)
page              201 fs/afs/mntpt.c 		page_cache_release(page);
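
The mntpt.c hits show the read_mapping_page() consumer protocol: the call returns a referenced page or an ERR_PTR, PageError() is checked for I/O that completed but failed, the contents are viewed through kmap()/kunmap(), and page_cache_release() drops the reference on every exit path. A sketch:

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Read the first byte of a mapping's page 0, with the full error protocol. */
static int read_first_byte(struct address_space *mapping, u8 *out)
{
	struct page *page = read_mapping_page(mapping, 0, NULL);
	u8 *buf;

	if (IS_ERR(page))			/* the read could not even start */
		return PTR_ERR(page);
	if (PageError(page)) {			/* I/O ran but failed */
		page_cache_release(page);
		return -EIO;
	}

	buf = kmap(page);
	*out = buf[0];
	kunmap(page);
	page_cache_release(page);		/* drop the reference on every path */
	return 0;
}
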
page              245 fs/afs/rxrpc.c 	struct page *pages[8];
page              425 fs/afs/vnode.c 					page, &afs_sync_call);
page               28 fs/afs/write.c 	return __set_page_dirty_nobuffers(page);
page               95 fs/afs/write.c 	ret = afs_vnode_fetch_data(vnode, key, start, len, page);
page              125 fs/afs/write.c 	p = kmap_atomic(page, KM_USER0);
page              128 fs/afs/write.c 	pos = (loff_t) page->index << PAGE_SHIFT;
page              166 fs/afs/write.c 		ret = afs_fill_page(vnode, key, start, len, page);
page              188 fs/afs/write.c 	       vnode->fid.vid, vnode->fid.vnode, page->index, offset, to);
page              194 fs/afs/write.c 	candidate->first = candidate->last = page->index;
page              201 fs/afs/write.c 	if (!PageUptodate(page)) {
page              203 fs/afs/write.c 		ret = afs_prepare_page(vnode, page, key, offset, to);
page              212 fs/afs/write.c 	index = page->index;
page              217 fs/afs/write.c 	wb = (struct afs_writeback *) page_private(page);
page              237 fs/afs/write.c 	SetPagePrivate(page);
page              238 fs/afs/write.c 	set_page_private(page, (unsigned long) candidate);
page              260 fs/afs/write.c 	SetPagePrivate(page);
page              261 fs/afs/write.c 	set_page_private(page, (unsigned long) wb);
page              273 fs/afs/write.c 	if (PageDirty(page)) {
page              274 fs/afs/write.c 		ret = afs_write_back_from_locked_page(wb, page);
page              284 fs/afs/write.c 	set_page_private(page, 0);
page              285 fs/afs/write.c 	ClearPagePrivate(page);
page              299 fs/afs/write.c 	       vnode->fid.vid, vnode->fid.vnode, page->index, offset, to);
page              301 fs/afs/write.c 	maybe_i_size = (loff_t) page->index << PAGE_SHIFT;
page              313 fs/afs/write.c 	SetPageUptodate(page);
page              314 fs/afs/write.c 	set_page_dirty(page);
page              315 fs/afs/write.c 	if (PageDirty(page))
page              365 fs/afs/write.c 	struct page *pages[8], *page;
page              404 fs/afs/write.c 			page = pages[loop];
page              405 fs/afs/write.c 			if (page->index > wb->last)
page              407 fs/afs/write.c 			if (!trylock_page(page))
page              409 fs/afs/write.c 			if (!PageDirty(page) ||
page              410 fs/afs/write.c 			    page_private(page) != (unsigned long) wb) {
page              411 fs/afs/write.c 				unlock_page(page);
page              414 fs/afs/write.c 			if (!clear_page_dirty_for_io(page))
page              416 fs/afs/write.c 			if (test_set_page_writeback(page))
page              418 fs/afs/write.c 			unlock_page(page);
page              419 fs/afs/write.c 			put_page(page);
page              486 fs/afs/write.c 	struct backing_dev_info *bdi = page->mapping->backing_dev_info;
page              490 fs/afs/write.c 	_enter("{%lx},", page->index);
page              492 fs/afs/write.c 	wb = (struct afs_writeback *) page_private(page);
page              495 fs/afs/write.c 	ret = afs_write_back_from_locked_page(wb, page);
page              496 fs/afs/write.c 	unlock_page(page);
page              519 fs/afs/write.c 	struct page *page;
page              526 fs/afs/write.c 				       1, &page);
page              530 fs/afs/write.c 		_debug("wback %lx", page->index);
page              532 fs/afs/write.c 		if (page->index > end) {
page              534 fs/afs/write.c 			page_cache_release(page);
page              544 fs/afs/write.c 		lock_page(page);
page              546 fs/afs/write.c 		if (page->mapping != mapping) {
page              547 fs/afs/write.c 			unlock_page(page);
page              548 fs/afs/write.c 			page_cache_release(page);
page              553 fs/afs/write.c 			wait_on_page_writeback(page);
page              555 fs/afs/write.c 		if (PageWriteback(page) || !PageDirty(page)) {
page              556 fs/afs/write.c 			unlock_page(page);
page              560 fs/afs/write.c 		wb = (struct afs_writeback *) page_private(page);
page              567 fs/afs/write.c 		ret = afs_write_back_from_locked_page(wb, page);
page              568 fs/afs/write.c 		unlock_page(page);
page              569 fs/afs/write.c 		page_cache_release(page);
page              683 fs/afs/write.c 			struct page *page = pv.pages[loop];
page              684 fs/afs/write.c 			end_page_writeback(page);
page              685 fs/afs/write.c 			if (page_private(page) == (unsigned long) wb) {
page              686 fs/afs/write.c 				set_page_private(page, 0);
page              687 fs/afs/write.c 				ClearPagePrivate(page);
page               77 fs/aio.c       	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
page              125 fs/aio.c       		info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
page               98 fs/befs/linuxvfs.c 	return block_read_full_page(page, befs_get_block);
page              160 fs/bfs/file.c  	return block_write_full_page(page, bfs_get_block, wbc);
page              165 fs/bfs/file.c  	return block_read_full_page(page, bfs_get_block);
page             2011 fs/binfmt_elf.c 			struct page *page;
page             2015 fs/binfmt_elf.c 						&page, &tmp_vma) <= 0) {
page             2018 fs/binfmt_elf.c 				if (page == ZERO_PAGE(0)) {
page             2020 fs/binfmt_elf.c 						page_cache_release(page);
page             2026 fs/binfmt_elf.c 							 page_to_pfn(page));
page             2027 fs/binfmt_elf.c 					kaddr = kmap(page);
page             2031 fs/binfmt_elf.c 						kunmap(page);
page             2032 fs/binfmt_elf.c 						page_cache_release(page);
page             2035 fs/binfmt_elf.c 					kunmap(page);
page             2037 fs/binfmt_elf.c 				page_cache_release(page);
page              669 fs/binfmt_elf_fdpic.c 		src = kmap(bprm->page[index]);
page              673 fs/binfmt_elf_fdpic.c 		kunmap(bprm->page[index]);
page             1489 fs/binfmt_elf_fdpic.c 			struct page *page;
page             1492 fs/binfmt_elf_fdpic.c 					   &page, &vma) <= 0) {
page             1495 fs/binfmt_elf_fdpic.c 			else if (page == ZERO_PAGE(0)) {
page             1496 fs/binfmt_elf_fdpic.c 				page_cache_release(page);
page             1502 fs/binfmt_elf_fdpic.c 				flush_cache_page(vma, addr, page_to_pfn(page));
page             1503 fs/binfmt_elf_fdpic.c 				kaddr = kmap(page);
page             1507 fs/binfmt_elf_fdpic.c 					kunmap(page);
page             1508 fs/binfmt_elf_fdpic.c 					page_cache_release(page);
page             1511 fs/binfmt_elf_fdpic.c 				kunmap(page);
page             1512 fs/binfmt_elf_fdpic.c 				page_cache_release(page);
page              894 fs/binfmt_flat.c 			((char *) page_address(bprm->page[i/PAGE_SIZE]))[i % PAGE_SIZE];
page              447 fs/binfmt_misc.c 		sprintf(page, "%s\n", status);
page              451 fs/binfmt_misc.c 	sprintf(page, "%s\ninterpreter %s\n", status, e->interpreter);
page              452 fs/binfmt_misc.c 	dp = page + strlen(page);
page              475 fs/binfmt_misc.c 		dp = page + strlen(page);
page              540 fs/binfmt_misc.c 	char *page;
page              542 fs/binfmt_misc.c 	if (!(page = (char*) __get_free_page(GFP_KERNEL)))
page              545 fs/binfmt_misc.c 	entry_status(e, page);
page              547 fs/binfmt_misc.c 	res = simple_read_from_buffer(buf, nbytes, ppos, page, strlen(page));
page              549 fs/binfmt_misc.c 	free_page((unsigned long) page);
page              145 fs/bio-integrity.c 	iv->bv_page = page;
page              368 fs/bio.c       		if (page == prev->bv_page &&
page              413 fs/bio.c       	bvec->bv_page = page;
page              470 fs/bio.c       	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
page              490 fs/bio.c       	return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
page              633 fs/bio.c       	struct page *page;
page              681 fs/bio.c       			page = map_data->pages[i++];
page              683 fs/bio.c       			page = alloc_page(q->bounce_gfp | gfp_mask);
page              684 fs/bio.c       		if (!page) {
page              689 fs/bio.c       		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
page              752 fs/bio.c       	struct page **pages;
page              779 fs/bio.c       	pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
page             1110 fs/bio.c       		struct page *page = bvec[i].bv_page;
page             1112 fs/bio.c       		if (page && !PageCompound(page))
page             1113 fs/bio.c       			set_page_dirty_lock(page);
page             1123 fs/bio.c       		struct page *page = bvec[i].bv_page;
page             1125 fs/bio.c       		if (page)
page             1126 fs/bio.c       			put_page(page);
page             1177 fs/bio.c       		struct page *page = bvec[i].bv_page;
page             1179 fs/bio.c       		if (PageDirty(page) || PageCompound(page)) {
page             1180 fs/bio.c       			page_cache_release(page);
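
bio.c's completion paths (lines 1110-1126 and 1177-1180) show why pages that a device DMA'd into must be redirtied: each non-compound bvec page gets set_page_dirty_lock(), and the reference taken when the bio was mapped is dropped with put_page() (page_cache_release() is the same operation in this era). The per-bvec loop, reduced to its core:

#include <linux/bio.h>
#include <linux/mm.h>

static void dirty_and_release_bvecs(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);	/* data arrived via DMA */
		if (page)
			put_page(page);			/* ref taken at map time */
	}
}
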
page              179 fs/block_dev.c 	return block_write_full_page(page, blkdev_get_block, wbc);
page              184 fs/block_dev.c 	return block_read_full_page(page, blkdev_get_block);
page              201 fs/block_dev.c 	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
page              203 fs/block_dev.c 	unlock_page(page);
page              204 fs/block_dev.c 	page_cache_release(page);
page               98 fs/buffer.c    	ClearPagePrivate(page);
page               99 fs/buffer.c    	set_page_private(page, 0);
page              100 fs/buffer.c    	page_cache_release(page);
page              270 fs/buffer.c    	struct page *page;
page              274 fs/buffer.c    	page = find_get_page(bd_mapping, index);
page              275 fs/buffer.c    	if (!page)
page              279 fs/buffer.c    	if (!page_has_buffers(page))
page              281 fs/buffer.c    	head = page_buffers(page);
page              310 fs/buffer.c    	page_cache_release(page);
page              388 fs/buffer.c    	struct page *page;
page              393 fs/buffer.c    	page = bh->b_page;
page              400 fs/buffer.c    		SetPageError(page);
page              408 fs/buffer.c    	first = page_buffers(page);
page              430 fs/buffer.c    	if (page_uptodate && !PageError(page))
page              431 fs/buffer.c    		SetPageUptodate(page);
page              432 fs/buffer.c    	unlock_page(page);
page              451 fs/buffer.c    	struct page *page;
page              455 fs/buffer.c    	page = bh->b_page;
page              465 fs/buffer.c    		set_bit(AS_EIO, &page->mapping->flags);
page              468 fs/buffer.c    		SetPageError(page);
page              471 fs/buffer.c    	first = page_buffers(page);
page              487 fs/buffer.c    	end_page_writeback(page);
page              704 fs/buffer.c    		return !TestSetPageDirty(page);
page              706 fs/buffer.c    	if (TestSetPageDirty(page))
page              710 fs/buffer.c    	if (page->mapping) {	/* Race with truncate? */
page              711 fs/buffer.c    		WARN_ON_ONCE(warn && !PageUptodate(page));
page              714 fs/buffer.c    			__inc_zone_page_state(page, NR_FILE_DIRTY);
page              720 fs/buffer.c    				page_index(page), PAGECACHE_TAG_DIRTY);
page              755 fs/buffer.c    	struct address_space *mapping = page_mapping(page);
page              758 fs/buffer.c    		return !TestSetPageDirty(page);
page              761 fs/buffer.c    	if (page_has_buffers(page)) {
page              762 fs/buffer.c    		struct buffer_head *head = page_buffers(page);
page              772 fs/buffer.c    	return __set_page_dirty(page, mapping, 1);
page              946 fs/buffer.c    		set_bh_page(bh, page, offset);
page              994 fs/buffer.c    	attach_page_buffers(page, head);
page             1004 fs/buffer.c    	struct buffer_head *head = page_buffers(page);
page             1006 fs/buffer.c    	int uptodate = PageUptodate(page);
page             1027 fs/buffer.c    static struct page *
page             1032 fs/buffer.c    	struct page *page;
page             1035 fs/buffer.c    	page = find_or_create_page(inode->i_mapping, index,
page             1037 fs/buffer.c    	if (!page)
page             1040 fs/buffer.c    	BUG_ON(!PageLocked(page));
page             1042 fs/buffer.c    	if (page_has_buffers(page)) {
page             1043 fs/buffer.c    		bh = page_buffers(page);
page             1045 fs/buffer.c    			init_page_buffers(page, bdev, block, size);
page             1046 fs/buffer.c    			return page;
page             1048 fs/buffer.c    		if (!try_to_free_buffers(page))
page             1055 fs/buffer.c    	bh = alloc_page_buffers(page, size, 0);
page             1065 fs/buffer.c    	link_dev_buffers(page, bh);
page             1066 fs/buffer.c    	init_page_buffers(page, bdev, block, size);
page             1068 fs/buffer.c    	return page;
page             1072 fs/buffer.c    	unlock_page(page);
page             1073 fs/buffer.c    	page_cache_release(page);
page             1084 fs/buffer.c    	struct page *page;
page             1110 fs/buffer.c    	page = grow_dev_page(bdev, block, index, size);
page             1111 fs/buffer.c    	if (!page)
page             1113 fs/buffer.c    	unlock_page(page);
page             1114 fs/buffer.c    	page_cache_release(page);
page             1473 fs/buffer.c    	bh->b_page = page;
page             1475 fs/buffer.c    	if (PageHighMem(page))
page             1481 fs/buffer.c    		bh->b_data = page_address(page) + offset;
page             1521 fs/buffer.c    	BUG_ON(!PageLocked(page));
page             1522 fs/buffer.c    	if (!page_has_buffers(page))
page             1525 fs/buffer.c    	head = page_buffers(page);
page             1546 fs/buffer.c    		try_to_release_page(page, 0);
page             1562 fs/buffer.c    	head = alloc_page_buffers(page, blocksize, 1);
page             1571 fs/buffer.c    	spin_lock(&page->mapping->private_lock);
page             1572 fs/buffer.c    	if (PageUptodate(page) || PageDirty(page)) {
page             1575 fs/buffer.c    			if (PageDirty(page))
page             1577 fs/buffer.c    			if (PageUptodate(page))
page             1582 fs/buffer.c    	attach_page_buffers(page, head);
page             1583 fs/buffer.c    	spin_unlock(&page->mapping->private_lock);
page             1654 fs/buffer.c    	BUG_ON(!PageLocked(page));
page             1658 fs/buffer.c    	if (!page_has_buffers(page)) {
page             1659 fs/buffer.c    		create_empty_buffers(page, blocksize,
page             1673 fs/buffer.c    	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
page             1674 fs/buffer.c    	head = page_buffers(page);
page             1724 fs/buffer.c    			redirty_page_for_writepage(wbc, page);
page             1738 fs/buffer.c    	BUG_ON(PageWriteback(page));
page             1739 fs/buffer.c    	set_page_writeback(page);
page             1749 fs/buffer.c    	unlock_page(page);
page             1759 fs/buffer.c    		end_page_writeback(page);
page             1790 fs/buffer.c    	SetPageError(page);
page             1791 fs/buffer.c    	BUG_ON(PageWriteback(page));
page             1792 fs/buffer.c    	mapping_set_error(page->mapping, err);
page             1793 fs/buffer.c    	set_page_writeback(page);
page             1803 fs/buffer.c    	unlock_page(page);
page             1817 fs/buffer.c    	BUG_ON(!PageLocked(page));
page             1818 fs/buffer.c    	if (!page_has_buffers(page))
page             1821 fs/buffer.c    	bh = head = page_buffers(page);
page             1828 fs/buffer.c    				if (!PageUptodate(page)) {
page             1834 fs/buffer.c    					zero_user(page, start, size);
page             1858 fs/buffer.c    	BUG_ON(!PageLocked(page));
page             1864 fs/buffer.c    	if (!page_has_buffers(page))
page             1865 fs/buffer.c    		create_empty_buffers(page, blocksize, 0);
page             1866 fs/buffer.c    	head = page_buffers(page);
page             1869 fs/buffer.c    	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
page             1875 fs/buffer.c    			if (PageUptodate(page)) {
page             1891 fs/buffer.c    				if (PageUptodate(page)) {
page             1898 fs/buffer.c    					zero_user_segments(page,
page             1904 fs/buffer.c    		if (PageUptodate(page)) {
page             1925 fs/buffer.c    		page_zero_new_buffers(page, from, to);
page             1939 fs/buffer.c    	for(bh = head = page_buffers(page), block_start = 0;
page             1960 fs/buffer.c    		SetPageUptodate(page);
page             1979 fs/buffer.c    	struct page *page;
page             1988 fs/buffer.c    	page = *pagep;
page             1989 fs/buffer.c    	if (page == NULL) {
page             1991 fs/buffer.c    		page = __grab_cache_page(mapping, index);
page             1992 fs/buffer.c    		if (!page) {
page             1996 fs/buffer.c    		*pagep = page;
page             1998 fs/buffer.c    		BUG_ON(!PageLocked(page));
page             2000 fs/buffer.c    	status = __block_prepare_write(inode, page, start, end, get_block);
page             2002 fs/buffer.c    		ClearPageUptodate(page);
page             2005 fs/buffer.c    			unlock_page(page);
page             2006 fs/buffer.c    			page_cache_release(page);
page             2047 fs/buffer.c    		if (!PageUptodate(page))
page             2050 fs/buffer.c    		page_zero_new_buffers(page, start+copied, start+len);
page             2052 fs/buffer.c    	flush_dcache_page(page);
page             2055 fs/buffer.c    	__block_commit_write(inode, page, start, start+copied);
page             2068 fs/buffer.c    	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
page             2082 fs/buffer.c    	unlock_page(page);
page             2083 fs/buffer.c    	page_cache_release(page);
page             2108 fs/buffer.c    	struct inode *inode = page->mapping->host;
page             2114 fs/buffer.c    	if (!page_has_buffers(page))
page             2123 fs/buffer.c    	head = page_buffers(page);
page             2153 fs/buffer.c    	struct inode *inode = page->mapping->host;
page             2160 fs/buffer.c    	BUG_ON(!PageLocked(page));
page             2162 fs/buffer.c    	if (!page_has_buffers(page))
page             2163 fs/buffer.c    		create_empty_buffers(page, blocksize, 0);
page             2164 fs/buffer.c    	head = page_buffers(page);
page             2166 fs/buffer.c    	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
page             2184 fs/buffer.c    					SetPageError(page);
page             2187 fs/buffer.c    				zero_user(page, i * blocksize, blocksize);
page             2203 fs/buffer.c    		SetPageMappedToDisk(page);
page             2210 fs/buffer.c    		if (!PageError(page))
page             2211 fs/buffer.c    			SetPageUptodate(page);
page             2212 fs/buffer.c    		unlock_page(page);
page             2245 fs/buffer.c    	struct page *page;
page             2261 fs/buffer.c    				&page, &fsdata);
page             2265 fs/buffer.c    	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
page             2277 fs/buffer.c    	struct page *page;
page             2297 fs/buffer.c    						&page, &fsdata);
page             2300 fs/buffer.c    		zero_user(page, zerofrom, len);
page             2302 fs/buffer.c    						page, fsdata);
page             2326 fs/buffer.c    						&page, &fsdata);
page             2329 fs/buffer.c    		zero_user(page, zerofrom, len);
page             2331 fs/buffer.c    						page, fsdata);
page             2375 fs/buffer.c    	struct inode *inode = page->mapping->host;
page             2376 fs/buffer.c    	int err = __block_prepare_write(inode, page, from, to, get_block);
page             2378 fs/buffer.c    		ClearPageUptodate(page);
page             2384 fs/buffer.c    	struct inode *inode = page->mapping->host;
page             2385 fs/buffer.c    	__block_commit_write(inode,page,from,to);
page             2413 fs/buffer.c    	lock_page(page);
page             2415 fs/buffer.c    	if ((page->mapping != inode->i_mapping) ||
page             2416 fs/buffer.c    	    (page_offset(page) > size)) {
page             2422 fs/buffer.c    	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
page             2427 fs/buffer.c    	ret = block_prepare_write(page, 0, end, get_block);
page             2429 fs/buffer.c    		ret = block_commit_write(page, 0, end);
page             2432 fs/buffer.c    	unlock_page(page);
page             2455 fs/buffer.c    	BUG_ON(!PageLocked(page));
page             2457 fs/buffer.c    	spin_lock(&page->mapping->private_lock);
page             2460 fs/buffer.c    		if (PageDirty(page))
page             2466 fs/buffer.c    	attach_page_buffers(page, head);
page             2467 fs/buffer.c    	spin_unlock(&page->mapping->private_lock);
page             2483 fs/buffer.c    	struct page *page;
page             2497 fs/buffer.c    	page = __grab_cache_page(mapping, index);
page             2498 fs/buffer.c    	if (!page)
page             2500 fs/buffer.c    	*pagep = page;
page             2503 fs/buffer.c    	if (page_has_buffers(page)) {
page             2504 fs/buffer.c    		unlock_page(page);
page             2505 fs/buffer.c    		page_cache_release(page);
page             2511 fs/buffer.c    	if (PageMappedToDisk(page))
page             2523 fs/buffer.c    	head = alloc_page_buffers(page, blocksize, 0);
page             2529 fs/buffer.c    	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
page             2554 fs/buffer.c    		if (PageUptodate(page)) {
page             2559 fs/buffer.c    			zero_user_segments(page, block_start, from,
page             2589 fs/buffer.c    		SetPageMappedToDisk(page);
page             2604 fs/buffer.c    	attach_nobh_buffers(page, head);
page             2605 fs/buffer.c    	page_zero_new_buffers(page, from, to);
page             2608 fs/buffer.c    	unlock_page(page);
page             2609 fs/buffer.c    	page_cache_release(page);
page             2623 fs/buffer.c    	struct inode *inode = page->mapping->host;
page             2626 fs/buffer.c    	BUG_ON(fsdata != NULL && page_has_buffers(page));
page             2628 fs/buffer.c    	if (unlikely(copied < len) && !page_has_buffers(page))
page             2629 fs/buffer.c    		attach_nobh_buffers(page, head);
page             2630 fs/buffer.c    	if (page_has_buffers(page))
page             2632 fs/buffer.c    					copied, page, fsdata);
page             2634 fs/buffer.c    	SetPageUptodate(page);
page             2635 fs/buffer.c    	set_page_dirty(page);
page             2641 fs/buffer.c    	unlock_page(page);
page             2642 fs/buffer.c    	page_cache_release(page);
page             2662 fs/buffer.c    	struct inode * const inode = page->mapping->host;
page             2669 fs/buffer.c    	if (page->index < end_index)
page             2674 fs/buffer.c    	if (page->index >= end_index+1 || !offset) {
page             2682 fs/buffer.c    		if (page->mapping->a_ops->invalidatepage)
page             2683 fs/buffer.c    			page->mapping->a_ops->invalidatepage(page, offset);
page             2685 fs/buffer.c    		unlock_page(page);
page             2696 fs/buffer.c    	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
page             2698 fs/buffer.c    	ret = mpage_writepage(page, get_block, wbc);
page             2700 fs/buffer.c    		ret = __block_write_full_page(inode, page, get_block, wbc);
page             2714 fs/buffer.c    	struct page *page;
page             2728 fs/buffer.c    	page = grab_cache_page(mapping, index);
page             2730 fs/buffer.c    	if (!page)
page             2733 fs/buffer.c    	if (page_has_buffers(page)) {
page             2735 fs/buffer.c    		unlock_page(page);
page             2736 fs/buffer.c    		page_cache_release(page);
page             2755 fs/buffer.c    	if (!PageUptodate(page)) {
page             2756 fs/buffer.c    		err = mapping->a_ops->readpage(NULL, page);
page             2758 fs/buffer.c    			page_cache_release(page);
page             2761 fs/buffer.c    		lock_page(page);
page             2762 fs/buffer.c    		if (!PageUptodate(page)) {
page             2766 fs/buffer.c    		if (page_has_buffers(page))
page             2769 fs/buffer.c    	zero_user(page, offset, length);
page             2770 fs/buffer.c    	set_page_dirty(page);
page             2774 fs/buffer.c    	unlock_page(page);
page             2775 fs/buffer.c    	page_cache_release(page);
page             2790 fs/buffer.c    	struct page *page;
page             2804 fs/buffer.c    	page = grab_cache_page(mapping, index);
page             2806 fs/buffer.c    	if (!page)
page             2809 fs/buffer.c    	if (!page_has_buffers(page))
page             2810 fs/buffer.c    		create_empty_buffers(page, blocksize, 0);
page             2813 fs/buffer.c    	bh = page_buffers(page);
page             2833 fs/buffer.c    	if (PageUptodate(page))
page             2845 fs/buffer.c    	zero_user(page, offset, length);
page             2850 fs/buffer.c    	unlock_page(page);
page             2851 fs/buffer.c    	page_cache_release(page);
page             2862 fs/buffer.c    	struct inode * const inode = page->mapping->host;
page             2868 fs/buffer.c    	if (page->index < end_index)
page             2869 fs/buffer.c    		return __block_write_full_page(inode, page, get_block, wbc);
page             2873 fs/buffer.c    	if (page->index >= end_index+1 || !offset) {
page             2879 fs/buffer.c    		do_invalidatepage(page, 0);
page             2880 fs/buffer.c    		unlock_page(page);
page             2891 fs/buffer.c    	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
page             2892 fs/buffer.c    	return __block_write_full_page(inode, page, get_block, wbc);
page             3088 fs/buffer.c    	struct buffer_head *head = page_buffers(page);
page             3093 fs/buffer.c    		if (buffer_write_io_error(bh) && page->mapping)
page             3094 fs/buffer.c    			set_bit(AS_EIO, &page->mapping->flags);
page             3108 fs/buffer.c    	__clear_page_buffers(page);
page             3116 fs/buffer.c    	struct address_space * const mapping = page->mapping;
page             3120 fs/buffer.c    	BUG_ON(!PageLocked(page));
page             3121 fs/buffer.c    	if (PageWriteback(page))
page             3125 fs/buffer.c    		ret = drop_buffers(page, &buffers_to_free);
page             3130 fs/buffer.c    	ret = drop_buffers(page, &buffers_to_free);
page             3147 fs/buffer.c    		cancel_dirty_page(page, PAGE_CACHE_SIZE);
page             3168 fs/buffer.c    	mapping = page_mapping(page);
page             3170 fs/buffer.c    		blk_run_backing_dev(mapping->backing_dev_info, page);
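The fs/buffer.c entries above all orbit one association: a locked page cache page carries a circular list of buffer_heads, one per block-sized slice, created on demand with create_empty_buffers() and walked via page_buffers(). A minimal sketch of that walk against this era's API; the helper name is hypothetical:

#include <linux/mm.h>
#include <linux/buffer_head.h>

/* Hypothetical helper: dirty every block-sized buffer on a locked page. */
static void dirty_page_buffers(struct page *page, unsigned blocksize)
{
	struct buffer_head *bh, *head;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = head = page_buffers(page);
	do {
		set_buffer_dirty(bh);	/* per-block dirty state */
		bh = bh->b_this_page;	/* circular list ends back at head */
	} while (bh != head);
}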
page             1148 fs/cifs/file.c 	struct address_space *mapping = page->mapping;
page             1149 fs/cifs/file.c 	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
page             1161 fs/cifs/file.c 	inode = page->mapping->host;
page             1166 fs/cifs/file.c 	write_data = kmap(page);
page             1170 fs/cifs/file.c 		kunmap(page);
page             1176 fs/cifs/file.c 		kunmap(page);
page             1200 fs/cifs/file.c 	kunmap(page);
page             1222 fs/cifs/file.c 	struct page *page;
page             1285 fs/cifs/file.c 			page = pvec.pages[i];
page             1295 fs/cifs/file.c 				lock_page(page);
page             1296 fs/cifs/file.c 			else if (!trylock_page(page))
page             1299 fs/cifs/file.c 			if (unlikely(page->mapping != mapping)) {
page             1300 fs/cifs/file.c 				unlock_page(page);
page             1304 fs/cifs/file.c 			if (!wbc->range_cyclic && page->index > end) {
page             1306 fs/cifs/file.c 				unlock_page(page);
page             1310 fs/cifs/file.c 			if (next && (page->index != next)) {
page             1312 fs/cifs/file.c 				unlock_page(page);
page             1317 fs/cifs/file.c 				wait_on_page_writeback(page);
page             1319 fs/cifs/file.c 			if (PageWriteback(page) ||
page             1320 fs/cifs/file.c 					!clear_page_dirty_for_io(page)) {
page             1321 fs/cifs/file.c 				unlock_page(page);
page             1329 fs/cifs/file.c 			set_page_writeback(page);
page             1331 fs/cifs/file.c 			if (page_offset(page) >= mapping->host->i_size) {
page             1333 fs/cifs/file.c 				unlock_page(page);
page             1334 fs/cifs/file.c 				end_page_writeback(page);
page             1341 fs/cifs/file.c 			page_cache_get(page);
page             1343 fs/cifs/file.c 			len = min(mapping->host->i_size - page_offset(page),
page             1348 fs/cifs/file.c 			iov[n_iov].iov_base = kmap(page);
page             1354 fs/cifs/file.c 				offset = page_offset(page);
page             1356 fs/cifs/file.c 			next = page->index + 1;
page             1391 fs/cifs/file.c 				page = pvec.pages[first + i];
page             1398 fs/cifs/file.c 					SetPageError(page);
page             1399 fs/cifs/file.c 				kunmap(page);
page             1400 fs/cifs/file.c 				unlock_page(page);
page             1401 fs/cifs/file.c 				end_page_writeback(page);
page             1402 fs/cifs/file.c 				page_cache_release(page);
page             1434 fs/cifs/file.c 	page_cache_get(page);
page             1435 fs/cifs/file.c 	if (!PageUptodate(page))
page             1448 fs/cifs/file.c 	set_page_writeback(page);
page             1449 fs/cifs/file.c 	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
page             1450 fs/cifs/file.c 	SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
page             1451 fs/cifs/file.c 	unlock_page(page);
page             1452 fs/cifs/file.c 	end_page_writeback(page);
page             1453 fs/cifs/file.c 	page_cache_release(page);
page             1466 fs/cifs/file.c 		 page, pos, copied));
page             1468 fs/cifs/file.c 	if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
page             1469 fs/cifs/file.c 		SetPageUptodate(page);
page             1471 fs/cifs/file.c 	if (!PageUptodate(page)) {
page             1482 fs/cifs/file.c 		page_data = kmap(page);
page             1485 fs/cifs/file.c 		kunmap(page);
page             1491 fs/cifs/file.c 		set_page_dirty(page);
page             1501 fs/cifs/file.c 	unlock_page(page);
page             1502 fs/cifs/file.c 	page_cache_release(page);
page             1757 fs/cifs/file.c 	struct page *page;
page             1764 fs/cifs/file.c 		page = list_entry(pages->prev, struct page, lru);
page             1765 fs/cifs/file.c 		list_del(&page->lru);
page             1767 fs/cifs/file.c 		if (add_to_page_cache(page, mapping, page->index,
page             1769 fs/cifs/file.c 			page_cache_release(page);
page             1776 fs/cifs/file.c 		target = kmap_atomic(page, KM_USER0);
page             1790 fs/cifs/file.c 		flush_dcache_page(page);
page             1791 fs/cifs/file.c 		SetPageUptodate(page);
page             1792 fs/cifs/file.c 		unlock_page(page);
page             1793 fs/cifs/file.c 		if (!pagevec_add(plru_pvec, page))
page             1806 fs/cifs/file.c 	struct page *page;
page             1830 fs/cifs/file.c 		struct page *tmp_page;
page             1836 fs/cifs/file.c 		page = list_entry(page_list->prev, struct page, lru);
page             1837 fs/cifs/file.c 		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
page             1842 fs/cifs/file.c 			list_entry(page_list->prev, struct page, lru)->index;
page             1949 fs/cifs/file.c 	page_cache_get(page);
page             1950 fs/cifs/file.c 	read_data = kmap(page);
page             1966 fs/cifs/file.c 	flush_dcache_page(page);
page             1967 fs/cifs/file.c 	SetPageUptodate(page);
page             1971 fs/cifs/file.c 	kunmap(page);
page             1972 fs/cifs/file.c 	page_cache_release(page);
page             1978 fs/cifs/file.c 	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
page             1990 fs/cifs/file.c 		 page, (int)offset, (int)offset));
page             1992 fs/cifs/file.c 	rc = cifs_readpage_worker(file, page, &offset);
page             1994 fs/cifs/file.c 	unlock_page(page);
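The fs/cifs/file.c writepages entries walk a pagevec through a fixed page-state sequence: lock (or trylock), re-check page->mapping, clear the dirty bit for I/O, mark writeback, and only after the I/O completes unlock and end writeback. A hedged skeleton of the claiming step, with a hypothetical helper name:

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Returns 0 if the page is now claimed for writeback (the caller
 * submits the I/O, then calls unlock_page() and end_page_writeback());
 * -EAGAIN if the page should simply be skipped.
 */
static int claim_page_for_writeback(struct page *page,
				    struct address_space *mapping)
{
	if (!trylock_page(page))
		return -EAGAIN;			/* contended; skip */

	if (page->mapping != mapping) {		/* truncated under us */
		unlock_page(page);
		return -EAGAIN;
	}

	if (PageWriteback(page) || !clear_page_dirty_for_io(page)) {
		unlock_page(page);
		return -EAGAIN;			/* clean or already in flight */
	}

	set_page_writeback(page);		/* the I/O path owns it now */
	return 0;
}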
page             1494 fs/cifs/inode.c 	struct page *page;
page             1497 fs/cifs/inode.c 	page = grab_cache_page(mapping, index);
page             1498 fs/cifs/inode.c 	if (!page)
page             1501 fs/cifs/inode.c 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
page             1502 fs/cifs/inode.c 	unlock_page(page);
page             1503 fs/cifs/inode.c 	page_cache_release(page);
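The fs/cifs/inode.c entries are the whole of its truncate-page helper: grab the page covering the new end of file and zero everything past the cutoff. A minimal sketch of that pattern (helper name hypothetical):

#include <linux/pagemap.h>
#include <linux/highmem.h>

static int zero_page_tail(struct address_space *mapping, pgoff_t index,
			  unsigned offset)
{
	struct page *page = grab_cache_page(mapping, index);

	if (!page)
		return -ENOMEM;

	/* Zero from the cutoff to the end of the page. */
	zero_user_segment(page, offset, PAGE_CACHE_SIZE);

	unlock_page(page);		/* grab_cache_page() returns it locked */
	page_cache_release(page);	/* ...and holding a reference */
	return 0;
}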
page               26 fs/coda/symlink.c 	struct inode *inode = page->mapping->host;
page               30 fs/coda/symlink.c 	char *p = kmap(page);
page               39 fs/coda/symlink.c 	SetPageUptodate(page);
page               40 fs/coda/symlink.c 	kunmap(page);
page               41 fs/coda/symlink.c 	unlock_page(page);
page               45 fs/coda/symlink.c 	SetPageError(page);
page               46 fs/coda/symlink.c 	kunmap(page);
page               47 fs/coda/symlink.c 	unlock_page(page);
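The coda symlink entries (and the efs ones further down) show the classic symlink readpage() shape: kmap the page, fill it, then either SetPageUptodate or SetPageError, and always kunmap and unlock. A sketch under that shape; fetch_link_body() is a hypothetical stand-in for the filesystem-specific read:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

static int fetch_link_body(struct inode *inode, char *buf);	/* stand-in */

static int example_symlink_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	char *p = kmap(page);			/* stable kernel mapping */
	int err = fetch_link_body(inode, p);

	if (err)
		goto fail;
	SetPageUptodate(page);
	kunmap(page);
	unlock_page(page);			/* readpage receives it locked */
	return 0;
fail:
	SetPageError(page);
	kunmap(page);
	unlock_page(page);
	return err;
}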
page             1256 fs/compat.c    	struct page *kmapped_page = NULL;
page             1299 fs/compat.c    				struct page *page;
page             1310 fs/compat.c    						     1, 1, 1, &page, NULL);
page             1322 fs/compat.c    				kmapped_page = page;
page               47 fs/configfs/file.c 	char			* page;
page               72 fs/configfs/file.c 	if (!buffer->page)
page               73 fs/configfs/file.c 		buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
page               74 fs/configfs/file.c 	if (!buffer->page)
page               77 fs/configfs/file.c 	count = ops->show_attribute(item,attr,buffer->page);
page              118 fs/configfs/file.c 		 __func__, count, *ppos, buffer->page);
page              119 fs/configfs/file.c 	retval = simple_read_from_buffer(buf, count, ppos, buffer->page,
page              142 fs/configfs/file.c 	if (!buffer->page)
page              143 fs/configfs/file.c 		buffer->page = (char *)__get_free_pages(GFP_KERNEL, 0);
page              144 fs/configfs/file.c 	if (!buffer->page)
page              149 fs/configfs/file.c 	error = copy_from_user(buffer->page,buf,count);
page              153 fs/configfs/file.c 	buffer->page[count] = 0;
page              176 fs/configfs/file.c 	return ops->store_attribute(item,attr,buffer->page,count);
page              300 fs/configfs/file.c 		if (buffer->page)
page              301 fs/configfs/file.c 			free_page((unsigned long)buffer->page);
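In the configfs entries "page" is not a struct page at all but a char pointer to one zeroed kernel page used as a text buffer, allocated lazily and freed on release. A small sketch of that idiom (struct and function names hypothetical):

#include <linux/gfp.h>

struct text_buffer {
	char *page;	/* one kernel page holding NUL-terminated text */
};

static int text_buffer_alloc(struct text_buffer *buffer)
{
	if (!buffer->page)
		buffer->page = (char *)get_zeroed_page(GFP_KERNEL);
	return buffer->page ? 0 : -ENOMEM;
}

static void text_buffer_release(struct text_buffer *buffer)
{
	if (buffer->page)
		free_page((unsigned long)buffer->page);
}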
page              288 fs/configfs/symlink.c 	unsigned long page = get_zeroed_page(GFP_KERNEL);
page              290 fs/configfs/symlink.c 	if (page) {
page              291 fs/configfs/symlink.c 		error = configfs_getlink(dentry, (char *)page);
page              293 fs/configfs/symlink.c 			nd_set_link(nd, (char *)page);
page              294 fs/configfs/symlink.c 			return (void *)page;
page              306 fs/configfs/symlink.c 		unsigned long page = (unsigned long)cookie;
page              307 fs/configfs/symlink.c 		free_page(page);
page              140 fs/cramfs/inode.c 	struct page *pages[BLKS_PER_BUF];
page              169 fs/cramfs/inode.c 		struct page *page = NULL;
page              172 fs/cramfs/inode.c 			page = read_mapping_page_async(mapping, blocknr + i,
page              175 fs/cramfs/inode.c 			if (IS_ERR(page))
page              176 fs/cramfs/inode.c 				page = NULL;
page              178 fs/cramfs/inode.c 		pages[i] = page;
page              182 fs/cramfs/inode.c 		struct page *page = pages[i];
page              183 fs/cramfs/inode.c 		if (page) {
page              184 fs/cramfs/inode.c 			wait_on_page_locked(page);
page              185 fs/cramfs/inode.c 			if (!PageUptodate(page)) {
page              187 fs/cramfs/inode.c 				page_cache_release(page);
page              200 fs/cramfs/inode.c 		struct page *page = pages[i];
page              201 fs/cramfs/inode.c 		if (page) {
page              202 fs/cramfs/inode.c 			memcpy(data, kmap(page), PAGE_CACHE_SIZE);
page              203 fs/cramfs/inode.c 			kunmap(page);
page              204 fs/cramfs/inode.c 			page_cache_release(page);
page              463 fs/cramfs/inode.c 	struct inode *inode = page->mapping->host;
page              469 fs/cramfs/inode.c 	if (page->index < maxblock) {
page              471 fs/cramfs/inode.c 		u32 blkptr_offset = OFFSET(inode) + page->index*4;
page              476 fs/cramfs/inode.c 		if (page->index)
page              480 fs/cramfs/inode.c 		pgdata = kmap(page);
page              494 fs/cramfs/inode.c 		pgdata = kmap(page);
page              496 fs/cramfs/inode.c 	kunmap(page);
page              497 fs/cramfs/inode.c 	flush_dcache_page(page);
page              498 fs/cramfs/inode.c 	SetPageUptodate(page);
page              499 fs/cramfs/inode.c 	unlock_page(page);
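The cramfs entries batch their block reads: start asynchronous reads for a window of pages first, then wait on each page and drop any that failed to come up to date. A hedged sketch of that two-pass loop; WINDOW_PAGES stands in for BLKS_PER_BUF:

#include <linux/pagemap.h>

#define WINDOW_PAGES 4		/* stand-in for BLKS_PER_BUF */

static void read_page_window(struct address_space *mapping, pgoff_t start,
			     struct page **pages)
{
	unsigned i;

	/* Pass 1: queue every read without blocking. */
	for (i = 0; i < WINDOW_PAGES; i++) {
		struct page *page = read_mapping_page_async(mapping,
							    start + i, NULL);
		pages[i] = IS_ERR(page) ? NULL : page;
	}

	/* Pass 2: wait for completion; an I/O error leaves the page
	 * !PageUptodate, so drop those references. */
	for (i = 0; i < WINDOW_PAGES; i++) {
		struct page *page = pages[i];

		if (!page)
			continue;
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			page_cache_release(page);
			pages[i] = NULL;
		}
	}
}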
page             2092 fs/dcache.c    	char *page = (char *) __get_free_page(GFP_USER);
page             2094 fs/dcache.c    	if (!page)
page             2112 fs/dcache.c    		cwd = __d_path(&pwd, &tmp, page, PAGE_SIZE);
page             2120 fs/dcache.c    		len = PAGE_SIZE + page - cwd;
page             2132 fs/dcache.c    	free_page((unsigned long) page);
page              102 fs/direct-io.c 	struct page *cur_page;		/* The page */
page              118 fs/direct-io.c 	struct page *pages[DIO_PAGES];	/* page buffer */
page              160 fs/direct-io.c 		struct page *page = ZERO_PAGE(0);
page              168 fs/direct-io.c 		page_cache_get(page);
page              169 fs/direct-io.c 		dio->pages[0] = page;
page              193 fs/direct-io.c static struct page *dio_get_page(struct dio *dio)
page              412 fs/direct-io.c 			struct page *page = bvec[page_no].bv_page;
page              414 fs/direct-io.c 			if (dio->rw == READ && !PageCompound(page))
page              415 fs/direct-io.c 				set_page_dirty_lock(page);
page              416 fs/direct-io.c 			page_cache_release(page);
page              669 fs/direct-io.c 	if (	(dio->cur_page == page) &&
page              698 fs/direct-io.c 	page_cache_get(page);		/* It is in dio */
page              699 fs/direct-io.c 	dio->cur_page = page;
page              739 fs/direct-io.c 	struct page *page;
page              760 fs/direct-io.c 	page = ZERO_PAGE(0);
page              761 fs/direct-io.c 	if (submit_page_section(dio, page, 0, this_chunk_bytes, 
page              788 fs/direct-io.c 	struct page *page;
page              797 fs/direct-io.c 		page = dio_get_page(dio);
page              798 fs/direct-io.c 		if (IS_ERR(page)) {
page              799 fs/direct-io.c 			ret = PTR_ERR(page);
page              818 fs/direct-io.c 					page_cache_release(page);
page              859 fs/direct-io.c 					page_cache_release(page);
page              872 fs/direct-io.c 					page_cache_release(page);
page              875 fs/direct-io.c 				zero_user(page, block_in_page << blkbits,
page              905 fs/direct-io.c 			ret = submit_page_section(dio, page, offset_in_page,
page              908 fs/direct-io.c 				page_cache_release(page);
page              923 fs/direct-io.c 		page_cache_release(page);
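Two of the direct-io entries lean on ZERO_PAGE(0), the kernel's shared always-zero page, to pad sub-block tails without allocating anything. It still participates in normal reference counting, hence the page_cache_get() before it is queued. A tiny sketch of that handoff:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Take a counted reference on the shared zero page so the usual
 * page_cache_release() on I/O completion balances out. */
static struct page *grab_zero_page(void)
{
	struct page *page = ZERO_PAGE(0);

	page_cache_get(page);
	return page;
}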
page              112 fs/dlm/lowcomms.c 	struct page *rx_page;
page              126 fs/dlm/lowcomms.c 	struct page *page;
page              771 fs/dlm/lowcomms.c 	__free_page(e->page);
page              826 fs/dlm/lowcomms.c 	kmap(e->page);
page              829 fs/dlm/lowcomms.c 	iov[0].iov_base = page_address(e->page)+offset;
page              857 fs/dlm/lowcomms.c 			kunmap(e->page);
page             1166 fs/dlm/lowcomms.c 	entry->page = alloc_page(allocation);
page             1167 fs/dlm/lowcomms.c 	if (!entry->page) {
page             1207 fs/dlm/lowcomms.c 			kmap(e->page);
page             1208 fs/dlm/lowcomms.c 		*ppc = page_address(e->page) + offset;
page             1236 fs/dlm/lowcomms.c 	kunmap(e->page);
page             1253 fs/dlm/lowcomms.c 	ssize_t(*sendpage) (struct socket *, struct page *, int, size_t, int);
page             1275 fs/dlm/lowcomms.c 		kmap(e->page);
page             1279 fs/dlm/lowcomms.c 			ret = sendpage(con->sock, e->page, offset, len,
page             1297 fs/dlm/lowcomms.c 			kunmap(e->page);
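The dlm lowcomms entries queue outgoing messages in whole pages and push them through the socket's sendpage hook. kernel_sendpage() is the stock wrapper for that hook; a minimal sketch, assuming a connected socket:

#include <linux/net.h>
#include <linux/socket.h>

static int send_queued_page(struct socket *sock, struct page *page,
			    int offset, size_t len)
{
	/* Zero-copy handoff of a page slice to the transport. */
	return kernel_sendpage(sock, page, offset, len,
			       MSG_DONTWAIT | MSG_NOSIGNAL);
}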
page              297 fs/ecryptfs/crypto.c 	struct page *pg;
page              412 fs/ecryptfs/crypto.c 	extent_base = (((loff_t)page->index)
page              430 fs/ecryptfs/crypto.c 				  (page_address(page)
page              435 fs/ecryptfs/crypto.c 					  page, (extent_offset
page              441 fs/ecryptfs/crypto.c 		       "rc = [%d]\n", __func__, page->index, extent_offset,
page              479 fs/ecryptfs/crypto.c 	struct page *enc_extent_page = NULL;
page              483 fs/ecryptfs/crypto.c 	ecryptfs_inode = page->mapping->host;
page              487 fs/ecryptfs/crypto.c 		rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page,
page              492 fs/ecryptfs/crypto.c 			       page->index);
page              508 fs/ecryptfs/crypto.c 		rc = ecryptfs_encrypt_extent(enc_extent_page, crypt_stat, page,
page              516 fs/ecryptfs/crypto.c 			&offset, ((((loff_t)page->index)
page              546 fs/ecryptfs/crypto.c 	extent_base = (((loff_t)page->index)
page              568 fs/ecryptfs/crypto.c 	rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
page              576 fs/ecryptfs/crypto.c 		       "rc = [%d]\n", __func__, page->index, extent_offset,
page              587 fs/ecryptfs/crypto.c 		ecryptfs_dump_hex((char *)(page_address(page)
page              616 fs/ecryptfs/crypto.c 	struct page *enc_extent_page = NULL;
page              620 fs/ecryptfs/crypto.c 	ecryptfs_inode = page->mapping->host;
page              624 fs/ecryptfs/crypto.c 		rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
page              630 fs/ecryptfs/crypto.c 			       page->index);
page              647 fs/ecryptfs/crypto.c 			&offset, ((page->index * (PAGE_CACHE_SIZE
page              659 fs/ecryptfs/crypto.c 		rc = ecryptfs_decrypt_extent(page, crypt_stat, enc_extent_page,
page              178 fs/ecryptfs/ecryptfs_kernel.h 	struct page *page;
page              691 fs/ecryptfs/ecryptfs_kernel.h struct page *ecryptfs_get_locked_page(struct file *file, loff_t index);
page               46 fs/ecryptfs/mmap.c struct page *ecryptfs_get_locked_page(struct file *file, loff_t index)
page               51 fs/ecryptfs/mmap.c 	struct page *page;
page               56 fs/ecryptfs/mmap.c 	page = read_mapping_page(mapping, index, (void *)file);
page               57 fs/ecryptfs/mmap.c 	if (!IS_ERR(page))
page               58 fs/ecryptfs/mmap.c 		lock_page(page);
page               59 fs/ecryptfs/mmap.c 	return page;
page               72 fs/ecryptfs/mmap.c 	rc = ecryptfs_encrypt_page(page);
page               75 fs/ecryptfs/mmap.c 				"page (upper index [0x%.16x])\n", page->index);
page               76 fs/ecryptfs/mmap.c 		ClearPageUptodate(page);
page               79 fs/ecryptfs/mmap.c 	SetPageUptodate(page);
page               80 fs/ecryptfs/mmap.c 	unlock_page(page);
page              134 fs/ecryptfs/mmap.c 		loff_t view_extent_num = ((((loff_t)page->index)
page              145 fs/ecryptfs/mmap.c 			page_virt = kmap_atomic(page, KM_USER0);
page              150 fs/ecryptfs/mmap.c 					page_virt, page->mapping->host);
page              154 fs/ecryptfs/mmap.c 			flush_dcache_page(page);
page              167 fs/ecryptfs/mmap.c 				page, (lower_offset >> PAGE_CACHE_SHIFT),
page              169 fs/ecryptfs/mmap.c 				crypt_stat->extent_size, page->mapping->host);
page              204 fs/ecryptfs/mmap.c 		rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
page              206 fs/ecryptfs/mmap.c 						      page->mapping->host);
page              209 fs/ecryptfs/mmap.c 			rc = ecryptfs_copy_up_encrypted_with_header(page,
page              222 fs/ecryptfs/mmap.c 				page, page->index, 0, PAGE_CACHE_SIZE,
page              223 fs/ecryptfs/mmap.c 				page->mapping->host);
page              231 fs/ecryptfs/mmap.c 		rc = ecryptfs_decrypt_page(page);
page              240 fs/ecryptfs/mmap.c 		ClearPageUptodate(page);
page              242 fs/ecryptfs/mmap.c 		SetPageUptodate(page);
page              244 fs/ecryptfs/mmap.c 			page->index);
page              245 fs/ecryptfs/mmap.c 	unlock_page(page);
page              254 fs/ecryptfs/mmap.c 	struct inode *inode = page->mapping->host;
page              257 fs/ecryptfs/mmap.c 	if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index)
page              262 fs/ecryptfs/mmap.c 	zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE);
page              284 fs/ecryptfs/mmap.c 	if (!PageUptodate(page)) {
page              292 fs/ecryptfs/mmap.c 				page, page->index, 0, PAGE_CACHE_SIZE,
page              293 fs/ecryptfs/mmap.c 				page->mapping->host);
page              298 fs/ecryptfs/mmap.c 				ClearPageUptodate(page);
page              301 fs/ecryptfs/mmap.c 				SetPageUptodate(page);
page              305 fs/ecryptfs/mmap.c 					page, crypt_stat);
page              313 fs/ecryptfs/mmap.c 					ClearPageUptodate(page);
page              316 fs/ecryptfs/mmap.c 				SetPageUptodate(page);
page              319 fs/ecryptfs/mmap.c 					page, page->index, 0, PAGE_CACHE_SIZE,
page              320 fs/ecryptfs/mmap.c 					page->mapping->host);
page              325 fs/ecryptfs/mmap.c 					ClearPageUptodate(page);
page              328 fs/ecryptfs/mmap.c 				SetPageUptodate(page);
page              331 fs/ecryptfs/mmap.c 			rc = ecryptfs_decrypt_page(page);
page              335 fs/ecryptfs/mmap.c 				       __func__, page->index, rc);
page              336 fs/ecryptfs/mmap.c 				ClearPageUptodate(page);
page              339 fs/ecryptfs/mmap.c 			SetPageUptodate(page);
page              342 fs/ecryptfs/mmap.c 	prev_page_end_size = ((loff_t)page->index << PAGE_CACHE_SHIFT);
page              345 fs/ecryptfs/mmap.c 	if (page->index != 0) {
page              346 fs/ecryptfs/mmap.c 		if (prev_page_end_size > i_size_read(page->mapping->host)) {
page              360 fs/ecryptfs/mmap.c 	if ((i_size_read(page->mapping->host) == prev_page_end_size)
page              362 fs/ecryptfs/mmap.c 		zero_user(page, 0, PAGE_CACHE_SIZE);
page              462 fs/ecryptfs/mmap.c 	struct inode *ecryptfs_inode = page->mapping->host;
page              474 fs/ecryptfs/mmap.c 			"(page w/ index = [0x%.16x], to = [%d])\n", page->index,
page              477 fs/ecryptfs/mmap.c 	rc = fill_zeros_to_end_of_page(page, to);
page              481 fs/ecryptfs/mmap.c 				page->index);
page              484 fs/ecryptfs/mmap.c 	rc = ecryptfs_encrypt_page(page);
page              487 fs/ecryptfs/mmap.c 				"index [0x%.16x])\n", page->index);
page              490 fs/ecryptfs/mmap.c 	pos = (((loff_t)page->index) << PAGE_CACHE_SHIFT) + to;
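The ecryptfs mmap entries open with a helper worth calling out: read_mapping_page() returns the page referenced and up to date but unlocked, so "get me a locked page" is just a read followed by lock_page(), as ecryptfs_get_locked_page() above does. A generic sketch:

#include <linux/pagemap.h>

static struct page *get_locked_page(struct address_space *mapping,
				    pgoff_t index)
{
	struct page *page = read_mapping_page(mapping, index, NULL);

	if (!IS_ERR(page))
		lock_page(page);
	return page;	/* caller: unlock_page() + page_cache_release() */
}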
page              119 fs/ecryptfs/read_write.c 	struct page *ecryptfs_page;
page              303 fs/ecryptfs/read_write.c 	struct page *ecryptfs_page;
page               18 fs/efs/inode.c 	return block_read_full_page(page,efs_get_block);
page               17 fs/efs/symlink.c 	char *link = kmap(page);
page               19 fs/efs/symlink.c 	struct inode * inode = page->mapping->host;
page               44 fs/efs/symlink.c 	SetPageUptodate(page);
page               45 fs/efs/symlink.c 	kunmap(page);
page               46 fs/efs/symlink.c 	unlock_page(page);
page               51 fs/efs/symlink.c 	SetPageError(page);
page               52 fs/efs/symlink.c 	kunmap(page);
page               53 fs/efs/symlink.c 	unlock_page(page);
page              171 fs/exec.c      static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
page              174 fs/exec.c      	struct page *page;
page              185 fs/exec.c      			1, write, 1, &page, NULL);
page              198 fs/exec.c      			return page;
page              209 fs/exec.c      			put_page(page);
page              214 fs/exec.c      	return page;
page              219 fs/exec.c      	put_page(page);
page              233 fs/exec.c      	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
page              289 fs/exec.c      static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
page              292 fs/exec.c      	struct page *page;
page              294 fs/exec.c      	page = bprm->page[pos / PAGE_SIZE];
page              295 fs/exec.c      	if (!page && write) {
page              296 fs/exec.c      		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
page              297 fs/exec.c      		if (!page)
page              299 fs/exec.c      		bprm->page[pos / PAGE_SIZE] = page;
page              302 fs/exec.c      	return page;
page              311 fs/exec.c      	if (bprm->page[i]) {
page              312 fs/exec.c      		__free_page(bprm->page[i]);
page              313 fs/exec.c      		bprm->page[i] = NULL;
page              410 fs/exec.c      	struct page *kmapped_page = NULL;
page              453 fs/exec.c      				struct page *page;
page              455 fs/exec.c      				page = get_arg_page(bprm, pos, 1);
page              456 fs/exec.c      				if (!page) {
page              466 fs/exec.c      				kmapped_page = page;
page             1128 fs/exec.c      	struct page *page;
page             1135 fs/exec.c      		page = get_arg_page(bprm, bprm->p, 0);
page             1136 fs/exec.c      		if (!page) {
page             1140 fs/exec.c      		kaddr = kmap_atomic(page, KM_USER0);
page             1147 fs/exec.c      		put_arg_page(page);
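The fs/exec.c entries pair get_arg_page() with short-lived atomic mappings to copy argv/envp bytes into the new stack's pages; KM_USER0 is this era's kmap_atomic slot. A sketch of the copy step (helper name hypothetical):

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_into_page(struct page *page, unsigned offset,
			   const char *src, size_t len)
{
	char *kaddr = kmap_atomic(page, KM_USER0);

	memcpy(kaddr + offset, src, len);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);	/* user space will read these bytes */
}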
page               60 fs/ext2/dir.c  	kunmap(page);
page               61 fs/ext2/dir.c  	page_cache_release(page);
page               86 fs/ext2/dir.c  	struct address_space *mapping = page->mapping;
page               91 fs/ext2/dir.c  	block_write_end(NULL, mapping, pos, len, len, page, NULL);
page               99 fs/ext2/dir.c  		err = write_one_page(page, 1);
page              101 fs/ext2/dir.c  		unlock_page(page);
page              108 fs/ext2/dir.c  	struct inode *dir = page->mapping->host;
page              111 fs/ext2/dir.c  	char *kaddr = page_address(page);
page              118 fs/ext2/dir.c  	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
page              143 fs/ext2/dir.c  	SetPageChecked(page);
page              171 fs/ext2/dir.c  		dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
page              180 fs/ext2/dir.c  		dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
page              183 fs/ext2/dir.c  	SetPageChecked(page);
page              184 fs/ext2/dir.c  	SetPageError(page);
page              187 fs/ext2/dir.c  static struct page * ext2_get_page(struct inode *dir, unsigned long n)
page              190 fs/ext2/dir.c  	struct page *page = read_mapping_page(mapping, n, NULL);
page              191 fs/ext2/dir.c  	if (!IS_ERR(page)) {
page              192 fs/ext2/dir.c  		kmap(page);
page              193 fs/ext2/dir.c  		if (!PageChecked(page))
page              194 fs/ext2/dir.c  			ext2_check_page(page);
page              195 fs/ext2/dir.c  		if (PageError(page))
page              198 fs/ext2/dir.c  	return page;
page              201 fs/ext2/dir.c  	ext2_put_page(page);
page              295 fs/ext2/dir.c  		struct page *page = ext2_get_page(inode, n);
page              297 fs/ext2/dir.c  		if (IS_ERR(page)) {
page              302 fs/ext2/dir.c  			return PTR_ERR(page);
page              304 fs/ext2/dir.c  		kaddr = page_address(page);
page              319 fs/ext2/dir.c  				ext2_put_page(page);
page              334 fs/ext2/dir.c  					ext2_put_page(page);
page              340 fs/ext2/dir.c  		ext2_put_page(page);
page              361 fs/ext2/dir.c  	struct page *page = NULL;
page              377 fs/ext2/dir.c  		page = ext2_get_page(dir, n);
page              378 fs/ext2/dir.c  		if (!IS_ERR(page)) {
page              379 fs/ext2/dir.c  			kaddr = page_address(page);
page              386 fs/ext2/dir.c  					ext2_put_page(page);
page              393 fs/ext2/dir.c  			ext2_put_page(page);
page              410 fs/ext2/dir.c  	*res_page = page;
page              417 fs/ext2/dir.c  	struct page *page = ext2_get_page(dir, 0);
page              420 fs/ext2/dir.c  	if (!IS_ERR(page)) {
page              421 fs/ext2/dir.c  		de = ext2_next_entry((ext2_dirent *) page_address(page));
page              422 fs/ext2/dir.c  		*p = page;
page              431 fs/ext2/dir.c  	struct page *page;
page              433 fs/ext2/dir.c  	de = ext2_find_entry (dir, dentry, &page);
page              436 fs/ext2/dir.c  		ext2_put_page(page);
page              445 fs/ext2/dir.c  	loff_t pos = page_offset(page) +
page              446 fs/ext2/dir.c  			(char *) de - (char *) page_address(page);
page              450 fs/ext2/dir.c  	lock_page(page);
page              451 fs/ext2/dir.c  	err = __ext2_write_begin(NULL, page->mapping, pos, len,
page              452 fs/ext2/dir.c  				AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
page              456 fs/ext2/dir.c  	err = ext2_commit_chunk(page, pos, len);
page              457 fs/ext2/dir.c  	ext2_put_page(page);
page              474 fs/ext2/dir.c  	struct page *page = NULL;
page              490 fs/ext2/dir.c  		page = ext2_get_page(dir, n);
page              491 fs/ext2/dir.c  		err = PTR_ERR(page);
page              492 fs/ext2/dir.c  		if (IS_ERR(page))
page              494 fs/ext2/dir.c  		lock_page(page);
page              495 fs/ext2/dir.c  		kaddr = page_address(page);
page              525 fs/ext2/dir.c  		unlock_page(page);
page              526 fs/ext2/dir.c  		ext2_put_page(page);
page              532 fs/ext2/dir.c  	pos = page_offset(page) +
page              533 fs/ext2/dir.c  		(char*)de - (char*)page_address(page);
page              534 fs/ext2/dir.c  	err = __ext2_write_begin(NULL, page->mapping, pos, rec_len, 0,
page              535 fs/ext2/dir.c  							&page, NULL);
page              548 fs/ext2/dir.c  	err = ext2_commit_chunk(page, pos, rec_len);
page              554 fs/ext2/dir.c  	ext2_put_page(page);
page              558 fs/ext2/dir.c  	unlock_page(page);
page              568 fs/ext2/dir.c  	struct address_space *mapping = page->mapping;
page              570 fs/ext2/dir.c  	char *kaddr = page_address(page);
page              590 fs/ext2/dir.c  		from = (char*)pde - (char*)page_address(page);
page              591 fs/ext2/dir.c  	pos = page_offset(page) + from;
page              592 fs/ext2/dir.c  	lock_page(page);
page              593 fs/ext2/dir.c  	err = __ext2_write_begin(NULL, page->mapping, pos, to - from, 0,
page              594 fs/ext2/dir.c  							&page, NULL);
page              599 fs/ext2/dir.c  	err = ext2_commit_chunk(page, pos, to - from);
page              604 fs/ext2/dir.c  	ext2_put_page(page);
page              614 fs/ext2/dir.c  	struct page *page = grab_cache_page(mapping, 0);
page              620 fs/ext2/dir.c  	if (!page)
page              623 fs/ext2/dir.c  	err = __ext2_write_begin(NULL, page->mapping, 0, chunk_size, 0,
page              624 fs/ext2/dir.c  							&page, NULL);
page              626 fs/ext2/dir.c  		unlock_page(page);
page              629 fs/ext2/dir.c  	kaddr = kmap_atomic(page, KM_USER0);
page              645 fs/ext2/dir.c  	err = ext2_commit_chunk(page, 0, chunk_size);
page              647 fs/ext2/dir.c  	page_cache_release(page);
page              656 fs/ext2/dir.c  	struct page *page = NULL;
page              662 fs/ext2/dir.c  		page = ext2_get_page(inode, i);
page              664 fs/ext2/dir.c  		if (IS_ERR(page))
page              667 fs/ext2/dir.c  		kaddr = page_address(page);
page              693 fs/ext2/dir.c  		ext2_put_page(page);
page              698 fs/ext2/dir.c  	ext2_put_page(page);
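The ext2 directory code wraps all of its page access in a get/put pair: read the directory page through the page cache, keep it kmap()ed while entries are parsed, then unmap and drop the reference. A sketch of that pairing (the real ext2_get_page() also validates the page via ext2_check_page(), omitted here):

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

static struct page *dir_get_page(struct inode *dir, unsigned long n)
{
	struct page *page = read_mapping_page(dir->i_mapping, n, NULL);

	if (!IS_ERR(page))
		kmap(page);	/* keep a kernel mapping while parsing */
	return page;
}

static void dir_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}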
page              717 fs/ext2/inode.c 	return block_write_full_page(page, ext2_get_block, wbc);
page              722 fs/ext2/inode.c 	return mpage_readpage(page, ext2_get_block);
page              766 fs/ext2/inode.c 	return nobh_writepage(page, ext2_get_block, wbc);
page              257 fs/ext2/namei.c 	struct page * page;
page              260 fs/ext2/namei.c 	de = ext2_find_entry (dir, dentry, &page);
page              264 fs/ext2/namei.c 	err = ext2_delete_entry (de, page);
page              296 fs/ext2/namei.c 	struct page * dir_page = NULL;
page              298 fs/ext2/namei.c 	struct page * old_page;
page              314 fs/ext2/namei.c 		struct page *new_page;
page             1154 fs/ext3/inode.c 	struct page *page;
page             1163 fs/ext3/inode.c 	page = __grab_cache_page(mapping, index);
page             1164 fs/ext3/inode.c 	if (!page)
page             1166 fs/ext3/inode.c 	*pagep = page;
page             1170 fs/ext3/inode.c 		unlock_page(page);
page             1171 fs/ext3/inode.c 		page_cache_release(page);
page             1181 fs/ext3/inode.c 		ret = walk_page_buffers(handle, page_buffers(page),
page             1187 fs/ext3/inode.c 		unlock_page(page);
page             1188 fs/ext3/inode.c 		page_cache_release(page);
page             1228 fs/ext3/inode.c 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
page             1258 fs/ext3/inode.c 	ret = walk_page_buffers(handle, page_buffers(page),
page             1273 fs/ext3/inode.c 							page, fsdata);
page             1281 fs/ext3/inode.c 	unlock_page(page);
page             1282 fs/ext3/inode.c 	page_cache_release(page);
page             1302 fs/ext3/inode.c 							page, fsdata);
page             1310 fs/ext3/inode.c 	unlock_page(page);
page             1311 fs/ext3/inode.c 	page_cache_release(page);
page             1331 fs/ext3/inode.c 		if (!PageUptodate(page))
page             1333 fs/ext3/inode.c 		page_zero_new_buffers(page, from+copied, to);
page             1336 fs/ext3/inode.c 	ret = walk_page_buffers(handle, page_buffers(page), from,
page             1339 fs/ext3/inode.c 		SetPageUptodate(page);
page             1353 fs/ext3/inode.c 	unlock_page(page);
page             1354 fs/ext3/inode.c 	page_cache_release(page);
page             1485 fs/ext3/inode.c 	struct inode *inode = page->mapping->host;
page             1491 fs/ext3/inode.c 	J_ASSERT(PageLocked(page));
page             1507 fs/ext3/inode.c 	if (!page_has_buffers(page)) {
page             1508 fs/ext3/inode.c 		create_empty_buffers(page, inode->i_sb->s_blocksize,
page             1511 fs/ext3/inode.c 	page_bufs = page_buffers(page);
page             1515 fs/ext3/inode.c 	ret = block_write_full_page(page, ext3_get_block, wbc);
page             1543 fs/ext3/inode.c 	redirty_page_for_writepage(wbc, page);
page             1544 fs/ext3/inode.c 	unlock_page(page);
page             1551 fs/ext3/inode.c 	struct inode *inode = page->mapping->host;
page             1566 fs/ext3/inode.c 		ret = nobh_writepage(page, ext3_get_block, wbc);
page             1568 fs/ext3/inode.c 		ret = block_write_full_page(page, ext3_get_block, wbc);
page             1576 fs/ext3/inode.c 	redirty_page_for_writepage(wbc, page);
page             1577 fs/ext3/inode.c 	unlock_page(page);
page             1584 fs/ext3/inode.c 	struct inode *inode = page->mapping->host;
page             1598 fs/ext3/inode.c 	if (!page_has_buffers(page) || PageChecked(page)) {
page             1603 fs/ext3/inode.c 		ClearPageChecked(page);
page             1604 fs/ext3/inode.c 		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
page             1610 fs/ext3/inode.c 		ret = walk_page_buffers(handle, page_buffers(page), 0,
page             1613 fs/ext3/inode.c 		err = walk_page_buffers(handle, page_buffers(page), 0,
page             1618 fs/ext3/inode.c 		unlock_page(page);
page             1625 fs/ext3/inode.c 		ret = block_write_full_page(page, ext3_get_block, wbc);
page             1634 fs/ext3/inode.c 	redirty_page_for_writepage(wbc, page);
page             1636 fs/ext3/inode.c 	unlock_page(page);
page             1642 fs/ext3/inode.c 	return mpage_readpage(page, ext3_get_block);
page             1654 fs/ext3/inode.c 	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
page             1660 fs/ext3/inode.c 		ClearPageChecked(page);
page             1662 fs/ext3/inode.c 	journal_invalidatepage(journal, page, offset);
page             1667 fs/ext3/inode.c 	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
page             1669 fs/ext3/inode.c 	WARN_ON(PageChecked(page));
page             1670 fs/ext3/inode.c 	if (!page_has_buffers(page))
page             1672 fs/ext3/inode.c 	return journal_try_to_free_buffers(journal, page, wait);
page             1773 fs/ext3/inode.c 	SetPageChecked(page);
page             1774 fs/ext3/inode.c 	return __set_page_dirty_nobuffers(page);
page             1855 fs/ext3/inode.c 	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
page             1856 fs/ext3/inode.c 	     ext3_should_writeback_data(inode) && PageUptodate(page)) {
page             1857 fs/ext3/inode.c 		zero_user(page, offset, length);
page             1858 fs/ext3/inode.c 		set_page_dirty(page);
page             1862 fs/ext3/inode.c 	if (!page_has_buffers(page))
page             1863 fs/ext3/inode.c 		create_empty_buffers(page, blocksize, 0);
page             1866 fs/ext3/inode.c 	bh = page_buffers(page);
page             1891 fs/ext3/inode.c 	if (PageUptodate(page))
page             1910 fs/ext3/inode.c 	zero_user(page, offset, length);
page             1923 fs/ext3/inode.c 	unlock_page(page);
page             1924 fs/ext3/inode.c 	page_cache_release(page);
page             2336 fs/ext3/inode.c 	struct page *page;
page             2347 fs/ext3/inode.c 		page = NULL;
page             2349 fs/ext3/inode.c 		page = grab_cache_page(mapping,
page             2351 fs/ext3/inode.c 		if (!page)
page             2357 fs/ext3/inode.c 		if (page) {
page             2358 fs/ext3/inode.c 			clear_highpage(page);
page             2359 fs/ext3/inode.c 			flush_dcache_page(page);
page             2360 fs/ext3/inode.c 			unlock_page(page);
page             2361 fs/ext3/inode.c 			page_cache_release(page);
page             2369 fs/ext3/inode.c 	if (page)
page             2370 fs/ext3/inode.c 		ext3_block_truncate_page(handle, page, mapping, inode->i_size);
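Two small patterns recur through the ext3 entries. In data=journal mode a freshly dirtied page is also tagged "checked" so writepage knows it must go through the journal, and every writepage bail-out restores the dirty bit before unlocking so the page is retried on a later writeback pass. Sketches of both (function names hypothetical):

#include <linux/mm.h>
#include <linux/writeback.h>

static int journalled_set_page_dirty(struct page *page)
{
	SetPageChecked(page);	/* writepage must journal this page */
	return __set_page_dirty_nobuffers(page);
}

static int writepage_bail(struct page *page, struct writeback_control *wbc)
{
	redirty_page_for_writepage(wbc, page);	/* put the dirty bit back */
	unlock_page(page);
	return 0;
}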
page             3102 fs/ext4/extents.c 		struct page *page;
page             3106 fs/ext4/extents.c 		page = find_get_page(inode->i_mapping, offset);
page             3107 fs/ext4/extents.c 		if (!page || !page_has_buffers(page))
page             3110 fs/ext4/extents.c 		bh = page_buffers(page);
page             3117 fs/ext4/extents.c 			page_cache_release(page);
page             3119 fs/ext4/extents.c 			page_cache_release(page);
page             1333 fs/ext4/inode.c 	struct page *page;
page             1348 fs/ext4/inode.c 	page = __grab_cache_page(mapping, index);
page             1349 fs/ext4/inode.c 	if (!page) {
page             1354 fs/ext4/inode.c 	*pagep = page;
page             1360 fs/ext4/inode.c 		ret = walk_page_buffers(handle, page_buffers(page),
page             1365 fs/ext4/inode.c 		unlock_page(page);
page             1367 fs/ext4/inode.c 		page_cache_release(page);
page             1424 fs/ext4/inode.c 							page, fsdata);
page             1457 fs/ext4/inode.c 							page, fsdata);
page             1485 fs/ext4/inode.c 		if (!PageUptodate(page))
page             1487 fs/ext4/inode.c 		page_zero_new_buffers(page, from+copied, to);
page             1490 fs/ext4/inode.c 	ret = walk_page_buffers(handle, page_buffers(page), from,
page             1493 fs/ext4/inode.c 		SetPageUptodate(page);
page             1505 fs/ext4/inode.c 	unlock_page(page);
page             1509 fs/ext4/inode.c 	page_cache_release(page);
page             1602 fs/ext4/inode.c 	head = page_buffers(page);
page             1613 fs/ext4/inode.c 	ext4_da_release_space(page->mapping->host, to_release);
page             1663 fs/ext4/inode.c 			struct page *page = pvec.pages[i];
page             1665 fs/ext4/inode.c 			index = page->index;
page             1670 fs/ext4/inode.c 			err = mapping->a_ops->writepage(page, mpd->wbc);
page             1721 fs/ext4/inode.c 			struct page *page = pvec.pages[i];
page             1723 fs/ext4/inode.c 			index = page->index;
page             1728 fs/ext4/inode.c 			BUG_ON(!PageLocked(page));
page             1729 fs/ext4/inode.c 			BUG_ON(PageWriteback(page));
page             1730 fs/ext4/inode.c 			BUG_ON(!page_has_buffers(page));
page             1732 fs/ext4/inode.c 			bh = page_buffers(page);
page             1799 fs/ext4/inode.c 			struct page *page = pvec.pages[i];
page             1800 fs/ext4/inode.c 			index = page->index;
page             1805 fs/ext4/inode.c 			BUG_ON(!PageLocked(page));
page             1806 fs/ext4/inode.c 			BUG_ON(PageWriteback(page));
page             1807 fs/ext4/inode.c 			block_invalidatepage(page, 0);
page             1808 fs/ext4/inode.c 			ClearPageUptodate(page);
page             1809 fs/ext4/inode.c 			unlock_page(page);
page             2016 fs/ext4/inode.c 		redirty_page_for_writepage(wbc, page);
page             2017 fs/ext4/inode.c 		unlock_page(page);
page             2023 fs/ext4/inode.c 	if (mpd->next_page != page->index) {
page             2035 fs/ext4/inode.c 			redirty_page_for_writepage(wbc, page);
page             2036 fs/ext4/inode.c 			unlock_page(page);
page             2043 fs/ext4/inode.c 		mpd->first_page = page->index;
page             2053 fs/ext4/inode.c 	mpd->next_page = page->index + 1;
page             2054 fs/ext4/inode.c 	logical = (sector_t) page->index <<
page             2057 fs/ext4/inode.c 	if (!page_has_buffers(page)) {
page             2074 fs/ext4/inode.c 		head = page_buffers(page);
page             2264 fs/ext4/inode.c 	struct inode *inode = page->mapping->host;
page             2267 fs/ext4/inode.c 	if (page->index == size >> PAGE_CACHE_SHIFT)
page             2272 fs/ext4/inode.c 	if (page_has_buffers(page)) {
page             2273 fs/ext4/inode.c 		page_bufs = page_buffers(page);
page             2284 fs/ext4/inode.c 			redirty_page_for_writepage(wbc, page);
page             2285 fs/ext4/inode.c 			unlock_page(page);
page             2302 fs/ext4/inode.c 		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
page             2305 fs/ext4/inode.c 			page_bufs = page_buffers(page);
page             2309 fs/ext4/inode.c 				redirty_page_for_writepage(wbc, page);
page             2310 fs/ext4/inode.c 				unlock_page(page);
page             2319 fs/ext4/inode.c 			redirty_page_for_writepage(wbc, page);
page             2320 fs/ext4/inode.c 			unlock_page(page);
page             2326 fs/ext4/inode.c 		ret = nobh_writepage(page, ext4_normal_get_block_write, wbc);
page             2328 fs/ext4/inode.c 		ret = block_write_full_page(page,
page             2502 fs/ext4/inode.c 	struct page *page;
page             2531 fs/ext4/inode.c 	page = __grab_cache_page(mapping, index);
page             2532 fs/ext4/inode.c 	if (!page) {
page             2537 fs/ext4/inode.c 	*pagep = page;
page             2542 fs/ext4/inode.c 		unlock_page(page);
page             2544 fs/ext4/inode.c 		page_cache_release(page);
page             2568 fs/ext4/inode.c 	struct inode *inode = page->mapping->host;
page             2572 fs/ext4/inode.c 	bh = page_buffers(page);
page             2598 fs/ext4/inode.c 					len, copied, page, fsdata);
page             2601 fs/ext4/inode.c 					len, copied, page, fsdata);
page             2618 fs/ext4/inode.c 		if (ext4_da_should_update_i_disksize(page, end)) {
page             2640 fs/ext4/inode.c 							page, fsdata);
page             2656 fs/ext4/inode.c 	BUG_ON(!PageLocked(page));
page             2657 fs/ext4/inode.c 	if (!page_has_buffers(page))
page             2660 fs/ext4/inode.c 	ext4_da_page_release_reservation(page, offset);
page             2663 fs/ext4/inode.c 	ext4_invalidatepage(page, offset);
page             2799 fs/ext4/inode.c 	struct inode *inode = page->mapping->host;
page             2802 fs/ext4/inode.c 		return nobh_writepage(page,
page             2805 fs/ext4/inode.c 		return block_write_full_page(page,
page             2813 fs/ext4/inode.c 	struct inode *inode = page->mapping->host;
page             2817 fs/ext4/inode.c 	J_ASSERT(PageLocked(page));
page             2818 fs/ext4/inode.c 	if (page->index == size >> PAGE_CACHE_SHIFT)
page             2823 fs/ext4/inode.c 	if (page_has_buffers(page)) {
page             2833 fs/ext4/inode.c 		BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
page             2838 fs/ext4/inode.c 		return __ext4_normal_writepage(page, wbc);
page             2840 fs/ext4/inode.c 	redirty_page_for_writepage(wbc, page);
page             2841 fs/ext4/inode.c 	unlock_page(page);
page             2848 fs/ext4/inode.c 	struct address_space *mapping = page->mapping;
page             2855 fs/ext4/inode.c 	ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
page             2860 fs/ext4/inode.c 	page_bufs = page_buffers(page);
page             2865 fs/ext4/inode.c 	unlock_page(page);
page             2890 fs/ext4/inode.c 	unlock_page(page);
page             2898 fs/ext4/inode.c 	struct inode *inode = page->mapping->host;
page             2902 fs/ext4/inode.c 	J_ASSERT(PageLocked(page));
page             2903 fs/ext4/inode.c 	if (page->index == size >> PAGE_CACHE_SHIFT)
page             2908 fs/ext4/inode.c 	if (page_has_buffers(page)) {
page             2918 fs/ext4/inode.c 		BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
page             2925 fs/ext4/inode.c 	if (PageChecked(page)) {
page             2930 fs/ext4/inode.c 		ClearPageChecked(page);
page             2931 fs/ext4/inode.c 		return __ext4_journalled_writepage(page, wbc);
page             2938 fs/ext4/inode.c 		return block_write_full_page(page,
page             2943 fs/ext4/inode.c 	redirty_page_for_writepage(wbc, page);
page             2944 fs/ext4/inode.c 	unlock_page(page);
page             2950 fs/ext4/inode.c 	return mpage_readpage(page, ext4_get_block);
page             2962 fs/ext4/inode.c 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
page             2968 fs/ext4/inode.c 		ClearPageChecked(page);
page             2970 fs/ext4/inode.c 	jbd2_journal_invalidatepage(journal, page, offset);
page             2975 fs/ext4/inode.c 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
page             2977 fs/ext4/inode.c 	WARN_ON(PageChecked(page));
page             2978 fs/ext4/inode.c 	if (!page_has_buffers(page))
page             2980 fs/ext4/inode.c 	return jbd2_journal_try_to_free_buffers(journal, page, wait);
page             3081 fs/ext4/inode.c 	SetPageChecked(page);
page             3082 fs/ext4/inode.c 	return __set_page_dirty_nobuffers(page);
page             3176 fs/ext4/inode.c 	struct page *page;
page             3179 fs/ext4/inode.c 	page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT);
page             3180 fs/ext4/inode.c 	if (!page)
page             3191 fs/ext4/inode.c 	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
page             3192 fs/ext4/inode.c 	     ext4_should_writeback_data(inode) && PageUptodate(page)) {
page             3193 fs/ext4/inode.c 		zero_user(page, offset, length);
page             3194 fs/ext4/inode.c 		set_page_dirty(page);
page             3198 fs/ext4/inode.c 	if (!page_has_buffers(page))
page             3199 fs/ext4/inode.c 		create_empty_buffers(page, blocksize, 0);
page             3202 fs/ext4/inode.c 	bh = page_buffers(page);
page             3227 fs/ext4/inode.c 	if (PageUptodate(page))
page             3246 fs/ext4/inode.c 	zero_user(page, offset, length);
page             3260 fs/ext4/inode.c 	unlock_page(page);
page             3261 fs/ext4/inode.c 	page_cache_release(page);
page             4952 fs/ext4/inode.c 	if (page->mapping != mapping || size <= page_offset(page)
page             4953 fs/ext4/inode.c 	    || !PageUptodate(page)) {
page             4958 fs/ext4/inode.c 	if (PageMappedToDisk(page))
page             4961 fs/ext4/inode.c 	if (page->index == size >> PAGE_CACHE_SHIFT)
page             4966 fs/ext4/inode.c 	if (page_has_buffers(page)) {
page             4968 fs/ext4/inode.c 		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
page             4979 fs/ext4/inode.c 	ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
page             4980 fs/ext4/inode.c 			len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
page             4983 fs/ext4/inode.c 	ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
page             4984 fs/ext4/inode.c 			len, len, page, fsdata);
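The tail of the ext4 inode entries is its page_mkwrite fallback: when a shared mmap write faults on a page without allocated blocks, the page is pushed through the address_space's own write_begin/write_end pair to force allocation. A hedged sketch; the real code trims len at end of file, which is omitted here:

#include <linux/fs.h>
#include <linux/pagemap.h>

static int mkwrite_via_write_begin(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	loff_t pos = page_offset(page);
	unsigned len = PAGE_CACHE_SIZE;
	void *fsdata;
	int ret;

	ret = mapping->a_ops->write_begin(file, mapping, pos, len,
					  AOP_FLAG_UNINTERRUPTIBLE,
					  &page, &fsdata);
	if (ret < 0)
		return ret;
	ret = mapping->a_ops->write_end(file, mapping, pos, len, len,
					page, fsdata);
	return ret < 0 ? ret : 0;	/* write_end returns bytes committed */
}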
page              745 fs/ext4/mballoc.c 	mb_debug("init page %lu\n", page->index);
page              747 fs/ext4/mballoc.c 	inode = page->mapping->host;
page              766 fs/ext4/mballoc.c 	first_group = page->index * blocks_per_page / 2;
page              816 fs/ext4/mballoc.c 	first_block = page->index * blocks_per_page;
page              831 fs/ext4/mballoc.c 		data = page_address(page) + (i * blocksize);
page              842 fs/ext4/mballoc.c 				group, page->index, i * blocksize);
page              857 fs/ext4/mballoc.c 				group, page->index, i * blocksize);
page              873 fs/ext4/mballoc.c 	SetPageUptodate(page);
page              895 fs/ext4/mballoc.c 	struct page *page;
page              920 fs/ext4/mballoc.c 	page = find_get_page(inode->i_mapping, pnum);
page              921 fs/ext4/mballoc.c 	if (page == NULL || !PageUptodate(page)) {
page              922 fs/ext4/mballoc.c 		if (page)
page              923 fs/ext4/mballoc.c 			page_cache_release(page);
page              924 fs/ext4/mballoc.c 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
page              925 fs/ext4/mballoc.c 		if (page) {
page              926 fs/ext4/mballoc.c 			BUG_ON(page->mapping != inode->i_mapping);
page              927 fs/ext4/mballoc.c 			if (!PageUptodate(page)) {
page              928 fs/ext4/mballoc.c 				ret = ext4_mb_init_cache(page, NULL);
page              930 fs/ext4/mballoc.c 					unlock_page(page);
page              933 fs/ext4/mballoc.c 				mb_cmp_bitmaps(e4b, page_address(page) +
page              936 fs/ext4/mballoc.c 			unlock_page(page);
page              939 fs/ext4/mballoc.c 	if (page == NULL || !PageUptodate(page)) {
page              943 fs/ext4/mballoc.c 	e4b->bd_bitmap_page = page;
page              944 fs/ext4/mballoc.c 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
page              945 fs/ext4/mballoc.c 	mark_page_accessed(page);
page              951 fs/ext4/mballoc.c 	page = find_get_page(inode->i_mapping, pnum);
page              952 fs/ext4/mballoc.c 	if (page == NULL || !PageUptodate(page)) {
page              953 fs/ext4/mballoc.c 		if (page)
page              954 fs/ext4/mballoc.c 			page_cache_release(page);
page              955 fs/ext4/mballoc.c 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
page              956 fs/ext4/mballoc.c 		if (page) {
page              957 fs/ext4/mballoc.c 			BUG_ON(page->mapping != inode->i_mapping);
page              958 fs/ext4/mballoc.c 			if (!PageUptodate(page)) {
page              959 fs/ext4/mballoc.c 				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
page              961 fs/ext4/mballoc.c 					unlock_page(page);
page              965 fs/ext4/mballoc.c 			unlock_page(page);
page              968 fs/ext4/mballoc.c 	if (page == NULL || !PageUptodate(page)) {
page              972 fs/ext4/mballoc.c 	e4b->bd_buddy_page = page;
page              973 fs/ext4/mballoc.c 	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
page              974 fs/ext4/mballoc.c 	mark_page_accessed(page);
page             2340 fs/ext4/mballoc.c 	struct page *page;
page             2358 fs/ext4/mballoc.c 	page = find_get_page(inode->i_mapping, pnum);
page             2359 fs/ext4/mballoc.c 	if (page != NULL) {
page             2360 fs/ext4/mballoc.c 		ClearPageUptodate(page);
page             2361 fs/ext4/mballoc.c 		page_cache_release(page);
page             2367 fs/ext4/mballoc.c 	page = find_get_page(inode->i_mapping, pnum);
page             2368 fs/ext4/mballoc.c 	if (page != NULL) {
page             2369 fs/ext4/mballoc.c 		ClearPageUptodate(page);
page             2370 fs/ext4/mballoc.c 		page_cache_release(page);
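
The fs/ext4/mballoc.c entries at lines 895-974 show the load-or-initialize idiom ext4_mb_load_buddy() uses for its per-group bitmap and buddy pages: try find_get_page() first, and fall back to find_or_create_page() plus an initializer when the page is missing or stale. A hedged sketch of the idiom follows; my_load_cached_page and the my_init callback are hypothetical names, with my_init standing in for ext4_mb_init_cache() (returns 0 and sets PageUptodate on success).

#include <linux/pagemap.h>

static struct page *my_load_cached_page(struct address_space *mapping,
					pgoff_t index,
					int (*my_init)(struct page *page))
{
	struct page *page;

	page = find_get_page(mapping, index);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			page_cache_release(page);
		page = find_or_create_page(mapping, index, GFP_NOFS);
		if (!page)
			return NULL;
		if (!PageUptodate(page) && my_init(page)) {
			/* initializer failed; drop the locked page */
			unlock_page(page);
			page_cache_release(page);
			return NULL;
		}
		unlock_page(page);
	}
	if (!PageUptodate(page)) {
		page_cache_release(page);
		return NULL;
	}
	mark_page_accessed(page);
	return page;
}
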
page              210 fs/ext4/mballoc.h 	struct page *ac_bitmap_page;
page              211 fs/ext4/mballoc.h 	struct page *ac_buddy_page;
page              237 fs/ext4/mballoc.h 	struct page *bd_buddy_page;
page              239 fs/ext4/mballoc.h 	struct page *bd_bitmap_page;
page             1102 fs/ext4/resize.c 		struct page *page;
page             1108 fs/ext4/resize.c 		page = find_get_page(inode->i_mapping, pnum);
page             1109 fs/ext4/resize.c 		if (page != NULL) {
page             1110 fs/ext4/resize.c 			ClearPageUptodate(page);
page             1111 fs/ext4/resize.c 			page_cache_release(page);
page             1117 fs/ext4/resize.c 		page = find_get_page(inode->i_mapping, pnum);
page             1118 fs/ext4/resize.c 		if (page != NULL) {
page             1119 fs/ext4/resize.c 			ClearPageUptodate(page);
page             1120 fs/ext4/resize.c 			page_cache_release(page);
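
fs/ext4/resize.c (lines 1102-1120) and fs/ext4/mballoc.c (lines 2340-2370) drop stale cached pages the same way once the on-disk group data has changed: look the page up, clear its uptodate flag so the next load reinitializes it, and release the reference. A one-function sketch, with my_invalidate_cached_page a made-up name:

static void my_invalidate_cached_page(struct address_space *mapping,
				      pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (page != NULL) {
		ClearPageUptodate(page);	/* next load reinitializes it */
		page_cache_release(page);
	}
}
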
page              124 fs/fat/inode.c 	return block_write_full_page(page, fat_get_block, wbc);
page              135 fs/fat/inode.c 	return mpage_readpage(page, fat_get_block);
page               77 fs/freevxfs/vxfs_extern.h extern struct page *		vxfs_get_page(struct address_space *, u_long);
page              132 fs/freevxfs/vxfs_inode.c 	struct page			*pp;
page              115 fs/freevxfs/vxfs_lookup.c 	u_long				npages, page, nblocks, pblocks, block;
page              124 fs/freevxfs/vxfs_lookup.c 	for (page = 0; page < npages; page++) {
page              126 fs/freevxfs/vxfs_lookup.c 		struct page		*pp;
page              128 fs/freevxfs/vxfs_lookup.c 		pp = vxfs_get_page(ip->i_mapping, page);
page              177 fs/freevxfs/vxfs_lookup.c 	struct page			*pp;
page              246 fs/freevxfs/vxfs_lookup.c 	u_long			page, npages, block, pblocks, nblocks, offset;
page              275 fs/freevxfs/vxfs_lookup.c 	page = pos >> PAGE_CACHE_SHIFT;
page              279 fs/freevxfs/vxfs_lookup.c 	for (; page < npages; page++, block = 0) {
page              281 fs/freevxfs/vxfs_lookup.c 		struct page		*pp;
page              283 fs/freevxfs/vxfs_lookup.c 		pp = vxfs_get_page(ip->i_mapping, page);
page              312 fs/freevxfs/vxfs_lookup.c 					((page << PAGE_CACHE_SHIFT) | offset) + 2,
page              326 fs/freevxfs/vxfs_lookup.c 	fp->f_pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
page               69 fs/freevxfs/vxfs_subr.c struct page *
page               72 fs/freevxfs/vxfs_subr.c 	struct page *			pp;
page              163 fs/freevxfs/vxfs_subr.c 	return block_read_full_page(page, vxfs_getblk);
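
The freevxfs entries above iterate a directory as a sequence of address-space pages, fetching each with vxfs_get_page() (fs/freevxfs/vxfs_subr.c) and scanning the mapped contents (fs/freevxfs/vxfs_lookup.c). A minimal sketch of that paged walk, assuming a helper of vxfs_get_page()'s shape (read_mapping_page() plus kmap()) and a hypothetical my_scan() callback that returns nonzero on a match:

#include <linux/pagemap.h>
#include <linux/highmem.h>

static struct page *my_get_page(struct address_space *mapping, u_long n)
{
	struct page *pp = read_mapping_page(mapping, n, NULL);

	if (!IS_ERR(pp))
		kmap(pp);		/* caller unmaps and releases */
	return pp;
}

static int my_walk_dir_pages(struct inode *ip,
			     int (*my_scan)(void *kaddr, u_long size))
{
	u_long npages = (ip->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	u_long page;

	for (page = 0; page < npages; page++) {
		struct page *pp = my_get_page(ip->i_mapping, page);
		int found;

		if (IS_ERR(pp))
			return PTR_ERR(pp);
		found = my_scan(page_address(pp), PAGE_CACHE_SIZE);
		kunmap(pp);
		page_cache_release(pp);
		if (found)
			return 0;
	}
	return -ENOENT;
}
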
page              495 fs/fuse/dev.c  	struct page *pg;
page              586 fs/fuse/dev.c  	if (page && zeroing && count < PAGE_SIZE) {
page              587 fs/fuse/dev.c  		void *mapaddr = kmap_atomic(page, KM_USER1);
page              595 fs/fuse/dev.c  		if (page) {
page              596 fs/fuse/dev.c  			void *mapaddr = kmap_atomic(page, KM_USER1);
page              603 fs/fuse/dev.c  	if (page && !cs->write)
page              604 fs/fuse/dev.c  		flush_dcache_page(page);
page              618 fs/fuse/dev.c  		struct page *page = req->pages[i];
page              619 fs/fuse/dev.c  		int err = fuse_copy_page(cs, page, offset, count, zeroing);
page             1009 fs/fuse/dir.c  	struct page *page;
page             1021 fs/fuse/dir.c  	page = alloc_page(GFP_KERNEL);
page             1022 fs/fuse/dir.c  	if (!page) {
page             1027 fs/fuse/dir.c  	req->pages[0] = page;
page             1034 fs/fuse/dir.c  		err = parse_dirfile(page_address(page), nbytes, file, dstbuf,
page             1037 fs/fuse/dir.c  	__free_page(page);
page              417 fs/fuse/file.c 	struct inode *inode = page->mapping->host;
page              421 fs/fuse/file.c 	loff_t pos = page_offset(page);
page              435 fs/fuse/file.c 	fuse_wait_on_page_writeback(inode, page->index);
page              446 fs/fuse/file.c 	req->pages[0] = page;
page              458 fs/fuse/file.c 		SetPageUptodate(page);
page              463 fs/fuse/file.c 	unlock_page(page);
page              485 fs/fuse/file.c 		struct page *page = req->pages[i];
page              487 fs/fuse/file.c 			SetPageUptodate(page);
page              489 fs/fuse/file.c 			SetPageError(page);
page              490 fs/fuse/file.c 		unlock_page(page);
page              530 fs/fuse/file.c 	fuse_wait_on_page_writeback(inode, page->index);
page              535 fs/fuse/file.c 	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
page              539 fs/fuse/file.c 			unlock_page(page);
page              543 fs/fuse/file.c 	req->pages[req->num_pages] = page;
page              681 fs/fuse/file.c 	fuse_wait_on_page_writeback(inode, page->index);
page              688 fs/fuse/file.c 	req->pages[0] = page;
page              699 fs/fuse/file.c 			SetPageUptodate(page);
page              713 fs/fuse/file.c 		res = fuse_buffered_write(file, inode, pos, copied, page);
page              715 fs/fuse/file.c 	unlock_page(page);
page              716 fs/fuse/file.c 	page_cache_release(page);
page              736 fs/fuse/file.c 		struct page *page = req->pages[i];
page              739 fs/fuse/file.c 			SetPageUptodate(page);
page              747 fs/fuse/file.c 		unlock_page(page);
page              748 fs/fuse/file.c 		page_cache_release(page);
page              767 fs/fuse/file.c 		struct page *page;
page              780 fs/fuse/file.c 		page = __grab_cache_page(mapping, index);
page              781 fs/fuse/file.c 		if (!page)
page              785 fs/fuse/file.c 		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
page              787 fs/fuse/file.c 		flush_dcache_page(page);
page              790 fs/fuse/file.c 			unlock_page(page);
page              791 fs/fuse/file.c 			page_cache_release(page);
page              797 fs/fuse/file.c 		req->pages[req->num_pages] = page;
page              919 fs/fuse/file.c 		struct page *page = req->pages[i];
page              921 fs/fuse/file.c 			set_page_dirty_lock(page);
page              922 fs/fuse/file.c 		put_page(page);
page             1124 fs/fuse/file.c 	struct address_space *mapping = page->mapping;
page             1130 fs/fuse/file.c 	struct page *tmp_page;
page             1132 fs/fuse/file.c 	set_page_writeback(page);
page             1148 fs/fuse/file.c 	fuse_write_fill(req, NULL, ff, inode, page_offset(page), 0, 1);
page             1150 fs/fuse/file.c 	copy_highpage(tmp_page, page);
page             1159 fs/fuse/file.c 	end_page_writeback(page);
page             1172 fs/fuse/file.c 	end_page_writeback(page);
page             1180 fs/fuse/file.c 	err = fuse_writepage_locked(page);
page             1181 fs/fuse/file.c 	unlock_page(page);
page             1189 fs/fuse/file.c 	if (clear_page_dirty_for_io(page)) {
page             1190 fs/fuse/file.c 		struct inode *inode = page->mapping->host;
page             1191 fs/fuse/file.c 		err = fuse_writepage_locked(page);
page             1193 fs/fuse/file.c 			fuse_wait_on_page_writeback(inode, page->index);
page             1230 fs/fuse/file.c 	fuse_wait_on_page_writeback(inode, page->index);
page              254 fs/fuse/fuse_i.h 	struct page *pages[FUSE_MAX_PAGES_PER_REQ];
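
fuse_writepage_locked() (fs/fuse/file.c, lines ~1124-1172 above) shows an unusual writeback shape: the dirty page is copied into a freshly allocated temporary page with copy_highpage(), so end_page_writeback() can run before the userspace filesystem has actually stored the data. A reduced sketch of the copy-and-complete step; my_queue_for_daemon is hypothetical and is assumed to take ownership of the temporary page:

#include <linux/pagemap.h>
#include <linux/highmem.h>

static int my_writepage_via_copy(struct page *page,
				 void (*my_queue_for_daemon)(struct page *tmp))
{
	struct page *tmp_page;

	set_page_writeback(page);

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page) {
		end_page_writeback(page);
		return -ENOMEM;
	}

	copy_highpage(tmp_page, page);		/* snapshot the data */
	my_queue_for_daemon(tmp_page);		/* async; owns tmp_page now */

	/* The original page is clean again as far as the VM is concerned,
	 * even though the copy has not reached the backing store yet. */
	end_page_writeback(page);
	return 0;
}

The payoff of this design is that writeback never blocks on the userspace daemon; the cost is the page of temporary memory and the wait_on_page_writeback() fencing visible throughout the fuse entries above.
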
page               67 fs/gfs2/bmap.c 	if (!page || page->index) {
page               68 fs/gfs2/bmap.c 		page = grab_cache_page(inode->i_mapping, 0);
page               69 fs/gfs2/bmap.c 		if (!page)
page               74 fs/gfs2/bmap.c 	if (!PageUptodate(page)) {
page               75 fs/gfs2/bmap.c 		void *kaddr = kmap(page);
page               81 fs/gfs2/bmap.c 		kunmap(page);
page               83 fs/gfs2/bmap.c 		SetPageUptodate(page);
page               86 fs/gfs2/bmap.c 	if (!page_has_buffers(page))
page               87 fs/gfs2/bmap.c 		create_empty_buffers(page, 1 << inode->i_blkbits,
page               90 fs/gfs2/bmap.c 	bh = page_buffers(page);
page              102 fs/gfs2/bmap.c 		unlock_page(page);
page              103 fs/gfs2/bmap.c 		page_cache_release(page);
page              150 fs/gfs2/bmap.c 			error = gfs2_unstuffer_page(ip, dibh, block, page);
page              962 fs/gfs2/bmap.c 	struct page *page;
page              965 fs/gfs2/bmap.c 	page = grab_cache_page(mapping, index);
page              966 fs/gfs2/bmap.c 	if (!page)
page              973 fs/gfs2/bmap.c 	if (!page_has_buffers(page))
page              974 fs/gfs2/bmap.c 		create_empty_buffers(page, blocksize, 0);
page              977 fs/gfs2/bmap.c 	bh = page_buffers(page);
page              995 fs/gfs2/bmap.c 	if (PageUptodate(page))
page             1011 fs/gfs2/bmap.c 	zero_user(page, offset, length);
page             1014 fs/gfs2/bmap.c 	unlock_page(page);
page             1015 fs/gfs2/bmap.c 	page_cache_release(page);
page               15 fs/gfs2/bmap.h struct page;
page               47 fs/gfs2/meta_io.c 	return block_write_full_page(page, aspace_get_block, wbc);
page              144 fs/gfs2/meta_io.c 	struct page *page;
page              156 fs/gfs2/meta_io.c 			page = grab_cache_page(mapping, index);
page              157 fs/gfs2/meta_io.c 			if (page)
page              162 fs/gfs2/meta_io.c 		page = find_lock_page(mapping, index);
page              163 fs/gfs2/meta_io.c 		if (!page)
page              167 fs/gfs2/meta_io.c 	if (!page_has_buffers(page))
page              168 fs/gfs2/meta_io.c 		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
page              171 fs/gfs2/meta_io.c 	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
page              178 fs/gfs2/meta_io.c 	unlock_page(page);
page              179 fs/gfs2/meta_io.c 	mark_page_accessed(page);
page              180 fs/gfs2/meta_io.c 	page_cache_release(page);
page               44 fs/gfs2/ops_address.c 	struct buffer_head *head = page_buffers(page);
page              100 fs/gfs2/ops_address.c 	struct inode *inode = page->mapping->host;
page              113 fs/gfs2/ops_address.c 	if (page->index > end_index || (page->index == end_index && !offset)) {
page              114 fs/gfs2/ops_address.c 		page->mapping->a_ops->invalidatepage(page, 0);
page              119 fs/gfs2/ops_address.c 	redirty_page_for_writepage(wbc, page);
page              121 fs/gfs2/ops_address.c 	unlock_page(page);
page              137 fs/gfs2/ops_address.c 	ret = gfs2_writepage_common(page, wbc);
page              141 fs/gfs2/ops_address.c 	ret = mpage_writepage(page, gfs2_get_block_noalloc, wbc);
page              143 fs/gfs2/ops_address.c 		ret = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
page              157 fs/gfs2/ops_address.c 	struct inode *inode = page->mapping->host;
page              161 fs/gfs2/ops_address.c 	ret = gfs2_writepage_common(page, wbc);
page              165 fs/gfs2/ops_address.c 	if (!page_has_buffers(page)) {
page              166 fs/gfs2/ops_address.c 		create_empty_buffers(page, inode->i_sb->s_blocksize,
page              169 fs/gfs2/ops_address.c 	gfs2_page_add_databufs(ip, page, 0, inode->i_sb->s_blocksize-1);
page              170 fs/gfs2/ops_address.c 	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
page              186 fs/gfs2/ops_address.c 	struct inode *inode = page->mapping->host;
page              190 fs/gfs2/ops_address.c 	if (PageChecked(page)) {
page              191 fs/gfs2/ops_address.c 		ClearPageChecked(page);
page              192 fs/gfs2/ops_address.c 		if (!page_has_buffers(page)) {
page              193 fs/gfs2/ops_address.c 			create_empty_buffers(page, inode->i_sb->s_blocksize,
page              196 fs/gfs2/ops_address.c 		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
page              198 fs/gfs2/ops_address.c 	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
page              211 fs/gfs2/ops_address.c 	struct inode *inode = page->mapping->host;
page              216 fs/gfs2/ops_address.c 	error = gfs2_writepage_common(page, wbc);
page              220 fs/gfs2/ops_address.c 	if (PageChecked(page)) {
page              228 fs/gfs2/ops_address.c 	error = __gfs2_jdata_writepage(page, wbc);
page              234 fs/gfs2/ops_address.c 	redirty_page_for_writepage(wbc, page);
page              235 fs/gfs2/ops_address.c 	unlock_page(page);
page              285 fs/gfs2/ops_address.c 		struct page *page = pvec->pages[i];
page              287 fs/gfs2/ops_address.c 		lock_page(page);
page              289 fs/gfs2/ops_address.c 		if (unlikely(page->mapping != mapping)) {
page              290 fs/gfs2/ops_address.c 			unlock_page(page);
page              294 fs/gfs2/ops_address.c 		if (!wbc->range_cyclic && page->index > end) {
page              296 fs/gfs2/ops_address.c 			unlock_page(page);
page              301 fs/gfs2/ops_address.c 			wait_on_page_writeback(page);
page              303 fs/gfs2/ops_address.c 		if (PageWriteback(page) ||
page              304 fs/gfs2/ops_address.c 		    !clear_page_dirty_for_io(page)) {
page              305 fs/gfs2/ops_address.c 			unlock_page(page);
page              310 fs/gfs2/ops_address.c 		if (page->index > end_index || (page->index == end_index && !offset)) {
page              311 fs/gfs2/ops_address.c 			page->mapping->a_ops->invalidatepage(page, 0);
page              312 fs/gfs2/ops_address.c 			unlock_page(page);
page              316 fs/gfs2/ops_address.c 		ret = __gfs2_jdata_writepage(page, wbc);
page              445 fs/gfs2/ops_address.c 	if (unlikely(page->index)) {
page              446 fs/gfs2/ops_address.c 		zero_user(page, 0, PAGE_CACHE_SIZE);
page              454 fs/gfs2/ops_address.c 	kaddr = kmap_atomic(page, KM_USER0);
page              459 fs/gfs2/ops_address.c 	flush_dcache_page(page);
page              461 fs/gfs2/ops_address.c 	SetPageUptodate(page);
page              480 fs/gfs2/ops_address.c 	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
page              481 fs/gfs2/ops_address.c 	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
page              485 fs/gfs2/ops_address.c 		error = stuffed_readpage(ip, page);
page              486 fs/gfs2/ops_address.c 		unlock_page(page);
page              488 fs/gfs2/ops_address.c 		error = mpage_readpage(page, gfs2_block_map);
page              509 fs/gfs2/ops_address.c 	struct address_space *mapping = page->mapping;
page              514 fs/gfs2/ops_address.c 	unlock_page(page);
page              520 fs/gfs2/ops_address.c 	lock_page(page);
page              521 fs/gfs2/ops_address.c 	if (page->mapping == mapping && !PageUptodate(page))
page              522 fs/gfs2/ops_address.c 		error = __gfs2_readpage(file, page);
page              524 fs/gfs2/ops_address.c 		unlock_page(page);
page              529 fs/gfs2/ops_address.c 		lock_page(page);
page              551 fs/gfs2/ops_address.c 	struct page *page;
page              558 fs/gfs2/ops_address.c 		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
page              559 fs/gfs2/ops_address.c 		if (IS_ERR(page))
page              560 fs/gfs2/ops_address.c 			return PTR_ERR(page);
page              561 fs/gfs2/ops_address.c 		p = kmap_atomic(page, KM_USER0);
page              564 fs/gfs2/ops_address.c 		mark_page_accessed(page);
page              565 fs/gfs2/ops_address.c 		page_cache_release(page);
page              637 fs/gfs2/ops_address.c 	struct page *page;
page              678 fs/gfs2/ops_address.c 	page = __grab_cache_page(mapping, index);
page              679 fs/gfs2/ops_address.c 	*pagep = page;
page              680 fs/gfs2/ops_address.c 	if (unlikely(!page))
page              686 fs/gfs2/ops_address.c 			error = gfs2_unstuff_dinode(ip, page);
page              689 fs/gfs2/ops_address.c 		} else if (!PageUptodate(page)) {
page              690 fs/gfs2/ops_address.c 			error = stuffed_readpage(ip, page);
page              696 fs/gfs2/ops_address.c 	error = block_prepare_write(page, from, to, gfs2_block_map);
page              701 fs/gfs2/ops_address.c 	page_cache_release(page);
page              772 fs/gfs2/ops_address.c 	kaddr = kmap_atomic(page, KM_USER0);
page              775 fs/gfs2/ops_address.c 	flush_dcache_page(page);
page              778 fs/gfs2/ops_address.c 	if (!PageUptodate(page))
page              779 fs/gfs2/ops_address.c 		SetPageUptodate(page);
page              780 fs/gfs2/ops_address.c 	unlock_page(page);
page              781 fs/gfs2/ops_address.c 	page_cache_release(page);
page              821 fs/gfs2/ops_address.c 	struct inode *inode = page->mapping->host;
page              835 fs/gfs2/ops_address.c 		unlock_page(page);
page              836 fs/gfs2/ops_address.c 		page_cache_release(page);
page              843 fs/gfs2/ops_address.c 		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);
page              846 fs/gfs2/ops_address.c 		gfs2_page_add_databufs(ip, page, from, to);
page              848 fs/gfs2/ops_address.c 	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
page              882 fs/gfs2/ops_address.c 	SetPageChecked(page);
page              883 fs/gfs2/ops_address.c 	return __set_page_dirty_buffers(page);
page              937 fs/gfs2/ops_address.c 	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
page              941 fs/gfs2/ops_address.c 	BUG_ON(!PageLocked(page));
page              943 fs/gfs2/ops_address.c 		ClearPageChecked(page);
page              944 fs/gfs2/ops_address.c 	if (!page_has_buffers(page))
page              947 fs/gfs2/ops_address.c 	bh = head = page_buffers(page);
page              956 fs/gfs2/ops_address.c 		try_to_release_page(page, 0);
page             1033 fs/gfs2/ops_address.c 	struct inode *aspace = page->mapping->host;
page             1038 fs/gfs2/ops_address.c 	if (!page_has_buffers(page))
page             1042 fs/gfs2/ops_address.c 	head = bh = page_buffers(page);
page             1055 fs/gfs2/ops_address.c 	head = bh = page_buffers(page);
page             1079 fs/gfs2/ops_address.c 	return try_to_free_buffers(page);
page              315 fs/gfs2/ops_file.c 	struct inode *inode = page->mapping->host;
page              318 fs/gfs2/ops_file.c 	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
page              347 fs/gfs2/ops_file.c 	u64 pos = page->index << (PAGE_CACHE_SIZE - inode->i_blkbits);
page              386 fs/gfs2/ops_file.c 	lock_page(page);
page              389 fs/gfs2/ops_file.c 	if (page->index > last_index)
page              392 fs/gfs2/ops_file.c 	if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping)
page              395 fs/gfs2/ops_file.c 		ret = gfs2_unstuff_dinode(ip, page);
page              399 fs/gfs2/ops_file.c 	ret = gfs2_allocate_page_backing(page);
page              402 fs/gfs2/ops_file.c 	unlock_page(page);
page              219 fs/gfs2/ops_fstype.c 	struct page *page = bio->bi_private;
page              222 fs/gfs2/ops_fstype.c 		SetPageUptodate(page);
page              225 fs/gfs2/ops_fstype.c 	unlock_page(page);
page              271 fs/gfs2/ops_fstype.c 	struct page *page;
page              274 fs/gfs2/ops_fstype.c 	page = alloc_page(GFP_NOFS);
page              275 fs/gfs2/ops_fstype.c 	if (unlikely(!page))
page              278 fs/gfs2/ops_fstype.c 	ClearPageUptodate(page);
page              279 fs/gfs2/ops_fstype.c 	ClearPageDirty(page);
page              280 fs/gfs2/ops_fstype.c 	lock_page(page);
page              284 fs/gfs2/ops_fstype.c 		__free_page(page);
page              290 fs/gfs2/ops_fstype.c 	bio_add_page(bio, page, PAGE_SIZE, 0);
page              293 fs/gfs2/ops_fstype.c 	bio->bi_private = page;
page              295 fs/gfs2/ops_fstype.c 	wait_on_page_locked(page);
page              297 fs/gfs2/ops_fstype.c 	if (!PageUptodate(page)) {
page              298 fs/gfs2/ops_fstype.c 		__free_page(page);
page              301 fs/gfs2/ops_fstype.c 	p = kmap(page);
page              303 fs/gfs2/ops_fstype.c 	kunmap(page);
page              304 fs/gfs2/ops_fstype.c 	__free_page(page);
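
gfs2's mount path (fs/gfs2/ops_fstype.c, lines ~271-304 above) reads the superblock before the block size is known by submitting a bio against a private page instead of going through the page cache, with the completion handler at lines ~219-225 unlocking the page to wake the waiter. A condensed sketch under those assumptions; the in-tree version also marks the bio as sync/metadata, which this sketch skips:

#include <linux/bio.h>
#include <linux/pagemap.h>

static void my_end_io(struct bio *bio, int error)
{
	struct page *page = bio->bi_private;

	if (!error)
		SetPageUptodate(page);
	unlock_page(page);	/* wakes wait_on_page_locked() below */
}

static struct page *my_read_one_page(struct block_device *bdev,
				     sector_t sector)
{
	struct page *page;
	struct bio *bio;

	page = alloc_page(GFP_NOFS);
	if (!page)
		return NULL;
	lock_page(page);	/* my_end_io() unlocks on completion */

	bio = bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		unlock_page(page);
		__free_page(page);
		return NULL;
	}
	bio->bi_sector = sector;
	bio->bi_bdev = bdev;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = my_end_io;
	bio->bi_private = page;
	submit_bio(READ, bio);

	wait_on_page_locked(page);
	bio_put(bio);
	if (!PageUptodate(page)) {
		__free_page(page);
		return NULL;
	}
	return page;
}
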
page              612 fs/gfs2/quota.c 	struct page *page;
page              622 fs/gfs2/quota.c 	page = grab_cache_page(mapping, index);
page              623 fs/gfs2/quota.c 	if (!page)
page              629 fs/gfs2/quota.c 	if (!page_has_buffers(page))
page              630 fs/gfs2/quota.c 		create_empty_buffers(page, blocksize, 0);
page              632 fs/gfs2/quota.c 	bh = page_buffers(page);
page              646 fs/gfs2/quota.c 	if (PageUptodate(page))
page              658 fs/gfs2/quota.c 	kaddr = kmap_atomic(page, KM_USER0);
page              664 fs/gfs2/quota.c 	flush_dcache_page(page);
page              672 fs/gfs2/quota.c 	unlock_page(page);
page              673 fs/gfs2/quota.c 	page_cache_release(page);
page               19 fs/hfs/bnode.c 	struct page *page;
page               22 fs/hfs/bnode.c 	page = node->page[0];
page               24 fs/hfs/bnode.c 	memcpy(buf, kmap(page) + off, len);
page               25 fs/hfs/bnode.c 	kunmap(page);
page               61 fs/hfs/bnode.c 	struct page *page;
page               64 fs/hfs/bnode.c 	page = node->page[0];
page               66 fs/hfs/bnode.c 	memcpy(kmap(page) + off, buf, len);
page               67 fs/hfs/bnode.c 	kunmap(page);
page               68 fs/hfs/bnode.c 	set_page_dirty(page);
page               86 fs/hfs/bnode.c 	struct page *page;
page               89 fs/hfs/bnode.c 	page = node->page[0];
page               91 fs/hfs/bnode.c 	memset(kmap(page) + off, 0, len);
page               92 fs/hfs/bnode.c 	kunmap(page);
page               93 fs/hfs/bnode.c 	set_page_dirty(page);
page              100 fs/hfs/bnode.c 	struct page *src_page, *dst_page;
page              108 fs/hfs/bnode.c 	src_page = src_node->page[0];
page              109 fs/hfs/bnode.c 	dst_page = dst_node->page[0];
page              119 fs/hfs/bnode.c 	struct page *page;
page              127 fs/hfs/bnode.c 	page = node->page[0];
page              128 fs/hfs/bnode.c 	ptr = kmap(page);
page              130 fs/hfs/bnode.c 	kunmap(page);
page              131 fs/hfs/bnode.c 	set_page_dirty(page);
page              240 fs/hfs/bnode.c 	struct page *page;
page              251 fs/hfs/bnode.c 		sizeof(struct page *);
page              282 fs/hfs/bnode.c 		page = read_mapping_page(mapping, block++, NULL);
page              283 fs/hfs/bnode.c 		if (IS_ERR(page))
page              285 fs/hfs/bnode.c 		if (PageError(page)) {
page              286 fs/hfs/bnode.c 			page_cache_release(page);
page              289 fs/hfs/bnode.c 		page_cache_release(page);
page              290 fs/hfs/bnode.c 		node->page[i] = page;
page              340 fs/hfs/bnode.c 	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
page              346 fs/hfs/bnode.c 	kunmap(node->page[0]);
page              410 fs/hfs/bnode.c 	struct page **pagep;
page              425 fs/hfs/bnode.c 	pagep = node->page;
page              463 fs/hfs/bnode.c 			if (!node->page[i])
page              465 fs/hfs/bnode.c 			mark_page_accessed(node->page[i]);
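
The fs/hfs/bnode.c helpers above (hfs_bnode_read/write/clear, lines 19-93) all follow one tiny pattern: map the node's backing page, touch bytes at an offset, unmap, and dirty the page on writes. A sketch of the read/write pair, assuming a node structure that keeps its pages in node->page[] as declared at fs/hfs/btree.h line 62; my_bnode is a hypothetical stand-in type:

#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Hypothetical node type mirroring the hfs layout. */
struct my_bnode {
	unsigned int page_offset;
	struct page *page[1];
};

static void my_bnode_read(struct my_bnode *node, void *buf, int off, int len)
{
	struct page *page = node->page[0];

	off += node->page_offset;
	memcpy(buf, kmap(page) + off, len);
	kunmap(page);
}

static void my_bnode_write(struct my_bnode *node, void *buf, int off, int len)
{
	struct page *page = node->page[0];

	off += node->page_offset;
	memcpy(kmap(page) + off, buf, len);
	kunmap(page);
	set_page_dirty(page);	/* writeback picks it up later */
}
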
page               22 fs/hfs/btree.c 	struct page *page;
page               62 fs/hfs/btree.c 	page = read_mapping_page(mapping, 0, NULL);
page               63 fs/hfs/btree.c 	if (IS_ERR(page))
page               67 fs/hfs/btree.c 	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
page              106 fs/hfs/btree.c 	kunmap(page);
page              107 fs/hfs/btree.c 	page_cache_release(page);
page              111 fs/hfs/btree.c 	page_cache_release(page);
page              147 fs/hfs/btree.c 	struct page *page;
page              154 fs/hfs/btree.c 	page = node->page[0];
page              155 fs/hfs/btree.c 	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
page              166 fs/hfs/btree.c 	kunmap(page);
page              167 fs/hfs/btree.c 	set_page_dirty(page);
page              209 fs/hfs/btree.c 	struct page **pagep;
page              244 fs/hfs/btree.c 	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
page              289 fs/hfs/btree.c 		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
page              298 fs/hfs/btree.c 	struct page *page;
page              333 fs/hfs/btree.c 	page = node->page[off >> PAGE_CACHE_SHIFT];
page              334 fs/hfs/btree.c 	data = kmap(page);
page              340 fs/hfs/btree.c 		kunmap(page);
page              345 fs/hfs/btree.c 	set_page_dirty(page);
page              346 fs/hfs/btree.c 	kunmap(page);
page               62 fs/hfs/btree.h 	struct page *page[0];
page              468 fs/hfs/extent.c 		struct page *page;
page              474 fs/hfs/extent.c 				AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
page              477 fs/hfs/extent.c 					page, fsdata);
page               30 fs/hfs/inode.c 	return block_write_full_page(page, hfs_get_block, wbc);
page               35 fs/hfs/inode.c 	return block_read_full_page(page, hfs_get_block);
page               55 fs/hfs/inode.c 	struct inode *inode = page->mapping->host;
page               74 fs/hfs/inode.c 		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
page               87 fs/hfs/inode.c 		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
page              103 fs/hfs/inode.c 	return res ? try_to_free_buffers(page) : 0;
page               20 fs/hfsplus/bitmap.c 	struct page *page;
page               34 fs/hfsplus/bitmap.c 	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
page               35 fs/hfsplus/bitmap.c 	pptr = kmap(page);
page               70 fs/hfsplus/bitmap.c 		kunmap(page);
page               74 fs/hfsplus/bitmap.c 		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
page               76 fs/hfsplus/bitmap.c 		curr = pptr = kmap(page);
page              118 fs/hfsplus/bitmap.c 		set_page_dirty(page);
page              119 fs/hfsplus/bitmap.c 		kunmap(page);
page              121 fs/hfsplus/bitmap.c 		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
page              123 fs/hfsplus/bitmap.c 		pptr = kmap(page);
page              138 fs/hfsplus/bitmap.c 	set_page_dirty(page);
page              139 fs/hfsplus/bitmap.c 	kunmap(page);
page              151 fs/hfsplus/bitmap.c 	struct page *page;
page              169 fs/hfsplus/bitmap.c 	page = read_mapping_page(mapping, pnr, NULL);
page              170 fs/hfsplus/bitmap.c 	pptr = kmap(page);
page              199 fs/hfsplus/bitmap.c 		set_page_dirty(page);
page              200 fs/hfsplus/bitmap.c 		kunmap(page);
page              201 fs/hfsplus/bitmap.c 		page = read_mapping_page(mapping, ++pnr, NULL);
page              202 fs/hfsplus/bitmap.c 		pptr = kmap(page);
page              213 fs/hfsplus/bitmap.c 	set_page_dirty(page);
page              214 fs/hfsplus/bitmap.c 	kunmap(page);
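
fs/hfsplus/bitmap.c walks the allocation bitmap page by page: kmap the current page, scan or modify words, then set_page_dirty()/kunmap() before stepping to the next page with read_mapping_page(). A sketch of the per-page step with a hypothetical my_visit() callback that returns nonzero when it modified the page; unlike the in-tree code this sketch also releases each page reference:

#include <linux/pagemap.h>
#include <linux/highmem.h>

static int my_bitmap_walk(struct address_space *mapping, pgoff_t npages,
			  int (*my_visit)(__be32 *pptr, unsigned nr_words))
{
	pgoff_t i;

	for (i = 0; i < npages; i++) {
		struct page *page = read_mapping_page(mapping, i, NULL);
		__be32 *pptr;

		if (IS_ERR(page))
			return PTR_ERR(page);
		pptr = kmap(page);
		if (my_visit(pptr, PAGE_CACHE_SIZE / sizeof(__be32)))
			set_page_dirty(page);	/* modified in place */
		kunmap(page);
		page_cache_release(page);
	}
	return 0;
}
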
page               23 fs/hfsplus/bnode.c 	struct page **pagep;
page               27 fs/hfsplus/bnode.c 	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
page               75 fs/hfsplus/bnode.c 	struct page **pagep;
page               79 fs/hfsplus/bnode.c 	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
page              105 fs/hfsplus/bnode.c 	struct page **pagep;
page              109 fs/hfsplus/bnode.c 	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
page              129 fs/hfsplus/bnode.c 	struct page **src_page, **dst_page;
page              138 fs/hfsplus/bnode.c 	src_page = src_node->page + (src >> PAGE_CACHE_SHIFT);
page              140 fs/hfsplus/bnode.c 	dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT);
page              187 fs/hfsplus/bnode.c 	struct page **src_page, **dst_page;
page              197 fs/hfsplus/bnode.c 		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
page              200 fs/hfsplus/bnode.c 		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
page              246 fs/hfsplus/bnode.c 		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
page              248 fs/hfsplus/bnode.c 		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
page              400 fs/hfsplus/bnode.c 	struct page *page;
page              411 fs/hfsplus/bnode.c 		sizeof(struct page *);
page              442 fs/hfsplus/bnode.c 		page = read_mapping_page(mapping, block, NULL);
page              443 fs/hfsplus/bnode.c 		if (IS_ERR(page))
page              445 fs/hfsplus/bnode.c 		if (PageError(page)) {
page              446 fs/hfsplus/bnode.c 			page_cache_release(page);
page              449 fs/hfsplus/bnode.c 		page_cache_release(page);
page              450 fs/hfsplus/bnode.c 		node->page[i] = page;
page              500 fs/hfsplus/bnode.c 	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
page              506 fs/hfsplus/bnode.c 	kunmap(node->page[0]);
page              570 fs/hfsplus/bnode.c 	struct page **pagep;
page              589 fs/hfsplus/bnode.c 	pagep = node->page;
page              627 fs/hfsplus/bnode.c 			if (!node->page[i])
page              629 fs/hfsplus/bnode.c 			mark_page_accessed(node->page[i]);
page               26 fs/hfsplus/btree.c 	struct page *page;
page               43 fs/hfsplus/btree.c 	page = read_mapping_page(mapping, 0, NULL);
page               44 fs/hfsplus/btree.c 	if (IS_ERR(page))
page               48 fs/hfsplus/btree.c 	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
page               85 fs/hfsplus/btree.c 	kunmap(page);
page               86 fs/hfsplus/btree.c 	page_cache_release(page);
page               91 fs/hfsplus/btree.c 	page_cache_release(page);
page              125 fs/hfsplus/btree.c 	struct page *page;
page              132 fs/hfsplus/btree.c 	page = node->page[0];
page              133 fs/hfsplus/btree.c 	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
page              144 fs/hfsplus/btree.c 	kunmap(page);
page              145 fs/hfsplus/btree.c 	set_page_dirty(page);
page              185 fs/hfsplus/btree.c 	struct page **pagep;
page              220 fs/hfsplus/btree.c 	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
page              265 fs/hfsplus/btree.c 		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
page              274 fs/hfsplus/btree.c 	struct page *page;
page              310 fs/hfsplus/btree.c 	page = node->page[off >> PAGE_CACHE_SHIFT];
page              311 fs/hfsplus/btree.c 	data = kmap(page);
page              317 fs/hfsplus/btree.c 		kunmap(page);
page              322 fs/hfsplus/btree.c 	set_page_dirty(page);
page              323 fs/hfsplus/btree.c 	kunmap(page);
page              445 fs/hfsplus/extents.c 		struct page *page;
page              452 fs/hfsplus/extents.c 						&page, &fsdata);
page              455 fs/hfsplus/extents.c 		res = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
page               73 fs/hfsplus/hfsplus_fs.h struct page;
page               93 fs/hfsplus/hfsplus_fs.h 	struct page *page[0];
page              389 fs/hfsplus/hfsplus_fs.h #define hfsplus_kmap(p)		({ struct page *__p = (p); kmap(__p); })
page              390 fs/hfsplus/hfsplus_fs.h #define hfsplus_kunmap(p)	({ struct page *__p = (p); kunmap(__p); __p; })
page               22 fs/hfsplus/inode.c 	return block_read_full_page(page, hfsplus_get_block);
page               27 fs/hfsplus/inode.c 	return block_write_full_page(page, hfsplus_get_block, wbc);
page               47 fs/hfsplus/inode.c 	struct inode *inode = page->mapping->host;
page               71 fs/hfsplus/inode.c 		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
page               84 fs/hfsplus/inode.c 		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
page              100 fs/hfsplus/inode.c 	return res ? try_to_free_buffers(page) : 0;
page              438 fs/hostfs/hostfs_kern.c 	struct address_space *mapping = page->mapping;
page              446 fs/hostfs/hostfs_kern.c 	if (page->index >= end_index)
page              449 fs/hostfs/hostfs_kern.c 	buffer = kmap(page);
page              450 fs/hostfs/hostfs_kern.c 	base = ((unsigned long long) page->index) << PAGE_CACHE_SHIFT;
page              454 fs/hostfs/hostfs_kern.c 		ClearPageUptodate(page);
page              461 fs/hostfs/hostfs_kern.c 	if (PageError(page))
page              462 fs/hostfs/hostfs_kern.c 		ClearPageError(page);
page              466 fs/hostfs/hostfs_kern.c 	kunmap(page);
page              468 fs/hostfs/hostfs_kern.c 	unlock_page(page);
page              478 fs/hostfs/hostfs_kern.c 	start = (long long) page->index << PAGE_CACHE_SHIFT;
page              479 fs/hostfs/hostfs_kern.c 	buffer = kmap(page);
page              487 fs/hostfs/hostfs_kern.c 	flush_dcache_page(page);
page              488 fs/hostfs/hostfs_kern.c 	SetPageUptodate(page);
page              489 fs/hostfs/hostfs_kern.c 	if (PageError(page)) ClearPageError(page);
page              492 fs/hostfs/hostfs_kern.c 	kunmap(page);
page              493 fs/hostfs/hostfs_kern.c 	unlock_page(page);
page              518 fs/hostfs/hostfs_kern.c 	buffer = kmap(page);
page              520 fs/hostfs/hostfs_kern.c 	kunmap(page);
page              522 fs/hostfs/hostfs_kern.c 	if (!PageUptodate(page) && err == PAGE_CACHE_SIZE)
page              523 fs/hostfs/hostfs_kern.c 		SetPageUptodate(page);
page              531 fs/hostfs/hostfs_kern.c 	unlock_page(page);
page              532 fs/hostfs/hostfs_kern.c 	page_cache_release(page);
page              941 fs/hostfs/hostfs_kern.c 	buffer = kmap(page);
page              942 fs/hostfs/hostfs_kern.c 	name = inode_name(page->mapping->host, 0);
page              950 fs/hostfs/hostfs_kern.c 		flush_dcache_page(page);
page              951 fs/hostfs/hostfs_kern.c 		SetPageUptodate(page);
page              952 fs/hostfs/hostfs_kern.c 		if (PageError(page)) ClearPageError(page);
page              955 fs/hostfs/hostfs_kern.c 	kunmap(page);
page              956 fs/hostfs/hostfs_kern.c 	unlock_page(page);
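
hostfs implements readpage/writepage by shuttling whole pages through its host-side read/write calls (fs/hostfs/hostfs_kern.c, lines ~438-532 above): kmap the page, perform the external I/O against the mapped buffer, then fix up the uptodate/error bits and unlock. A sketch of a readpage of that shape, assuming a hypothetical my_host_read(file, offset, buf, len) that returns bytes read or a negative error:

#include <linux/pagemap.h>
#include <linux/highmem.h>

static int my_readpage(struct file *file, struct page *page,
		       int (*my_host_read)(struct file *f, long long off,
					   char *buf, int len))
{
	char *buffer;
	long long start;
	int err;

	start = (long long) page->index << PAGE_CACHE_SHIFT;
	buffer = kmap(page);
	err = my_host_read(file, start, buffer, PAGE_CACHE_SIZE);
	if (err < 0)
		goto out;

	memset(buffer + err, 0, PAGE_CACHE_SIZE - err);	/* zero the tail */
	flush_dcache_page(page);
	SetPageUptodate(page);
	if (PageError(page))
		ClearPageError(page);
	err = 0;
 out:
	kunmap(page);
	unlock_page(page);
	return err;
}
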
page               87 fs/hpfs/file.c 	return block_write_full_page(page,hpfs_get_block, wbc);
page               92 fs/hpfs/file.c 	return block_read_full_page(page,hpfs_get_block);
page              513 fs/hpfs/namei.c 	char *link = kmap(page);
page              514 fs/hpfs/namei.c 	struct inode *i = page->mapping->host;
page              528 fs/hpfs/namei.c 	SetPageUptodate(page);
page              529 fs/hpfs/namei.c 	kunmap(page);
page              530 fs/hpfs/namei.c 	unlock_page(page);
page              535 fs/hpfs/namei.c 	SetPageError(page);
page              536 fs/hpfs/namei.c 	kunmap(page);
page              537 fs/hpfs/namei.c 	unlock_page(page);
page              208 fs/hugetlbfs/inode.c 		kaddr = kmap(&page[i]);
page              210 fs/hugetlbfs/inode.c 		kunmap(&page[i]);
page              253 fs/hugetlbfs/inode.c 		struct page *page;
page              269 fs/hugetlbfs/inode.c 		page = find_get_page(mapping, index);
page              270 fs/hugetlbfs/inode.c 		if (unlikely(page == NULL)) {
page              282 fs/hugetlbfs/inode.c 			ret = hugetlbfs_read_actor(page, offset, buf, len, nr);
page              287 fs/hugetlbfs/inode.c 			if (page)
page              288 fs/hugetlbfs/inode.c 				page_cache_release(page);
page              298 fs/hugetlbfs/inode.c 		if (page)
page              299 fs/hugetlbfs/inode.c 			page_cache_release(page);
page              317 fs/hugetlbfs/inode.c 	unlock_page(page);
page              339 fs/hugetlbfs/inode.c 	cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
page              340 fs/hugetlbfs/inode.c 	ClearPageUptodate(page);
page              341 fs/hugetlbfs/inode.c 	remove_from_page_cache(page);
page              342 fs/hugetlbfs/inode.c 	put_page(page);
page              365 fs/hugetlbfs/inode.c 			struct page *page = pvec.pages[i];
page              367 fs/hugetlbfs/inode.c 			lock_page(page);
page              368 fs/hugetlbfs/inode.c 			if (page->index > next)
page              369 fs/hugetlbfs/inode.c 				next = page->index;
page              371 fs/hugetlbfs/inode.c 			truncate_huge_page(page);
page              372 fs/hugetlbfs/inode.c 			unlock_page(page);
page              612 fs/hugetlbfs/inode.c 	struct page *head = compound_head(page);
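
truncate_hugepages() (fs/hugetlbfs/inode.c, lines ~339-372 above) removes pages past the new EOF with the standard pagevec loop: look up a batch, lock each page, drop it from the page cache, unlock, and repeat until the lookup comes back empty. A skeleton of that loop for ordinary pages, with a hypothetical my_drop_page() standing in for the truncate step:

#include <linux/pagemap.h>
#include <linux/pagevec.h>

static void my_truncate_from(struct address_space *mapping, pgoff_t start,
			     void (*my_drop_page)(struct page *page))
{
	struct pagevec pvec;
	pgoff_t next = start;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			if (page->index > next)
				next = page->index;
			next++;
			my_drop_page(page);	/* e.g. remove_from_page_cache */
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
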
page               62 fs/isofs/compress.c 	struct page *pages[zisofs_block_pages];
page               63 fs/isofs/compress.c 	unsigned long index = page->index;
page               69 fs/isofs/compress.c 	pages[xpage] = page;
page               80 fs/isofs/compress.c 	if (page->index >= maxpage) {
page               81 fs/isofs/compress.c 		SetPageUptodate(page);
page               82 fs/isofs/compress.c 		unlock_page(page);
page               92 fs/isofs/compress.c 		page = pages[i];
page               93 fs/isofs/compress.c 		if ( page ) {
page               94 fs/isofs/compress.c 			ClearPageError(page);
page               95 fs/isofs/compress.c 			kmap(page);
page              159 fs/isofs/compress.c 			if ( (page = pages[fpage]) != NULL ) {
page              160 fs/isofs/compress.c 				memset(page_address(page), 0, PAGE_CACHE_SIZE);
page              162 fs/isofs/compress.c 				flush_dcache_page(page);
page              163 fs/isofs/compress.c 				SetPageUptodate(page);
page              164 fs/isofs/compress.c 				kunmap(page);
page              165 fs/isofs/compress.c 				unlock_page(page);
page              169 fs/isofs/compress.c 					page_cache_release(page);
page              220 fs/isofs/compress.c 			page = pages[fpage];
page              221 fs/isofs/compress.c 			if ( page )
page              222 fs/isofs/compress.c 				stream.next_out = page_address(page);
page              279 fs/isofs/compress.c 				if ( page ) {
page              280 fs/isofs/compress.c 					flush_dcache_page(page);
page              281 fs/isofs/compress.c 					SetPageUptodate(page);
page              282 fs/isofs/compress.c 					kunmap(page);
page              283 fs/isofs/compress.c 					unlock_page(page);
page              287 fs/isofs/compress.c 						page_cache_release(page);
page              308 fs/isofs/compress.c 		page = pages[fpage];
page              309 fs/isofs/compress.c 		if ( page ) {
page              310 fs/isofs/compress.c 			flush_dcache_page(page);
page              312 fs/isofs/compress.c 				SetPageError(page);
page              313 fs/isofs/compress.c 			kunmap(page);
page              314 fs/isofs/compress.c 			unlock_page(page);
page              316 fs/isofs/compress.c 				page_cache_release(page);
page             1082 fs/isofs/inode.c 	return block_read_full_page(page,isofs_get_block);
page              171 fs/isofs/namei.c 	struct page *page;
page              175 fs/isofs/namei.c 	page = alloc_page(GFP_USER);
page              176 fs/isofs/namei.c 	if (!page)
page              182 fs/isofs/namei.c 				page_address(page),
page              183 fs/isofs/namei.c 				1024 + page_address(page));
page              184 fs/isofs/namei.c 	__free_page(page);
page              663 fs/isofs/rock.c 	struct inode *inode = page->mapping->host;
page              665 fs/isofs/rock.c 	char *link = kmap(page);
page              753 fs/isofs/rock.c 	SetPageUptodate(page);
page              754 fs/isofs/rock.c 	kunmap(page);
page              755 fs/isofs/rock.c 	unlock_page(page);
page              771 fs/isofs/rock.c 	SetPageError(page);
page              772 fs/isofs/rock.c 	kunmap(page);
page              773 fs/isofs/rock.c 	unlock_page(page);
page               53 fs/jbd/commit.c 	struct page *page;
page               59 fs/jbd/commit.c 	page = bh->b_page;
page               60 fs/jbd/commit.c 	if (!page)
page               62 fs/jbd/commit.c 	if (page->mapping)
page               66 fs/jbd/commit.c 	if (!trylock_page(page))
page               69 fs/jbd/commit.c 	page_cache_get(page);
page               71 fs/jbd/commit.c 	try_to_free_buffers(page);
page               72 fs/jbd/commit.c 	unlock_page(page);
page               73 fs/jbd/commit.c 	page_cache_release(page);
page              287 fs/jbd/journal.c 	struct page *new_page;
page              723 fs/jbd/transaction.c 		struct page *page;
page              729 fs/jbd/transaction.c 		page = jh2bh(jh)->b_page;
page              731 fs/jbd/transaction.c 		source = kmap_atomic(page, KM_USER0);
page             1725 fs/jbd/transaction.c 	J_ASSERT(PageLocked(page));
page             1727 fs/jbd/transaction.c 	head = page_buffers(page);
page             1749 fs/jbd/transaction.c 	ret = try_to_free_buffers(page);
page             1767 fs/jbd/transaction.c 		ret = try_to_free_buffers(page);
page             2003 fs/jbd/transaction.c 	if (!PageLocked(page))
page             2005 fs/jbd/transaction.c 	if (!page_has_buffers(page))
page             2012 fs/jbd/transaction.c 	head = bh = page_buffers(page);
page             2029 fs/jbd/transaction.c 		if (may_free && try_to_free_buffers(page))
page             2030 fs/jbd/transaction.c 			J_ASSERT(!page_has_buffers(page));
page               58 fs/jbd2/commit.c 	struct page *page;
page               64 fs/jbd2/commit.c 	page = bh->b_page;
page               65 fs/jbd2/commit.c 	if (!page)
page               67 fs/jbd2/commit.c 	if (page->mapping)
page               71 fs/jbd2/commit.c 	if (!trylock_page(page))
page               74 fs/jbd2/commit.c 	page_cache_get(page);
page               76 fs/jbd2/commit.c 	try_to_free_buffers(page);
page               77 fs/jbd2/commit.c 	unlock_page(page);
page               78 fs/jbd2/commit.c 	page_cache_release(page);
page              299 fs/jbd2/commit.c 	struct page *page = bh->b_page;
page              303 fs/jbd2/commit.c 	addr = kmap_atomic(page, KM_USER0);
page              290 fs/jbd2/journal.c 	struct page *new_page;
page              732 fs/jbd2/transaction.c 		struct page *page;
page              738 fs/jbd2/transaction.c 		page = jh2bh(jh)->b_page;
page              740 fs/jbd2/transaction.c 		source = kmap_atomic(page, KM_USER0);
page             1543 fs/jbd2/transaction.c 	J_ASSERT(PageLocked(page));
page             1545 fs/jbd2/transaction.c 	head = page_buffers(page);
page             1568 fs/jbd2/transaction.c 	ret = try_to_free_buffers(page);
page             1586 fs/jbd2/transaction.c 		ret = try_to_free_buffers(page);
page             1815 fs/jbd2/transaction.c 	if (!PageLocked(page))
page             1817 fs/jbd2/transaction.c 	if (!page_has_buffers(page))
page             1824 fs/jbd2/transaction.c 	head = bh = page_buffers(page);
page             1841 fs/jbd2/transaction.c 		if (may_free && try_to_free_buffers(page))
page             1842 fs/jbd2/transaction.c 			J_ASSERT(!page_has_buffers(page));
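
The commit-path helper quoted at fs/jbd/commit.c lines 53-73 (and its jbd2 twin at lines 58-78) frees journal-owned buffer pages only when it can do so without blocking: the page lock is taken with trylock_page(), a reference is pinned around try_to_free_buffers(), and the function simply gives up on any contention. A sketch of that opportunistic release:

#include <linux/buffer_head.h>
#include <linux/pagemap.h>

static void my_release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		return;			/* still needs writeback */
	if (atomic_read(&bh->b_count) != 1)
		return;			/* someone else holds it */

	page = bh->b_page;
	if (!page || page->mapping)
		return;			/* only anonymous journal pages */

	if (!trylock_page(page))
		return;			/* never block in commit */

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
}
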
page              128 fs/jffs2/file.c 	struct page *pg;
page              657 fs/jffs2/fs.c  	struct page *pg;
page              672 fs/jffs2/fs.c  	struct page *pg = (void *)*priv;
page              270 fs/jfs/inode.c 	return block_write_full_page(page, jfs_get_block, wbc);
page              281 fs/jfs/inode.c 	return mpage_readpage(page, jfs_get_block);
page              508 fs/jfs/jfs_logmgr.c 		lsn = (log->page << L2LOGPSIZE) + dstoffset;
page              530 fs/jfs/jfs_logmgr.c 			tblk->pn = log->page;
page              540 fs/jfs/jfs_logmgr.c 			le16_to_cpu(lrd->type), log->bp, log->page, dstoffset);
page              581 fs/jfs/jfs_logmgr.c 	pn = log->page;
page              584 fs/jfs/jfs_logmgr.c 	lspn = le32_to_cpu(lp->h.page);
page              645 fs/jfs/jfs_logmgr.c 	log->page = (pn == log->size - 1) ? 2 : pn + 1;
page              649 fs/jfs/jfs_logmgr.c 	nextbp = lbmAllocate(log, log->page);
page              655 fs/jfs/jfs_logmgr.c 	lp->h.page = lp->t.page = cpu_to_le32(lspn + 1);
page             1356 fs/jfs/jfs_logmgr.c 		log->page = le32_to_cpu(logsuper->end) / LOGPSIZE;
page             1357 fs/jfs/jfs_logmgr.c 		log->eor = le32_to_cpu(logsuper->end) - (LOGPSIZE * log->page);
page             1363 fs/jfs/jfs_logmgr.c 		if ((rc = lbmRead(log, log->page, &bp)))
page             1369 fs/jfs/jfs_logmgr.c 			 le32_to_cpu(logsuper->end), log->page, log->eor,
page             1373 fs/jfs/jfs_logmgr.c 		bp->l_pn = log->page;
page             1633 fs/jfs/jfs_logmgr.c 					       sizeof(long), mp->page,
page             1634 fs/jfs/jfs_logmgr.c 					       sizeof(struct page), 0);
page             1708 fs/jfs/jfs_logmgr.c 		 lsn, log->page, log->eor);
page             1845 fs/jfs/jfs_logmgr.c 		struct page *page;
page             1850 fs/jfs/jfs_logmgr.c 		page = virt_to_page(buffer);
page             1859 fs/jfs/jfs_logmgr.c 				get_page(page);
page             1862 fs/jfs/jfs_logmgr.c 			lbuf->l_page = page;
page             2465 fs/jfs/jfs_logmgr.c 	lp->h.page = lp->t.page = cpu_to_le32(npages - 3);
page             2485 fs/jfs/jfs_logmgr.c 		lp->h.page = lp->t.page = cpu_to_le32(lspn);
page              124 fs/jfs/jfs_logmgr.h 		__le32 page;	/* 4: log sequence page number */
page              132 fs/jfs/jfs_logmgr.h 		__le32 page;	/* 4: normally the same as h.page */
page              386 fs/jfs/jfs_logmgr.h 	int page;		/* 4: page number of eol page */
page              464 fs/jfs/jfs_logmgr.h 	struct page *l_page;	/* The page itself */
page               60 fs/jfs/jfs_metapage.c 			unlock_page(mp->page);
page               62 fs/jfs/jfs_metapage.c 			lock_page(mp->page);
page               91 fs/jfs/jfs_metapage.c #define mp_anchor(page) ((struct meta_anchor *)page_private(page))
page               95 fs/jfs/jfs_metapage.c 	if (!PagePrivate(page))
page               97 fs/jfs/jfs_metapage.c 	return mp_anchor(page)->mp[offset >> L2PSIZE];
page              106 fs/jfs/jfs_metapage.c 	if (PagePrivate(page))
page              107 fs/jfs/jfs_metapage.c 		a = mp_anchor(page);
page              112 fs/jfs/jfs_metapage.c 		set_page_private(page, (unsigned long)a);
page              113 fs/jfs/jfs_metapage.c 		SetPagePrivate(page);
page              114 fs/jfs/jfs_metapage.c 		kmap(page);
page              118 fs/jfs/jfs_metapage.c 		l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
page              129 fs/jfs/jfs_metapage.c 	struct meta_anchor *a = mp_anchor(page);
page              130 fs/jfs/jfs_metapage.c 	int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
page              140 fs/jfs/jfs_metapage.c 		set_page_private(page, 0);
page              141 fs/jfs/jfs_metapage.c 		ClearPagePrivate(page);
page              142 fs/jfs/jfs_metapage.c 		kunmap(page);
page              148 fs/jfs/jfs_metapage.c 	atomic_inc(&mp_anchor(page)->io_count);
page              153 fs/jfs/jfs_metapage.c 	if (atomic_dec_and_test(&mp_anchor(page)->io_count))
page              154 fs/jfs/jfs_metapage.c 		handler(page);
page              160 fs/jfs/jfs_metapage.c 	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
page              166 fs/jfs/jfs_metapage.c 		set_page_private(page, (unsigned long)mp);
page              167 fs/jfs/jfs_metapage.c 		SetPagePrivate(page);
page              168 fs/jfs/jfs_metapage.c 		kmap(page);
page              175 fs/jfs/jfs_metapage.c 	set_page_private(page, 0);
page              176 fs/jfs/jfs_metapage.c 	ClearPagePrivate(page);
page              177 fs/jfs/jfs_metapage.c 	kunmap(page);
page              181 fs/jfs/jfs_metapage.c #define dec_io(page, handler) handler(page)
page              244 fs/jfs/jfs_metapage.c 	remove_metapage(page, mp);
page              280 fs/jfs/jfs_metapage.c 	if (!PageError(page))
page              281 fs/jfs/jfs_metapage.c 		SetPageUptodate(page);
page              282 fs/jfs/jfs_metapage.c 	unlock_page(page);
page              287 fs/jfs/jfs_metapage.c 	struct page *page = bio->bi_private;
page              291 fs/jfs/jfs_metapage.c 		SetPageError(page);
page              294 fs/jfs/jfs_metapage.c 	dec_io(page, last_read_complete);
page              326 fs/jfs/jfs_metapage.c 		mp = page_to_mp(page, offset);
page              337 fs/jfs/jfs_metapage.c 	end_page_writeback(page);
page              342 fs/jfs/jfs_metapage.c 	struct page *page = bio->bi_private;
page              344 fs/jfs/jfs_metapage.c 	BUG_ON(!PagePrivate(page));
page              348 fs/jfs/jfs_metapage.c 		SetPageError(page);
page              350 fs/jfs/jfs_metapage.c 	dec_io(page, last_write_complete);
page              358 fs/jfs/jfs_metapage.c 	struct inode *inode = page->mapping->host;
page              373 fs/jfs/jfs_metapage.c 	page_start = (sector_t)page->index <<
page              375 fs/jfs/jfs_metapage.c 	BUG_ON(!PageLocked(page));
page              376 fs/jfs/jfs_metapage.c 	BUG_ON(PageWriteback(page));
page              377 fs/jfs/jfs_metapage.c 	set_page_writeback(page);
page              380 fs/jfs/jfs_metapage.c 		mp = page_to_mp(page, offset);
page              409 fs/jfs/jfs_metapage.c 			if (bio_add_page(bio, page, bio_bytes, bio_offset) <
page              416 fs/jfs/jfs_metapage.c 			inc_io(page);
page              423 fs/jfs/jfs_metapage.c 			inc_io(page);
page              429 fs/jfs/jfs_metapage.c 			dec_io(page, last_write_complete);
page              439 fs/jfs/jfs_metapage.c 		bio->bi_private = page;
page              449 fs/jfs/jfs_metapage.c 		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
page              458 fs/jfs/jfs_metapage.c 		redirty_page_for_writepage(wbc, page);
page              460 fs/jfs/jfs_metapage.c 	unlock_page(page);
page              463 fs/jfs/jfs_metapage.c 		end_page_writeback(page);
page              475 fs/jfs/jfs_metapage.c 	unlock_page(page);
page              476 fs/jfs/jfs_metapage.c 	dec_io(page, last_write_complete);
page              483 fs/jfs/jfs_metapage.c 	struct inode *inode = page->mapping->host;
page              493 fs/jfs/jfs_metapage.c 	BUG_ON(!PageLocked(page));
page              494 fs/jfs/jfs_metapage.c 	page_start = (sector_t)page->index <<
page              503 fs/jfs/jfs_metapage.c 			if (!PagePrivate(page))
page              504 fs/jfs/jfs_metapage.c 				insert_metapage(page, NULL);
page              505 fs/jfs/jfs_metapage.c 			inc_io(page);
page              513 fs/jfs/jfs_metapage.c 			bio->bi_private = page;
page              516 fs/jfs/jfs_metapage.c 			if (bio_add_page(bio, page, len, offset) < len)
page              525 fs/jfs/jfs_metapage.c 		unlock_page(page);
page              532 fs/jfs/jfs_metapage.c 	dec_io(page, last_read_complete);
page              543 fs/jfs/jfs_metapage.c 		mp = page_to_mp(page, offset);
page              558 fs/jfs/jfs_metapage.c 		remove_metapage(page, mp);
page              569 fs/jfs/jfs_metapage.c 	BUG_ON(PageWriteback(page));
page              571 fs/jfs/jfs_metapage.c 	metapage_releasepage(page, 0);
page              591 fs/jfs/jfs_metapage.c 	struct page *page;
page              622 fs/jfs/jfs_metapage.c 		page = grab_cache_page(mapping, page_index);
page              623 fs/jfs/jfs_metapage.c 		if (!page) {
page              627 fs/jfs/jfs_metapage.c 		SetPageUptodate(page);
page              629 fs/jfs/jfs_metapage.c 		page = read_mapping_page(mapping, page_index, NULL);
page              630 fs/jfs/jfs_metapage.c 		if (IS_ERR(page) || !PageUptodate(page)) {
page              634 fs/jfs/jfs_metapage.c 		lock_page(page);
page              637 fs/jfs/jfs_metapage.c 	mp = page_to_mp(page, page_offset);
page              662 fs/jfs/jfs_metapage.c 		mp->page = page;
page              668 fs/jfs/jfs_metapage.c 		mp->data = page_address(page) + page_offset;
page              670 fs/jfs/jfs_metapage.c 		if (unlikely(insert_metapage(page, mp))) {
page              682 fs/jfs/jfs_metapage.c 	unlock_page(page);
page              687 fs/jfs/jfs_metapage.c 	unlock_page(page);
page              694 fs/jfs/jfs_metapage.c 	page_cache_get(mp->page);
page              695 fs/jfs/jfs_metapage.c 	lock_page(mp->page);
page              698 fs/jfs/jfs_metapage.c 	unlock_page(mp->page);
page              703 fs/jfs/jfs_metapage.c 	struct page *page = mp->page;
page              707 fs/jfs/jfs_metapage.c 	page_cache_get(page);
page              708 fs/jfs/jfs_metapage.c 	lock_page(page);
page              709 fs/jfs/jfs_metapage.c 	set_page_dirty(page);
page              710 fs/jfs/jfs_metapage.c 	write_one_page(page, 1);
page              712 fs/jfs/jfs_metapage.c 	page_cache_release(page);
page              717 fs/jfs/jfs_metapage.c 	lock_page(mp->page);
page              724 fs/jfs/jfs_metapage.c 		unlock_page(mp->page);
page              727 fs/jfs/jfs_metapage.c 	page_cache_get(mp->page);
page              730 fs/jfs/jfs_metapage.c 	unlock_page(mp->page);
page              736 fs/jfs/jfs_metapage.c 	struct page *page = mp->page;
page              739 fs/jfs/jfs_metapage.c 	BUG_ON(!page);
page              741 fs/jfs/jfs_metapage.c 	lock_page(page);
page              746 fs/jfs/jfs_metapage.c 		unlock_page(page);
page              747 fs/jfs/jfs_metapage.c 		page_cache_release(page);
page              752 fs/jfs/jfs_metapage.c 		set_page_dirty(page);
page              755 fs/jfs/jfs_metapage.c 			write_one_page(page, 1);
page              756 fs/jfs/jfs_metapage.c 			lock_page(page); /* write_one_page unlocks the page */
page              762 fs/jfs/jfs_metapage.c 	drop_metapage(page, mp);
page              764 fs/jfs/jfs_metapage.c 	unlock_page(page);
page              765 fs/jfs/jfs_metapage.c 	page_cache_release(page);
page              777 fs/jfs/jfs_metapage.c 	struct page *page;
page              786 fs/jfs/jfs_metapage.c 		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
page              787 fs/jfs/jfs_metapage.c 		if (!page)
page              790 fs/jfs/jfs_metapage.c 			mp = page_to_mp(page, offset);
page              803 fs/jfs/jfs_metapage.c 		unlock_page(page);
page              804 fs/jfs/jfs_metapage.c 		page_cache_release(page);
page               40 fs/jfs/jfs_metapage.h 	struct page *page;
page              106 fs/jfs/jfs_metapage.h 	struct page *page = mp->page;
page              107 fs/jfs/jfs_metapage.h 	lock_page(page);
page              110 fs/jfs/jfs_metapage.h 		page_cache_get(page);
page              111 fs/jfs/jfs_metapage.h 		wait_on_page_writeback(page);
page              113 fs/jfs/jfs_metapage.h 	unlock_page(page);
page              123 fs/jfs/jfs_metapage.h 		wait_on_page_writeback(mp->page);
page              132 fs/jfs/jfs_metapage.h 		page_cache_release(mp->page);
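
fs/jfs/jfs_metapage.h lines 106-113 show how a metapage is held across writeback: take the page lock, and if the metapage is still live, pin an extra page reference and wait for in-flight writeback before proceeding. A sketch of that wait, assuming a metapage-like holder with an mp->page field as at line 40; my_metapage and its count field are stand-ins for the real struct:

#include <linux/pagemap.h>

struct my_metapage {
	struct page *page;
	int count;
};

static void my_metapage_wait(struct my_metapage *mp)
{
	struct page *page = mp->page;

	lock_page(page);
	if (mp->count) {
		page_cache_get(page);	/* keep it across the wait */
		wait_on_page_writeback(page);
		page_cache_release(page);
	}
	unlock_page(page);
}
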
page              333 fs/libfs.c     	clear_highpage(page);
page              334 fs/libfs.c     	flush_dcache_page(page);
page              335 fs/libfs.c     	SetPageUptodate(page);
page              336 fs/libfs.c     	unlock_page(page);
page              343 fs/libfs.c     	if (!PageUptodate(page)) {
page              345 fs/libfs.c     			zero_user_segments(page,
page              356 fs/libfs.c     	struct page *page;
page              363 fs/libfs.c     	page = __grab_cache_page(mapping, index);
page              364 fs/libfs.c     	if (!page)
page              367 fs/libfs.c     	*pagep = page;
page              369 fs/libfs.c     	return simple_prepare_write(file, page, from, from+len);
page              375 fs/libfs.c     	struct inode *inode = page->mapping->host;
page              376 fs/libfs.c     	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
page              378 fs/libfs.c     	if (!PageUptodate(page))
page              379 fs/libfs.c     		SetPageUptodate(page);
page              386 fs/libfs.c     	set_page_dirty(page);
page              398 fs/libfs.c     		void *kaddr = kmap_atomic(page, KM_USER0);
page              400 fs/libfs.c     		flush_dcache_page(page);
page              404 fs/libfs.c     	simple_commit_write(file, page, from, from+copied);
page              406 fs/libfs.c     	unlock_page(page);
page              407 fs/libfs.c     	page_cache_release(page);
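
A sketch of the write path the fs/libfs.c lines above implement: reserve a locked, referenced page, copy data in, mark it up to date and dirty, then unlock and release it. copy_into_pagecache() is an illustrative name, not a libfs entry point, and assumes len <= PAGE_CACHE_SIZE:

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/string.h>

static int copy_into_pagecache(struct address_space *mapping,
			       pgoff_t index, const void *src, size_t len)
{
	struct page *page = __grab_cache_page(mapping, index);
	void *kaddr;

	if (!page)
		return -ENOMEM;
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, src, len);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
	SetPageUptodate(page);
	set_page_dirty(page);
	unlock_page(page);		/* __grab_cache_page() returned it locked */
	page_cache_release(page);
	return 0;
}
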
page               30 fs/minix/dir.c 	kunmap(page);
page               31 fs/minix/dir.c 	page_cache_release(page);
page               55 fs/minix/dir.c 	struct address_space *mapping = page->mapping;
page               58 fs/minix/dir.c 	block_write_end(NULL, mapping, pos, len, len, page, NULL);
page               65 fs/minix/dir.c 		err = write_one_page(page, 1);
page               67 fs/minix/dir.c 		unlock_page(page);
page               71 fs/minix/dir.c static struct page * dir_get_page(struct inode *dir, unsigned long n)
page               74 fs/minix/dir.c 	struct page *page = read_mapping_page(mapping, n, NULL);
page               75 fs/minix/dir.c 	if (!IS_ERR(page)) {
page               76 fs/minix/dir.c 		kmap(page);
page               77 fs/minix/dir.c 		if (!PageUptodate(page))
page               80 fs/minix/dir.c 	return page;
page               83 fs/minix/dir.c 	dir_put_page(page);
page              113 fs/minix/dir.c 		struct page *page = dir_get_page(inode, n);
page              115 fs/minix/dir.c 		if (IS_ERR(page))
page              117 fs/minix/dir.c 		kaddr = (char *)page_address(page);
page              139 fs/minix/dir.c 					dir_put_page(page);
page              144 fs/minix/dir.c 		dir_put_page(page);
page              178 fs/minix/dir.c 	struct page *page = NULL;
page              188 fs/minix/dir.c 		page = dir_get_page(dir, n);
page              189 fs/minix/dir.c 		if (IS_ERR(page))
page              192 fs/minix/dir.c 		kaddr = (char*)page_address(page);
page              209 fs/minix/dir.c 		dir_put_page(page);
page              214 fs/minix/dir.c 	*res_page = page;
page              225 fs/minix/dir.c 	struct page *page = NULL;
page              244 fs/minix/dir.c 		page = dir_get_page(dir, n);
page              245 fs/minix/dir.c 		err = PTR_ERR(page);
page              246 fs/minix/dir.c 		if (IS_ERR(page))
page              248 fs/minix/dir.c 		lock_page(page);
page              249 fs/minix/dir.c 		kaddr = (char*)page_address(page);
page              276 fs/minix/dir.c 		unlock_page(page);
page              277 fs/minix/dir.c 		dir_put_page(page);
page              283 fs/minix/dir.c 	pos = page_offset(page) + p - (char*)page_address(page);
page              284 fs/minix/dir.c 	err = __minix_write_begin(NULL, page->mapping, pos, sbi->s_dirsize,
page              285 fs/minix/dir.c 					AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
page              296 fs/minix/dir.c 	err = dir_commit_chunk(page, pos, sbi->s_dirsize);
page              300 fs/minix/dir.c 	dir_put_page(page);
page              304 fs/minix/dir.c 	unlock_page(page);
page              310 fs/minix/dir.c 	struct address_space *mapping = page->mapping;
page              312 fs/minix/dir.c 	char *kaddr = page_address(page);
page              313 fs/minix/dir.c 	loff_t pos = page_offset(page) + (char*)de - kaddr;
page              317 fs/minix/dir.c 	lock_page(page);
page              319 fs/minix/dir.c 					AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
page              322 fs/minix/dir.c 		err = dir_commit_chunk(page, pos, len);
page              324 fs/minix/dir.c 		unlock_page(page);
page              326 fs/minix/dir.c 	dir_put_page(page);
page              335 fs/minix/dir.c 	struct page *page = grab_cache_page(mapping, 0);
page              340 fs/minix/dir.c 	if (!page)
page              343 fs/minix/dir.c 					AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
page              345 fs/minix/dir.c 		unlock_page(page);
page              349 fs/minix/dir.c 	kaddr = kmap_atomic(page, KM_USER0);
page              371 fs/minix/dir.c 	err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
page              373 fs/minix/dir.c 	page_cache_release(page);
page              382 fs/minix/dir.c 	struct page *page = NULL;
page              391 fs/minix/dir.c 		page = dir_get_page(inode, i);
page              392 fs/minix/dir.c 		if (IS_ERR(page))
page              395 fs/minix/dir.c 		kaddr = (char *)page_address(page);
page              421 fs/minix/dir.c 		dir_put_page(page);
page              426 fs/minix/dir.c 	dir_put_page(page);
page              434 fs/minix/dir.c 	struct address_space *mapping = page->mapping;
page              437 fs/minix/dir.c 	loff_t pos = page_offset(page) +
page              438 fs/minix/dir.c 			(char *)de - (char *)page_address(page);
page              441 fs/minix/dir.c 	lock_page(page);
page              444 fs/minix/dir.c 					AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
page              447 fs/minix/dir.c 		err = dir_commit_chunk(page, pos, sbi->s_dirsize);
page              449 fs/minix/dir.c 		unlock_page(page);
page              451 fs/minix/dir.c 	dir_put_page(page);
page              458 fs/minix/dir.c 	struct page *page = dir_get_page(dir, 0);
page              462 fs/minix/dir.c 	if (!IS_ERR(page)) {
page              463 fs/minix/dir.c 		de = minix_next_entry(page_address(page), sbi);
page              464 fs/minix/dir.c 		*p = page;
page              471 fs/minix/dir.c 	struct page *page;
page              472 fs/minix/dir.c 	struct minix_dir_entry *de = minix_find_entry(dentry, &page);
page              477 fs/minix/dir.c 		dir_put_page(page);
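
The dir_get_page()/dir_put_page() pairing in the fs/minix/dir.c lines above reduces to the sketch below; the uptodate check and error unwinding of the real helpers are elided:

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

static struct page *map_dir_page(struct inode *dir, unsigned long n)
{
	struct page *page = read_mapping_page(dir->i_mapping, n, NULL);

	if (!IS_ERR(page))
		kmap(page);		/* mapped and pinned for the caller */
	return page;			/* or an ERR_PTR() value */
}

static void unmap_dir_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);	/* drop read_mapping_page()'s reference */
}
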
page              347 fs/minix/inode.c 	return block_write_full_page(page, minix_get_block, wbc);
page              352 fs/minix/inode.c 	return block_read_full_page(page, minix_get_block);
page              156 fs/minix/namei.c 	struct page * page;
page              159 fs/minix/namei.c 	de = minix_find_entry(dentry, &page);
page              163 fs/minix/namei.c 	err = minix_delete_entry(de, page);
page              194 fs/minix/namei.c 	struct page * dir_page = NULL;
page              196 fs/minix/namei.c 	struct page * old_page;
page              212 fs/minix/namei.c 		struct page * new_page;
page               48 fs/mpage.c     		struct page *page = bvec->bv_page;
page               54 fs/mpage.c     			SetPageUptodate(page);
page               56 fs/mpage.c     			ClearPageUptodate(page);
page               57 fs/mpage.c     			SetPageError(page);
page               59 fs/mpage.c     		unlock_page(page);
page               70 fs/mpage.c     		struct page *page = bvec->bv_page;
page               76 fs/mpage.c     			SetPageError(page);
page               77 fs/mpage.c     			if (page->mapping)
page               78 fs/mpage.c     				set_bit(AS_EIO, &page->mapping->flags);
page               80 fs/mpage.c     		end_page_writeback(page);
page              129 fs/mpage.c     	struct inode *inode = page->mapping->host;
page              133 fs/mpage.c     	if (!page_has_buffers(page)) {
page              140 fs/mpage.c     			SetPageUptodate(page);
page              143 fs/mpage.c     		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
page              145 fs/mpage.c     	head = page_buffers(page);
page              173 fs/mpage.c     	struct inode *inode = page->mapping->host;
page              189 fs/mpage.c     	if (page_has_buffers(page))
page              192 fs/mpage.c     	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
page              226 fs/mpage.c     	map_bh->b_page = page;
page              255 fs/mpage.c     			map_buffer_to_page(page, map_bh, page_block);
page              280 fs/mpage.c     		zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
page              282 fs/mpage.c     			SetPageUptodate(page);
page              283 fs/mpage.c     			unlock_page(page);
page              287 fs/mpage.c     		SetPageMappedToDisk(page);
page              306 fs/mpage.c     	if (bio_add_page(bio, page, length, 0) < length) {
page              321 fs/mpage.c     	if (!PageUptodate(page))
page              322 fs/mpage.c     		block_read_full_page(page, get_block);
page              324 fs/mpage.c     		unlock_page(page);
page              383 fs/mpage.c     		struct page *page = list_entry(pages->prev, struct page, lru);
page              385 fs/mpage.c     		prefetchw(&page->flags);
page              386 fs/mpage.c     		list_del(&page->lru);
page              387 fs/mpage.c     		if (!add_to_page_cache_lru(page, mapping,
page              388 fs/mpage.c     					page->index, GFP_KERNEL)) {
page              389 fs/mpage.c     			bio = do_mpage_readpage(bio, page,
page              395 fs/mpage.c     		page_cache_release(page);
page              415 fs/mpage.c     	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
page              445 fs/mpage.c     	struct address_space *mapping = page->mapping;
page              446 fs/mpage.c     	struct inode *inode = page->mapping->host;
page              464 fs/mpage.c     	if (page_has_buffers(page)) {
page              465 fs/mpage.c     		struct buffer_head *head = page_buffers(page);
page              517 fs/mpage.c     	BUG_ON(!PageUptodate(page));
page              518 fs/mpage.c     	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
page              520 fs/mpage.c     	map_bh.b_page = page;
page              551 fs/mpage.c     	if (page->index >= end_index) {
page              562 fs/mpage.c     		if (page->index > end_index || !offset)
page              564 fs/mpage.c     		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
page              587 fs/mpage.c     	if (bio_add_page(bio, page, length, 0) < length) {
page              596 fs/mpage.c     	if (page_has_buffers(page)) {
page              597 fs/mpage.c     		struct buffer_head *head = page_buffers(page);
page              613 fs/mpage.c     		if (buffer_heads_over_limit && PageUptodate(page))
page              614 fs/mpage.c     			try_to_free_buffers(page);
page              617 fs/mpage.c     	BUG_ON(PageWriteback(page));
page              618 fs/mpage.c     	set_page_writeback(page);
page              619 fs/mpage.c     	unlock_page(page);
page              636 fs/mpage.c     		ret = mapping->a_ops->writepage(page, wbc);
page              703 fs/mpage.c     	int ret = __mpage_writepage(page, wbc, &mpd);
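
Reassembling the fs/mpage.c fragments above, the read-side bio completion walks the bio's page vector backwards, marking each page up to date or failed before unlocking it; a sketch under the bio end_io signature of this kernel generation:

#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/prefetch.h>

static void sketch_end_bio_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);	/* wakes readers blocked in lock_page() */
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
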
page              130 fs/namei.c     	retval = strncpy_from_user(page, filename, len);
page             2762 fs/namei.c     	struct page * page;
page             2764 fs/namei.c     	page = read_mapping_page(mapping, 0, NULL);
page             2765 fs/namei.c     	if (IS_ERR(page))
page             2766 fs/namei.c     		return (char*)page;
page             2767 fs/namei.c     	*ppage = page;
page             2768 fs/namei.c     	return kmap(page);
page             2773 fs/namei.c     	struct page *page = NULL;
page             2774 fs/namei.c     	char *s = page_getlink(dentry, &page);
page             2776 fs/namei.c     	if (page) {
page             2777 fs/namei.c     		kunmap(page);
page             2778 fs/namei.c     		page_cache_release(page);
page             2785 fs/namei.c     	struct page *page = NULL;
page             2786 fs/namei.c     	nd_set_link(nd, page_getlink(dentry, &page));
page             2787 fs/namei.c     	return page;
page             2792 fs/namei.c     	struct page *page = cookie;
page             2794 fs/namei.c     	if (page) {
page             2795 fs/namei.c     		kunmap(page);
page             2796 fs/namei.c     		page_cache_release(page);
page             2804 fs/namei.c     	struct page *page;
page             2811 fs/namei.c     				AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
page             2815 fs/namei.c     	kaddr = kmap_atomic(page, KM_USER0);
page             2820 fs/namei.c     							page, fsdata);
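
The fs/namei.c lines above implement the generic pagecache-symlink pattern: map page 0 of the symlink body for the VFS, then unmap and release it on put. A condensed sketch with illustrative names:

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

static char *sketch_getlink(struct inode *inode, struct page **ppage)
{
	struct page *page = read_mapping_page(inode->i_mapping, 0, NULL);

	if (IS_ERR(page))
		return (char *)page;	/* propagate the ERR_PTR() */
	*ppage = page;
	return kmap(page);
}

static void sketch_putlink(struct page *page)
{
	if (page) {
		kunmap(page);
		page_cache_release(page);
	}
}
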
page             1858 fs/namespace.c 	unsigned long page;
page             1865 fs/namespace.c 	if (!(page = __get_free_page(GFP_KERNEL)))
page             1877 fs/namespace.c 	i = size - exact_copy_from_user((void *)page, data, size);
page             1879 fs/namespace.c 		free_page(page);
page             1883 fs/namespace.c 		memset((char *)page + i, 0, PAGE_SIZE - i);
page             1884 fs/namespace.c 	*where = page;
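
The fs/namespace.c lines above stage user-supplied mount data in a single zero-padded free page. In this sketch copy_from_user() stands in for the fault-tolerant exact_copy_from_user() helper of the real code, and size is assumed to be at most PAGE_SIZE:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int sketch_copy_opts(const void __user *data, unsigned long size,
			    unsigned long *where)
{
	unsigned long page, copied;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;
	copied = size - copy_from_user((void *)page, data, size);
	if (!copied) {			/* nothing usable was copied */
		free_page(page);
		return -EFAULT;
	}
	if (copied != PAGE_SIZE)	/* zero the tail for later parsers */
		memset((char *)page + copied, 0, PAGE_SIZE - copied);
	*where = page;
	return 0;
}
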
page              407 fs/ncpfs/dir.c 	struct page *page = NULL;
page              416 fs/ncpfs/dir.c 	ctl.page  = NULL;
page              439 fs/ncpfs/dir.c 	page = grab_cache_page(&inode->i_data, 0);
page              440 fs/ncpfs/dir.c 	if (!page)
page              443 fs/ncpfs/dir.c 	ctl.cache = cache = kmap(page);
page              446 fs/ncpfs/dir.c 	if (!PageUptodate(page) || !ctl.head.eof)
page              468 fs/ncpfs/dir.c 			ctl.page = find_lock_page(&inode->i_data, ctl.ofs);
page              469 fs/ncpfs/dir.c 			if (!ctl.page)
page              471 fs/ncpfs/dir.c 			ctl.cache = kmap(ctl.page);
page              472 fs/ncpfs/dir.c 			if (!PageUptodate(ctl.page))
page              494 fs/ncpfs/dir.c 		if (ctl.page) {
page              495 fs/ncpfs/dir.c 			kunmap(ctl.page);
page              496 fs/ncpfs/dir.c 			SetPageUptodate(ctl.page);
page              497 fs/ncpfs/dir.c 			unlock_page(ctl.page);
page              498 fs/ncpfs/dir.c 			page_cache_release(ctl.page);
page              499 fs/ncpfs/dir.c 			ctl.page = NULL;
page              505 fs/ncpfs/dir.c 	if (ctl.page) {
page              506 fs/ncpfs/dir.c 		kunmap(ctl.page);
page              507 fs/ncpfs/dir.c 		unlock_page(ctl.page);
page              508 fs/ncpfs/dir.c 		page_cache_release(ctl.page);
page              509 fs/ncpfs/dir.c 		ctl.page = NULL;
page              535 fs/ncpfs/dir.c 	if (page) {
page              537 fs/ncpfs/dir.c 		kunmap(page);
page              538 fs/ncpfs/dir.c 		SetPageUptodate(page);
page              539 fs/ncpfs/dir.c 		unlock_page(page);
page              540 fs/ncpfs/dir.c 		page_cache_release(page);
page              542 fs/ncpfs/dir.c 	if (ctl.page) {
page              543 fs/ncpfs/dir.c 		kunmap(ctl.page);
page              544 fs/ncpfs/dir.c 		SetPageUptodate(ctl.page);
page              545 fs/ncpfs/dir.c 		unlock_page(ctl.page);
page              546 fs/ncpfs/dir.c 		page_cache_release(ctl.page);
page              611 fs/ncpfs/dir.c 		if (ctl.page) {
page              612 fs/ncpfs/dir.c 			kunmap(ctl.page);
page              613 fs/ncpfs/dir.c 			SetPageUptodate(ctl.page);
page              614 fs/ncpfs/dir.c 			unlock_page(ctl.page);
page              615 fs/ncpfs/dir.c 			page_cache_release(ctl.page);
page              620 fs/ncpfs/dir.c 		ctl.page  = grab_cache_page(&inode->i_data, ctl.ofs);
page              621 fs/ncpfs/dir.c 		if (ctl.page)
page              622 fs/ncpfs/dir.c 			ctl.cache = kmap(ctl.page);
page               46 fs/ncpfs/mmap.c 	vmf->page = alloc_page(GFP_HIGHUSER);
page               47 fs/ncpfs/mmap.c 	if (!vmf->page)
page               49 fs/ncpfs/mmap.c 	pg_addr = kmap(vmf->page);
page               86 fs/ncpfs/mmap.c 	flush_dcache_page(vmf->page);
page               87 fs/ncpfs/mmap.c 	kunmap(vmf->page);
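
The fs/ncpfs/mmap.c lines above satisfy a fault from a private highmem page that the filesystem fills by hand. A sketch in which fill() is a hypothetical stand-in for the ncpfs read loop:

#include <linux/highmem.h>
#include <linux/mm.h>

static int sketch_fault_fill(struct vm_fault *vmf, void (*fill)(char *buf))
{
	char *pg_addr;

	vmf->page = alloc_page(GFP_HIGHUSER);	/* the VM releases it later */
	if (!vmf->page)
		return VM_FAULT_OOM;
	pg_addr = kmap(vmf->page);
	fill(pg_addr);				/* read file data into the page */
	flush_dcache_page(vmf->page);
	kunmap(vmf->page);
	return VM_FAULT_MAJOR;			/* reported as a major fault */
}
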
page              249 fs/ncpfs/ncplib_kernel.h 	struct	page			*page;
page               45 fs/ncpfs/symlink.c 	struct inode *inode = page->mapping->host;
page               48 fs/ncpfs/symlink.c 	char *buf = kmap(page);
page               83 fs/ncpfs/symlink.c 	SetPageUptodate(page);
page               84 fs/ncpfs/symlink.c 	kunmap(page);
page               85 fs/ncpfs/symlink.c 	unlock_page(page);
page               92 fs/ncpfs/symlink.c 	SetPageError(page);
page               93 fs/ncpfs/symlink.c 	kunmap(page);
page               94 fs/ncpfs/symlink.c 	unlock_page(page);
page              150 fs/nfs/dir.c   	struct page	*page;
page              185 fs/nfs/dir.c   			page->index);
page              189 fs/nfs/dir.c   	error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, desc->entry->cookie, page,
page              203 fs/nfs/dir.c   	SetPageUptodate(page);
page              208 fs/nfs/dir.c   	if (invalidate_inode_pages2_range(inode->i_mapping, page->index + 1, -1) < 0) {
page              212 fs/nfs/dir.c   	unlock_page(page);
page              215 fs/nfs/dir.c   	unlock_page(page);
page              237 fs/nfs/dir.c   	kunmap(desc->page);
page              238 fs/nfs/dir.c   	page_cache_release(desc->page);
page              239 fs/nfs/dir.c   	desc->page = NULL;
page              315 fs/nfs/dir.c   	struct page	*page;
page              326 fs/nfs/dir.c   	page = read_cache_page(inode->i_mapping, desc->page_index,
page              328 fs/nfs/dir.c   	if (IS_ERR(page)) {
page              329 fs/nfs/dir.c   		status = PTR_ERR(page);
page              334 fs/nfs/dir.c   	desc->page = page;
page              335 fs/nfs/dir.c   	desc->ptr = kmap(page);		/* matching kunmap in nfs_do_filldir */
page              472 fs/nfs/dir.c   	struct page	*page = NULL;
page              479 fs/nfs/dir.c   	page = alloc_page(GFP_HIGHUSER);
page              480 fs/nfs/dir.c   	if (!page) {
page              486 fs/nfs/dir.c   						*desc->dir_cookie, page,
page              489 fs/nfs/dir.c   	desc->page = page;
page              490 fs/nfs/dir.c   	desc->ptr = kmap(page);		/* matching kunmap in nfs_do_filldir */
page             1467 fs/nfs/dir.c   	struct page *page;
page             1482 fs/nfs/dir.c   	page = alloc_page(GFP_HIGHUSER);
page             1483 fs/nfs/dir.c   	if (!page)
page             1486 fs/nfs/dir.c   	kaddr = kmap_atomic(page, KM_USER0);
page             1492 fs/nfs/dir.c   	error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
page             1498 fs/nfs/dir.c   		__free_page(page);
page             1507 fs/nfs/dir.c   	if (!add_to_page_cache(page, dentry->d_inode->i_mapping, 0,
page             1509 fs/nfs/dir.c   		pagevec_add(&lru_pvec, page);
page             1511 fs/nfs/dir.c   		SetPageUptodate(page);
page             1512 fs/nfs/dir.c   		unlock_page(page);
page             1514 fs/nfs/dir.c   		__free_page(page);
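
The fs/nfs/dir.c symlink lines above stage the target path in a freshly allocated highmem page, zero-padded to a full page, before inserting it into the new inode's page cache. A sketch with an illustrative helper name, assuming pathlen <= PAGE_CACHE_SIZE:

#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/string.h>

static struct page *stage_symlink_body(const char *path, size_t pathlen)
{
	struct page *page = alloc_page(GFP_HIGHUSER);
	char *kaddr;

	if (!page)
		return NULL;
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, path, pathlen);
	memset(kaddr + pathlen, 0, PAGE_CACHE_SIZE - pathlen);
	kunmap_atomic(kaddr, KM_USER0);
	return page;	/* caller adds it to the mapping or __free_page()s it */
}
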
page              135 fs/nfs/direct.c 		struct page *page = pages[i];
page              136 fs/nfs/direct.c 		if (!PageCompound(page))
page              137 fs/nfs/direct.c 			set_page_dirty(page);
page              346 fs/nfs/file.c  	struct page *page;
page              354 fs/nfs/file.c  	page = __grab_cache_page(mapping, index);
page              355 fs/nfs/file.c  	if (!page)
page              357 fs/nfs/file.c  	*pagep = page;
page              359 fs/nfs/file.c  	ret = nfs_flush_incompatible(file, page);
page              361 fs/nfs/file.c  		unlock_page(page);
page              362 fs/nfs/file.c  		page_cache_release(page);
page              383 fs/nfs/file.c  	if (!PageUptodate(page)) {
page              384 fs/nfs/file.c  		unsigned pglen = nfs_page_length(page);
page              388 fs/nfs/file.c  			zero_user_segments(page, 0, offset,
page              390 fs/nfs/file.c  			SetPageUptodate(page);
page              392 fs/nfs/file.c  			zero_user_segment(page, end, PAGE_CACHE_SIZE);
page              394 fs/nfs/file.c  				SetPageUptodate(page);
page              396 fs/nfs/file.c  			zero_user_segment(page, pglen, PAGE_CACHE_SIZE);
page              399 fs/nfs/file.c  	status = nfs_updatepage(file, page, offset, copied);
page              401 fs/nfs/file.c  	unlock_page(page);
page              402 fs/nfs/file.c  	page_cache_release(page);
page              411 fs/nfs/file.c  	dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %lu)\n", page, offset);
page              416 fs/nfs/file.c  	nfs_wb_page_cancel(page->mapping->host, page);
page              421 fs/nfs/file.c  	dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
page              429 fs/nfs/file.c  	struct inode *inode = page->mapping->host;
page              432 fs/nfs/file.c  		inode->i_ino, (long long)page_offset(page));
page              434 fs/nfs/file.c  	return nfs_wb_page(inode, page);
page              462 fs/nfs/file.c  		(long long)page_offset(page));
page              464 fs/nfs/file.c  	lock_page(page);
page              465 fs/nfs/file.c  	mapping = page->mapping;
page              470 fs/nfs/file.c  	pagelen = nfs_page_length(page);
page              474 fs/nfs/file.c  	ret = nfs_flush_incompatible(filp, page);
page              478 fs/nfs/file.c  	ret = nfs_updatepage(filp, page, 0, pagelen);
page              482 fs/nfs/file.c  	unlock_page(page);
page              256 fs/nfs/internal.h 	loff_t i_size = i_size_read(page->mapping->host);
page              260 fs/nfs/internal.h 		if (page->index < end_index)
page              262 fs/nfs/internal.h 		if (page->index == end_index)
page              227 fs/nfs/namespace.c 	char *page = (char *) __get_free_page(GFP_USER);
page              235 fs/nfs/namespace.c 	if (page == NULL)
page              237 fs/nfs/namespace.c 	devname = nfs_devname(mnt_parent, dentry, page, PAGE_SIZE);
page              243 fs/nfs/namespace.c 	free_page((unsigned long)page);
page              427 fs/nfs/nfs2xdr.c 	struct page **page;
page              451 fs/nfs/nfs2xdr.c 	page = rcvbuf->pages;
page              452 fs/nfs/nfs2xdr.c 	kaddr = p = kmap_atomic(*page, KM_USER0);
page              188 fs/nfs/nfs3acl.c 	struct page *pages[NFSACL_MAXPAGES] = { };
page              294 fs/nfs/nfs3acl.c 	struct page *pages[NFSACL_MAXPAGES] = { };
page              236 fs/nfs/nfs3proc.c 		.pages		= &page
page              510 fs/nfs/nfs3proc.c 	data->arg.symlink.pages = &page;
page              600 fs/nfs/nfs3proc.c 		.pages		= &page
page              508 fs/nfs/nfs3xdr.c 	struct page **page;
page              541 fs/nfs/nfs3xdr.c 	page = rcvbuf->pages;
page              542 fs/nfs/nfs3xdr.c 	kaddr = p = kmap_atomic(*page, KM_USER0);
page               79 fs/nfs/nfs4namespace.c 	path = nfs4_path(mnt_parent, dentry, page, PAGE_SIZE);
page              130 fs/nfs/nfs4namespace.c 	char *page = NULL, *page2 = NULL;
page              140 fs/nfs/nfs4namespace.c 	page = (char *) __get_free_page(GFP_USER);
page              141 fs/nfs/nfs4namespace.c 	if (!page)
page              149 fs/nfs/nfs4namespace.c 	error = nfs4_validate_fspath(mnt_parent, dentry, locations, page, page2);
page              191 fs/nfs/nfs4namespace.c 			snprintf(page, PAGE_SIZE, "%s:%s",
page              195 fs/nfs/nfs4namespace.c 			mnt = vfs_kern_mount(&nfs4_referral_fs_type, 0, page, &mountdata);
page              205 fs/nfs/nfs4namespace.c 	free_page((unsigned long) page);
page              222 fs/nfs/nfs4namespace.c 	struct page *page;
page              228 fs/nfs/nfs4namespace.c 	page = alloc_page(GFP_KERNEL);
page              229 fs/nfs/nfs4namespace.c 	if (page == NULL)
page              243 fs/nfs/nfs4namespace.c 	err = nfs4_proc_fs_locations(parent->d_inode, &dentry->d_name, fs_locations, page);
page              252 fs/nfs/nfs4namespace.c 	__free_page(page);
page             1564 fs/nfs/nfs4proc.c 	struct page *page = NULL;
page             1567 fs/nfs/nfs4proc.c 	page = alloc_page(GFP_KERNEL);
page             1568 fs/nfs/nfs4proc.c 	if (page == NULL)
page             1574 fs/nfs/nfs4proc.c 	status = nfs4_proc_fs_locations(dir, name, locations, page);
page             1590 fs/nfs/nfs4proc.c 	if (page)
page             1591 fs/nfs/nfs4proc.c 		__free_page(page);
page             1839 fs/nfs/nfs4proc.c 		.pages    = &page,
page             1857 fs/nfs/nfs4proc.c 				_nfs4_proc_readlink(inode, page, pgbase, pglen),
page             2147 fs/nfs/nfs4proc.c 	data->arg.u.symlink.pages = &page;
page             2164 fs/nfs/nfs4proc.c 				_nfs4_proc_symlink(dir, dentry, page,
page             2207 fs/nfs/nfs4proc.c 		.pages = &page,
page             2245 fs/nfs/nfs4proc.c 					page, count, plus),
page             2622 fs/nfs/nfs4proc.c 	struct page *pages[NFS4ACL_MAXPAGES];
page             2635 fs/nfs/nfs4proc.c 	struct page *localpage = NULL;
page             2707 fs/nfs/nfs4proc.c 	struct page *pages[NFS4ACL_MAXPAGES];
page             3645 fs/nfs/nfs4proc.c 		.page = page,
page             2160 fs/nfs/nfs4xdr.c 	xdr_inline_pages(&req->rq_rcv_buf, replen, &args->page,
page             3495 fs/nfs/nfs4xdr.c 	struct page	*page = *rcvbuf->pages;
page             3521 fs/nfs/nfs4xdr.c 	kaddr = p = kmap_atomic(page, KM_USER0);
page               77 fs/nfs/pagelist.c 	req->wb_page    = page;
page               79 fs/nfs/pagelist.c 	req->wb_index	= page->index;
page               80 fs/nfs/pagelist.c 	page_cache_get(page);
page               81 fs/nfs/pagelist.c 	BUG_ON(PagePrivate(page));
page               82 fs/nfs/pagelist.c 	BUG_ON(!PageLocked(page));
page               83 fs/nfs/pagelist.c 	BUG_ON(page->mapping->host != inode);
page              150 fs/nfs/pagelist.c 	struct page *page = req->wb_page;
page              151 fs/nfs/pagelist.c 	if (page != NULL) {
page              152 fs/nfs/pagelist.c 		page_cache_release(page);
page              176 fs/nfs/proc.c  		.pages		= &page
page              368 fs/nfs/proc.c  		.pages		= &page,
page              470 fs/nfs/proc.c  		.pages		= &page,
page               51 fs/nfs/read.c  			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
page               79 fs/nfs/read.c  	zero_user(page, 0, PAGE_CACHE_SIZE);
page               80 fs/nfs/read.c  	SetPageUptodate(page);
page               81 fs/nfs/read.c  	unlock_page(page);
page               90 fs/nfs/read.c  	struct page **pages;
page              121 fs/nfs/read.c  	len = nfs_page_length(page);
page              123 fs/nfs/read.c  		return nfs_return_empty_page(page);
page              124 fs/nfs/read.c  	new = nfs_create_request(ctx, inode, page, 0, len);
page              126 fs/nfs/read.c  		unlock_page(page);
page              130 fs/nfs/read.c  		zero_user_segment(page, len, PAGE_CACHE_SIZE);
page              240 fs/nfs/read.c  	struct page *page = req->wb_page;
page              263 fs/nfs/read.c  	ClearPageError(page);
page              272 fs/nfs/read.c  		data->pagevec[0] = page;
page              292 fs/nfs/read.c  	SetPageError(page);
page              300 fs/nfs/read.c  	struct page		**pages;
page              389 fs/nfs/read.c  	struct page *page = req->wb_page;
page              393 fs/nfs/read.c  		SetPageError(page);
page              396 fs/nfs/read.c  		if (!PageError(page))
page              397 fs/nfs/read.c  			SetPageUptodate(page);
page              412 fs/nfs/read.c  	struct page **pages;
page              480 fs/nfs/read.c  	struct inode *inode = page->mapping->host;
page              484 fs/nfs/read.c  		page, PAGE_CACHE_SIZE, page->index);
page              495 fs/nfs/read.c  	error = nfs_wb_page(inode, page);
page              498 fs/nfs/read.c  	if (PageUptodate(page))
page              513 fs/nfs/read.c  	error = nfs_readpage_async(ctx, inode, page);
page              518 fs/nfs/read.c  	unlock_page(page);
page              531 fs/nfs/read.c  	struct inode *inode = page->mapping->host;
page              536 fs/nfs/read.c  	error = nfs_wb_page(inode, page);
page              539 fs/nfs/read.c  	if (PageUptodate(page))
page              542 fs/nfs/read.c  	len = nfs_page_length(page);
page              544 fs/nfs/read.c  		return nfs_return_empty_page(page);
page              546 fs/nfs/read.c  	new = nfs_create_request(desc->ctx, inode, page, 0, len);
page              551 fs/nfs/read.c  		zero_user_segment(page, len, PAGE_CACHE_SIZE);
page              559 fs/nfs/read.c  	SetPageError(page);
page              561 fs/nfs/read.c  	unlock_page(page);
page               34 fs/nfs/symlink.c 	error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE);
page               37 fs/nfs/symlink.c 	SetPageUptodate(page);
page               38 fs/nfs/symlink.c 	unlock_page(page);
page               42 fs/nfs/symlink.c 	SetPageError(page);
page               43 fs/nfs/symlink.c 	unlock_page(page);
page               50 fs/nfs/symlink.c 	struct page *page;
page               56 fs/nfs/symlink.c 	page = read_cache_page(&inode->i_data, 0,
page               58 fs/nfs/symlink.c 	if (IS_ERR(page)) {
page               59 fs/nfs/symlink.c 		err = page;
page               62 fs/nfs/symlink.c 	nd_set_link(nd, kmap(page));
page               63 fs/nfs/symlink.c 	return page;
page               77 fs/nfs/write.c 			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
page              113 fs/nfs/write.c 	if (PagePrivate(page)) {
page              114 fs/nfs/write.c 		req = (struct nfs_page *)page_private(page);
page              123 fs/nfs/write.c 	struct inode *inode = page->mapping->host;
page              127 fs/nfs/write.c 	req = nfs_page_find_request_locked(page);
page              135 fs/nfs/write.c 	struct inode *inode = page->mapping->host;
page              142 fs/nfs/write.c 	if (i_size > 0 && page->index < end_index)
page              144 fs/nfs/write.c 	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
page              156 fs/nfs/write.c 	SetPageError(page);
page              157 fs/nfs/write.c 	nfs_zap_mapping(page->mapping->host, page->mapping);
page              165 fs/nfs/write.c 	if (PageUptodate(page))
page              169 fs/nfs/write.c 	if (count != nfs_page_length(page))
page              171 fs/nfs/write.c 	SetPageUptodate(page);
page              195 fs/nfs/write.c 	int ret = test_set_page_writeback(page);
page              198 fs/nfs/write.c 		struct inode *inode = page->mapping->host;
page              210 fs/nfs/write.c 	struct inode *inode = page->mapping->host;
page              213 fs/nfs/write.c 	end_page_writeback(page);
page              225 fs/nfs/write.c 	struct inode *inode = page->mapping->host;
page              231 fs/nfs/write.c 		req = nfs_page_find_request_locked(page);
page              254 fs/nfs/write.c 	if (nfs_set_page_writeback(page) != 0) {
page              268 fs/nfs/write.c 	struct inode *inode = page->mapping->host;
page              273 fs/nfs/write.c 	nfs_pageio_cond_complete(pgio, page->index);
page              274 fs/nfs/write.c 	return nfs_page_async_flush(pgio, page);
page              285 fs/nfs/write.c 	nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
page              286 fs/nfs/write.c 	err = nfs_do_writepage(page, wbc, &pgio);
page              299 fs/nfs/write.c 	ret = nfs_writepage_locked(page, wbc);
page              300 fs/nfs/write.c 	unlock_page(page);
page              308 fs/nfs/write.c 	ret = nfs_do_writepage(page, wbc, data);
page              309 fs/nfs/write.c 	unlock_page(page);
page              421 fs/nfs/write.c 	struct page *page = req->wb_page;
page              424 fs/nfs/write.c 		dec_zone_page_state(page, NR_UNSTABLE_NFS);
page              425 fs/nfs/write.c 		dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
page              574 fs/nfs/write.c 	if (!PagePrivate(page))
page              581 fs/nfs/write.c 		req = nfs_page_find_request_locked(page);
page              627 fs/nfs/write.c 	error = nfs_wb_page(inode, page);
page              642 fs/nfs/write.c 	struct inode *inode = page->mapping->host;
page              646 fs/nfs/write.c 	req = nfs_try_to_update_request(inode, page, offset, bytes);
page              649 fs/nfs/write.c 	req = nfs_create_request(ctx, inode, page, offset, bytes);
page              666 fs/nfs/write.c 	req = nfs_setup_write_request(ctx, page, offset, count);
page              670 fs/nfs/write.c 	nfs_grow_file(page, offset, count);
page              671 fs/nfs/write.c 	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
page              690 fs/nfs/write.c 		req = nfs_page_find_request(page);
page              693 fs/nfs/write.c 		do_flush = req->wb_page != page || req->wb_context != ctx;
page              697 fs/nfs/write.c 		status = nfs_wb_page(page->mapping->host, page);
page              709 fs/nfs/write.c 	return PageUptodate(page) &&
page              723 fs/nfs/write.c 	struct inode	*inode = page->mapping->host;
page              731 fs/nfs/write.c 		(long long)(page_offset(page) + offset));
page              738 fs/nfs/write.c 	if (nfs_write_pageuptodate(page, inode) &&
page              741 fs/nfs/write.c 		count = max(count + offset, nfs_page_length(page));
page              745 fs/nfs/write.c 	status = nfs_writepage_setup(ctx, page, offset, count);
page              747 fs/nfs/write.c 		nfs_set_pageerror(page);
page              749 fs/nfs/write.c 		__set_page_dirty_nobuffers(page);
page              868 fs/nfs/write.c 	struct page *page = req->wb_page;
page              891 fs/nfs/write.c 	ClearPageError(page);
page              900 fs/nfs/write.c 		data->pagevec[0] = page;
page              935 fs/nfs/write.c 	struct page		**pages;
page              995 fs/nfs/write.c 	struct page		*page = req->wb_page;
page              999 fs/nfs/write.c 		nfs_set_pageerror(page);
page             1006 fs/nfs/write.c 		struct inode *inode = page->mapping->host;
page             1056 fs/nfs/write.c 		struct page *page = req->wb_page;
page             1068 fs/nfs/write.c 			nfs_set_pageerror(page);
page             1077 fs/nfs/write.c 			nfs_end_page_writeback(page);
page             1083 fs/nfs/write.c 		nfs_end_page_writeback(page);
page             1458 fs/nfs/write.c 	loff_t range_start = page_offset(page);
page             1461 fs/nfs/write.c 		.bdi = page->mapping->backing_dev_info,
page             1469 fs/nfs/write.c 	BUG_ON(!PageLocked(page));
page             1471 fs/nfs/write.c 		req = nfs_page_find_request(page);
page             1484 fs/nfs/write.c 			cancel_dirty_page(page, PAGE_CACHE_SIZE);
page             1492 fs/nfs/write.c 	if (!PagePrivate(page))
page             1494 fs/nfs/write.c 	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
page             1502 fs/nfs/write.c 	loff_t range_start = page_offset(page);
page             1505 fs/nfs/write.c 		.bdi = page->mapping->backing_dev_info,
page             1514 fs/nfs/write.c 		if (clear_page_dirty_for_io(page)) {
page             1515 fs/nfs/write.c 			ret = nfs_writepage_locked(page, &wbc);
page             1518 fs/nfs/write.c 		} else if (!PagePrivate(page))
page             1520 fs/nfs/write.c 		ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
page             1523 fs/nfs/write.c 	} while (PagePrivate(page));
page             1535 fs/nfs/write.c 	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
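
The flush step repeated through the fs/nfs/write.c lines above hands a dirty locked page to ->writepage (which unlocks it) and otherwise just unlocks; a sketch of that step only, since the real loop also waits on the mapping and re-checks PagePrivate():

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

static int flush_locked_page(struct page *page,
			     struct writeback_control *wbc,
			     int (*writepage)(struct page *,
					      struct writeback_control *))
{
	if (clear_page_dirty_for_io(page))
		return writepage(page, wbc);	/* unlocks the page itself */
	unlock_page(page);
	return 0;
}
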
page             2300 fs/nfsd/nfs4xdr.c 	char *page;
page             2308 fs/nfsd/nfs4xdr.c 	page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused++]);
page             2319 fs/nfsd/nfs4xdr.c 	nfserr = nfsd_readlink(readlink->rl_rqstp, readlink->rl_fhp, page, &maxcount);
page             2349 fs/nfsd/nfs4xdr.c 	__be32 *page, *savep, *tailbase;
page             2382 fs/nfsd/nfs4xdr.c 	page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused++]);
page             2385 fs/nfsd/nfs4xdr.c 	readdir->buffer = page;
page             2394 fs/nfsd/nfs4xdr.c 	    readdir->buffer == page)
page              840 fs/nfsd/vfs.c  	struct page **pp = rqstp->rq_respages + rqstp->rq_resused;
page              841 fs/nfsd/vfs.c  	struct page *page = buf->page;
page              852 fs/nfsd/vfs.c  		get_page(page);
page              854 fs/nfsd/vfs.c  		*pp = page;
page              858 fs/nfsd/vfs.c  	} else if (page != pp[-1]) {
page              859 fs/nfsd/vfs.c  		get_page(page);
page              862 fs/nfsd/vfs.c  		*pp = page;
page               62 fs/ntfs/aops.c 	struct page *page;
page               67 fs/ntfs/aops.c 	page = bh->b_page;
page               68 fs/ntfs/aops.c 	vi = page->mapping->host;
page               77 fs/ntfs/aops.c 		file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) +
page               96 fs/ntfs/aops.c 			kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
page               99 fs/ntfs/aops.c 			flush_dcache_page(page);
page              105 fs/ntfs/aops.c 		SetPageError(page);
page              109 fs/ntfs/aops.c 	first = page_buffers(page);
page              137 fs/ntfs/aops.c 		if (likely(page_uptodate && !PageError(page)))
page              138 fs/ntfs/aops.c 			SetPageUptodate(page);
page              149 fs/ntfs/aops.c 		kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
page              155 fs/ntfs/aops.c 		flush_dcache_page(page);
page              156 fs/ntfs/aops.c 		if (likely(page_uptodate && !PageError(page)))
page              157 fs/ntfs/aops.c 			SetPageUptodate(page);
page              159 fs/ntfs/aops.c 	unlock_page(page);
page              201 fs/ntfs/aops.c 	vi = page->mapping->host;
page              211 fs/ntfs/aops.c 	if (!page_has_buffers(page)) {
page              212 fs/ntfs/aops.c 		create_empty_buffers(page, blocksize, 0);
page              213 fs/ntfs/aops.c 		if (unlikely(!page_has_buffers(page))) {
page              214 fs/ntfs/aops.c 			unlock_page(page);
page              218 fs/ntfs/aops.c 	bh = head = page_buffers(page);
page              232 fs/ntfs/aops.c 	iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
page              322 fs/ntfs/aops.c 			SetPageError(page);
page              341 fs/ntfs/aops.c 		zero_user(page, i * blocksize, blocksize);
page              372 fs/ntfs/aops.c 	if (likely(!PageError(page)))
page              373 fs/ntfs/aops.c 		SetPageUptodate(page);
page              376 fs/ntfs/aops.c 	unlock_page(page);
page              411 fs/ntfs/aops.c 	BUG_ON(!PageLocked(page));
page              412 fs/ntfs/aops.c 	vi = page->mapping->host;
page              415 fs/ntfs/aops.c 	if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
page              417 fs/ntfs/aops.c 		zero_user(page, 0, PAGE_CACHE_SIZE);
page              425 fs/ntfs/aops.c 	if (PageUptodate(page)) {
page              426 fs/ntfs/aops.c 		unlock_page(page);
page              449 fs/ntfs/aops.c 			return ntfs_read_compressed_block(page);
page              455 fs/ntfs/aops.c 		return ntfs_read_block(page);
page              465 fs/ntfs/aops.c 	if (unlikely(page->index > 0)) {
page              466 fs/ntfs/aops.c 		zero_user(page, 0, PAGE_CACHE_SIZE);
page              506 fs/ntfs/aops.c 	addr = kmap_atomic(page, KM_USER0);
page              513 fs/ntfs/aops.c 	flush_dcache_page(page);
page              520 fs/ntfs/aops.c 	SetPageUptodate(page);
page              522 fs/ntfs/aops.c 	unlock_page(page);
page              568 fs/ntfs/aops.c 	vi = page->mapping->host;
page              573 fs/ntfs/aops.c 			"0x%lx.", ni->mft_no, ni->type, page->index);
page              579 fs/ntfs/aops.c 	if (!page_has_buffers(page)) {
page              580 fs/ntfs/aops.c 		BUG_ON(!PageUptodate(page));
page              581 fs/ntfs/aops.c 		create_empty_buffers(page, blocksize,
page              583 fs/ntfs/aops.c 		if (unlikely(!page_has_buffers(page))) {
page              591 fs/ntfs/aops.c 			redirty_page_for_writepage(wbc, page);
page              592 fs/ntfs/aops.c 			unlock_page(page);
page              596 fs/ntfs/aops.c 	bh = head = page_buffers(page);
page              602 fs/ntfs/aops.c 	block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
page              690 fs/ntfs/aops.c 			if (!PageUptodate(page)) {
page              748 fs/ntfs/aops.c 			kaddr = kmap_atomic(page, KM_USER0);
page              795 fs/ntfs/aops.c 			zero_user(page, bh_offset(bh), blocksize);
page              822 fs/ntfs/aops.c 	if (unlikely(!PageUptodate(page))) {
page              832 fs/ntfs/aops.c 			SetPageUptodate(page);
page              866 fs/ntfs/aops.c 			redirty_page_for_writepage(wbc, page);
page              869 fs/ntfs/aops.c 			SetPageError(page);
page              872 fs/ntfs/aops.c 	BUG_ON(PageWriteback(page));
page              873 fs/ntfs/aops.c 	set_page_writeback(page);	/* Keeps try_to_free_buffers() away. */
page              885 fs/ntfs/aops.c 	unlock_page(page);
page              889 fs/ntfs/aops.c 		end_page_writeback(page);
page              923 fs/ntfs/aops.c 	struct inode *vi = page->mapping->host;
page              938 fs/ntfs/aops.c 			"0x%lx.", vi->i_ino, ni->type, page->index);
page              960 fs/ntfs/aops.c 	bh = head = page_buffers(page);
page              969 fs/ntfs/aops.c 	rec_block = block = (sector_t)page->index <<
page             1119 fs/ntfs/aops.c 	kaddr = kmap(page);
page             1121 fs/ntfs/aops.c 	BUG_ON(!PageUptodate(page));
page             1122 fs/ntfs/aops.c 	ClearPageUptodate(page);
page             1136 fs/ntfs/aops.c 			mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
page             1175 fs/ntfs/aops.c 					ni->type, page->index, ofs);
page             1191 fs/ntfs/aops.c 	flush_dcache_page(page);
page             1223 fs/ntfs/aops.c 					page->index, bh_offset(tbh));
page             1252 fs/ntfs/aops.c 			mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
page             1272 fs/ntfs/aops.c 	flush_dcache_page(page);
page             1295 fs/ntfs/aops.c 	SetPageUptodate(page);
page             1296 fs/ntfs/aops.c 	kunmap(page);
page             1304 fs/ntfs/aops.c 			SetPageError(page);
page             1310 fs/ntfs/aops.c 				"record 0x%lx.", page->index <<
page             1312 fs/ntfs/aops.c 		redirty_page_for_writepage(wbc, page);
page             1313 fs/ntfs/aops.c 		unlock_page(page);
page             1320 fs/ntfs/aops.c 		BUG_ON(PageWriteback(page));
page             1321 fs/ntfs/aops.c 		set_page_writeback(page);
page             1322 fs/ntfs/aops.c 		unlock_page(page);
page             1323 fs/ntfs/aops.c 		end_page_writeback(page);
page             1356 fs/ntfs/aops.c 	struct inode *vi = page->mapping->host;
page             1365 fs/ntfs/aops.c 	BUG_ON(!PageLocked(page));
page             1368 fs/ntfs/aops.c 	if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
page             1374 fs/ntfs/aops.c 		block_invalidatepage(page, 0);
page             1375 fs/ntfs/aops.c 		unlock_page(page);
page             1390 fs/ntfs/aops.c 			unlock_page(page);
page             1401 fs/ntfs/aops.c 			unlock_page(page);
page             1408 fs/ntfs/aops.c 			unlock_page(page);
page             1417 fs/ntfs/aops.c 		if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
page             1420 fs/ntfs/aops.c 			zero_user_segment(page, ofs, PAGE_CACHE_SIZE);
page             1424 fs/ntfs/aops.c 			return ntfs_write_mst_block(page, wbc);
page             1426 fs/ntfs/aops.c 		return ntfs_write_block(page, wbc);
page             1436 fs/ntfs/aops.c 	BUG_ON(page_has_buffers(page));
page             1437 fs/ntfs/aops.c 	BUG_ON(!PageUptodate(page));
page             1438 fs/ntfs/aops.c 	if (unlikely(page->index > 0)) {
page             1440 fs/ntfs/aops.c 				"Aborting write.", page->index);
page             1441 fs/ntfs/aops.c 		BUG_ON(PageWriteback(page));
page             1442 fs/ntfs/aops.c 		set_page_writeback(page);
page             1443 fs/ntfs/aops.c 		unlock_page(page);
page             1444 fs/ntfs/aops.c 		end_page_writeback(page);
page             1480 fs/ntfs/aops.c 	BUG_ON(PageWriteback(page));
page             1481 fs/ntfs/aops.c 	set_page_writeback(page);
page             1482 fs/ntfs/aops.c 	unlock_page(page);
page             1497 fs/ntfs/aops.c 	addr = kmap_atomic(page, KM_USER0);
page             1505 fs/ntfs/aops.c 	flush_dcache_page(page);
page             1508 fs/ntfs/aops.c 	end_page_writeback(page);
page             1522 fs/ntfs/aops.c 		redirty_page_for_writepage(wbc, page);
page             1527 fs/ntfs/aops.c 		SetPageError(page);
page             1530 fs/ntfs/aops.c 	unlock_page(page);
page             1592 fs/ntfs/aops.c 	struct address_space *mapping = page->mapping;
page             1597 fs/ntfs/aops.c 	BUG_ON(!PageUptodate(page));
page             1601 fs/ntfs/aops.c 	if (unlikely(!page_has_buffers(page))) {
page             1603 fs/ntfs/aops.c 		bh = head = alloc_page_buffers(page, bh_size, 1);
page             1605 fs/ntfs/aops.c 		if (likely(!page_has_buffers(page))) {
page             1614 fs/ntfs/aops.c 			attach_page_buffers(page, head);
page             1618 fs/ntfs/aops.c 	bh = head = page_buffers(page);
page             1629 fs/ntfs/aops.c 	__set_page_dirty_nobuffers(page);
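
The tail of the fs/ntfs/aops.c routine above guarantees the page has a closed ring of uptodate buffer heads before dirtying it without touching buffer state. A sketch that elides the mapping->private_lock the real code holds around the attach:

#include <linux/buffer_head.h>
#include <linux/mm.h>

static void dirty_page_keep_buffers(struct page *page, unsigned bh_size)
{
	if (!page_has_buffers(page)) {
		struct buffer_head *head, *bh, *tail;

		head = alloc_page_buffers(page, bh_size, 1);
		bh = head;
		do {			/* mark uptodate and close the ring */
			set_buffer_uptodate(bh);
			tail = bh;
			bh = bh->b_this_page;
		} while (bh);
		tail->b_this_page = head;
		attach_page_buffers(page, head);
	}
	__set_page_dirty_nobuffers(page);
}
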
page               42 fs/ntfs/aops.h 	kunmap(page);
page               43 fs/ntfs/aops.h 	page_cache_release(page);
page               86 fs/ntfs/aops.h static inline struct page *ntfs_map_page(struct address_space *mapping,
page               89 fs/ntfs/aops.h 	struct page *page = read_mapping_page(mapping, index, NULL);
page               91 fs/ntfs/aops.h 	if (!IS_ERR(page)) {
page               92 fs/ntfs/aops.h 		kmap(page);
page               93 fs/ntfs/aops.h 		if (!PageError(page))
page               94 fs/ntfs/aops.h 			return page;
page               95 fs/ntfs/aops.h 		ntfs_unmap_page(page);
page               98 fs/ntfs/aops.h 	return page;
page               91 fs/ntfs/attrib.c 	struct page *put_this_page = NULL;
page              153 fs/ntfs/attrib.c 				put_this_page = old_ctx.ntfs_ino->page;
page             1543 fs/ntfs/attrib.c 	struct page *page;
page             1579 fs/ntfs/attrib.c 		page = find_or_create_page(vi->i_mapping, 0,
page             1581 fs/ntfs/attrib.c 		if (unlikely(!page))
page             1596 fs/ntfs/attrib.c 		page = NULL;
page             1657 fs/ntfs/attrib.c 	if (page && !PageUptodate(page)) {
page             1658 fs/ntfs/attrib.c 		kaddr = kmap_atomic(page, KM_USER0);
page             1664 fs/ntfs/attrib.c 		flush_dcache_page(page);
page             1665 fs/ntfs/attrib.c 		SetPageUptodate(page);
page             1747 fs/ntfs/attrib.c 	if (page) {
page             1748 fs/ntfs/attrib.c 		set_page_dirty(page);
page             1749 fs/ntfs/attrib.c 		unlock_page(page);
page             1750 fs/ntfs/attrib.c 		mark_page_accessed(page);
page             1751 fs/ntfs/attrib.c 		page_cache_release(page);
page             1807 fs/ntfs/attrib.c 	if (page) {
page             1808 fs/ntfs/attrib.c 		kaddr = kmap_atomic(page, KM_USER0);
page             1837 fs/ntfs/attrib.c 		unlock_page(page);
page             1838 fs/ntfs/attrib.c 		page_cache_release(page);
page             2497 fs/ntfs/attrib.c 	struct page *page;
page             2529 fs/ntfs/attrib.c 		page = read_mapping_page(mapping, idx, NULL);
page             2530 fs/ntfs/attrib.c 		if (IS_ERR(page)) {
page             2533 fs/ntfs/attrib.c 			return PTR_ERR(page);
page             2542 fs/ntfs/attrib.c 		kaddr = kmap_atomic(page, KM_USER0);
page             2544 fs/ntfs/attrib.c 		flush_dcache_page(page);
page             2546 fs/ntfs/attrib.c 		set_page_dirty(page);
page             2547 fs/ntfs/attrib.c 		page_cache_release(page);
page             2557 fs/ntfs/attrib.c 		page = grab_cache_page(mapping, idx);
page             2558 fs/ntfs/attrib.c 		if (unlikely(!page)) {
page             2563 fs/ntfs/attrib.c 		kaddr = kmap_atomic(page, KM_USER0);
page             2565 fs/ntfs/attrib.c 		flush_dcache_page(page);
page             2571 fs/ntfs/attrib.c 		if (page_has_buffers(page)) {
page             2574 fs/ntfs/attrib.c 			bh = head = page_buffers(page);
page             2580 fs/ntfs/attrib.c 		SetPageUptodate(page);
page             2585 fs/ntfs/attrib.c 		set_page_dirty(page);
page             2587 fs/ntfs/attrib.c 		unlock_page(page);
page             2588 fs/ntfs/attrib.c 		page_cache_release(page);
page             2594 fs/ntfs/attrib.c 		page = read_mapping_page(mapping, idx, NULL);
page             2595 fs/ntfs/attrib.c 		if (IS_ERR(page)) {
page             2598 fs/ntfs/attrib.c 			return PTR_ERR(page);
page             2600 fs/ntfs/attrib.c 		kaddr = kmap_atomic(page, KM_USER0);
page             2602 fs/ntfs/attrib.c 		flush_dcache_page(page);
page             2604 fs/ntfs/attrib.c 		set_page_dirty(page);
page             2605 fs/ntfs/attrib.c 		page_cache_release(page);
page               53 fs/ntfs/bitmap.c 	struct page *page;
page               75 fs/ntfs/bitmap.c 	page = ntfs_map_page(mapping, index);
page               76 fs/ntfs/bitmap.c 	if (IS_ERR(page)) {
page               79 fs/ntfs/bitmap.c 					"%li), aborting.", PTR_ERR(page));
page               80 fs/ntfs/bitmap.c 		return PTR_ERR(page);
page               82 fs/ntfs/bitmap.c 	kaddr = page_address(page);
page              124 fs/ntfs/bitmap.c 		flush_dcache_page(page);
page              125 fs/ntfs/bitmap.c 		set_page_dirty(page);
page              126 fs/ntfs/bitmap.c 		ntfs_unmap_page(page);
page              127 fs/ntfs/bitmap.c 		page = ntfs_map_page(mapping, ++index);
page              128 fs/ntfs/bitmap.c 		if (IS_ERR(page))
page              130 fs/ntfs/bitmap.c 		kaddr = page_address(page);
page              160 fs/ntfs/bitmap.c 	flush_dcache_page(page);
page              161 fs/ntfs/bitmap.c 	set_page_dirty(page);
page              162 fs/ntfs/bitmap.c 	ntfs_unmap_page(page);
page              172 fs/ntfs/bitmap.c 		return PTR_ERR(page);
page              181 fs/ntfs/bitmap.c 				"%li), aborting.", PTR_ERR(page));
page              187 fs/ntfs/bitmap.c 				"Unmount and run chkdsk.", PTR_ERR(page), pos);
page              190 fs/ntfs/bitmap.c 	return PTR_ERR(page);
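
When the bitmap walk in the fs/ntfs/bitmap.c lines above crosses a page boundary, the current page is flushed, dirtied and unmapped before the next one is mapped. A sketch using the ntfs_map_page()/ntfs_unmap_page() helpers from fs/ntfs/aops.h shown earlier, with error unwinding elided:

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include "aops.h"	/* ntfs_map_page(), ntfs_unmap_page() */

static u8 *bitmap_next_page(struct address_space *mapping,
			    struct page **pagep, pgoff_t *index)
{
	struct page *page = *pagep;

	flush_dcache_page(page);
	set_page_dirty(page);
	ntfs_unmap_page(page);
	page = ntfs_map_page(mapping, ++*index);
	if (IS_ERR(page))
		return NULL;		/* the real code propagates PTR_ERR() */
	*pagep = page;
	return page_address(page);
}
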
page              102 fs/ntfs/compress.c 	u8 *kp = page_address(page);
page              106 fs/ntfs/compress.c 	if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) {
page              125 fs/ntfs/compress.c 	if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) &&
page              127 fs/ntfs/compress.c 		zero_partial_compressed_page(page, initialized_size);
page              184 fs/ntfs/compress.c 	struct page *dp;	/* Current destination page being worked on. */
page              485 fs/ntfs/compress.c 	struct address_space *mapping = page->mapping;
page              494 fs/ntfs/compress.c 	unsigned long offset, index = page->index;
page              521 fs/ntfs/compress.c 	struct page **pages;
page              533 fs/ntfs/compress.c 	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS);
page              542 fs/ntfs/compress.c 		unlock_page(page);
page              553 fs/ntfs/compress.c 	pages[xpage] = page;
page              568 fs/ntfs/compress.c 		zero_user(page, 0, PAGE_CACHE_SIZE);
page              570 fs/ntfs/compress.c 		SetPageUptodate(page);
page              571 fs/ntfs/compress.c 		unlock_page(page);
page              579 fs/ntfs/compress.c 		page = pages[i];
page              580 fs/ntfs/compress.c 		if (page) {
page              586 fs/ntfs/compress.c 			if (!PageDirty(page) && (!PageUptodate(page) ||
page              587 fs/ntfs/compress.c 					PageError(page))) {
page              588 fs/ntfs/compress.c 				ClearPageError(page);
page              589 fs/ntfs/compress.c 				kmap(page);
page              592 fs/ntfs/compress.c 			unlock_page(page);
page              593 fs/ntfs/compress.c 			page_cache_release(page);
page              754 fs/ntfs/compress.c 			page = pages[cur_page];
page              755 fs/ntfs/compress.c 			if (page) {
page              762 fs/ntfs/compress.c 					clear_page(page_address(page));
page              764 fs/ntfs/compress.c 					memset(page_address(page) + cur_ofs, 0,
page              767 fs/ntfs/compress.c 				flush_dcache_page(page);
page              768 fs/ntfs/compress.c 				kunmap(page);
page              769 fs/ntfs/compress.c 				SetPageUptodate(page);
page              770 fs/ntfs/compress.c 				unlock_page(page);
page              774 fs/ntfs/compress.c 					page_cache_release(page);
page              784 fs/ntfs/compress.c 			page = pages[cur_page];
page              785 fs/ntfs/compress.c 			if (page)
page              786 fs/ntfs/compress.c 				memset(page_address(page) + cur_ofs, 0,
page              816 fs/ntfs/compress.c 			page = pages[cur_page];
page              817 fs/ntfs/compress.c 			if (page)
page              818 fs/ntfs/compress.c 				memcpy(page_address(page) + cur_ofs, cb_pos,
page              827 fs/ntfs/compress.c 			page = pages[cur_page];
page              828 fs/ntfs/compress.c 			if (page)
page              829 fs/ntfs/compress.c 				memcpy(page_address(page) + cur_ofs, cb_pos,
page              838 fs/ntfs/compress.c 			page = pages[cur2_page];
page              839 fs/ntfs/compress.c 			if (page) {
page              844 fs/ntfs/compress.c 				handle_bounds_compressed_page(page, i_size,
page              846 fs/ntfs/compress.c 				flush_dcache_page(page);
page              847 fs/ntfs/compress.c 				kunmap(page);
page              848 fs/ntfs/compress.c 				SetPageUptodate(page);
page              849 fs/ntfs/compress.c 				unlock_page(page);
page              853 fs/ntfs/compress.c 					page_cache_release(page);
page              881 fs/ntfs/compress.c 				page = pages[prev_cur_page];
page              882 fs/ntfs/compress.c 				if (page) {
page              883 fs/ntfs/compress.c 					flush_dcache_page(page);
page              884 fs/ntfs/compress.c 					kunmap(page);
page              885 fs/ntfs/compress.c 					unlock_page(page);
page              887 fs/ntfs/compress.c 						page_cache_release(page);
page              907 fs/ntfs/compress.c 		page = pages[cur_page];
page              908 fs/ntfs/compress.c 		if (page) {
page              912 fs/ntfs/compress.c 					"0x%lx.", ni->mft_no, page->index);
page              913 fs/ntfs/compress.c 			flush_dcache_page(page);
page              914 fs/ntfs/compress.c 			kunmap(page);
page              915 fs/ntfs/compress.c 			unlock_page(page);
page              917 fs/ntfs/compress.c 				page_cache_release(page);
page              958 fs/ntfs/compress.c 		page = pages[i];
page              959 fs/ntfs/compress.c 		if (page) {
page              960 fs/ntfs/compress.c 			flush_dcache_page(page);
page              961 fs/ntfs/compress.c 			kunmap(page);
page              962 fs/ntfs/compress.c 			unlock_page(page);
page              964 fs/ntfs/compress.c 				page_cache_release(page);
page               94 fs/ntfs/dir.c  	struct page *page;
page              320 fs/ntfs/dir.c  	page = ntfs_map_page(ia_mapping, vcn <<
page              322 fs/ntfs/dir.c  	if (IS_ERR(page)) {
page              324 fs/ntfs/dir.c  				-PTR_ERR(page));
page              325 fs/ntfs/dir.c  		err = PTR_ERR(page);
page              328 fs/ntfs/dir.c  	lock_page(page);
page              329 fs/ntfs/dir.c  	kaddr = (u8*)page_address(page);
page              449 fs/ntfs/dir.c  			unlock_page(page);
page              450 fs/ntfs/dir.c  			ntfs_unmap_page(page);
page              482 fs/ntfs/dir.c  				unlock_page(page);
page              483 fs/ntfs/dir.c  				ntfs_unmap_page(page);
page              565 fs/ntfs/dir.c  			unlock_page(page);
page              566 fs/ntfs/dir.c  			ntfs_unmap_page(page);
page              579 fs/ntfs/dir.c  		unlock_page(page);
page              580 fs/ntfs/dir.c  		ntfs_unmap_page(page);
page              586 fs/ntfs/dir.c  	unlock_page(page);
page              587 fs/ntfs/dir.c  	ntfs_unmap_page(page);
page              648 fs/ntfs/dir.c  	struct page *page;
page              798 fs/ntfs/dir.c  	page = ntfs_map_page(ia_mapping, vcn <<
page              800 fs/ntfs/dir.c  	if (IS_ERR(page)) {
page              802 fs/ntfs/dir.c  				-PTR_ERR(page));
page              803 fs/ntfs/dir.c  		err = PTR_ERR(page);
page              806 fs/ntfs/dir.c  	lock_page(page);
page              807 fs/ntfs/dir.c  	kaddr = (u8*)page_address(page);
page              910 fs/ntfs/dir.c  			unlock_page(page);
page              911 fs/ntfs/dir.c  			ntfs_unmap_page(page);
page              974 fs/ntfs/dir.c  			unlock_page(page);
page              975 fs/ntfs/dir.c  			ntfs_unmap_page(page);
page              986 fs/ntfs/dir.c  	unlock_page(page);
page              987 fs/ntfs/dir.c  	ntfs_unmap_page(page);
page             1114 fs/ntfs/dir.c  	struct page *bmp_page = NULL, *ia_page = NULL;
page              125 fs/ntfs/file.c 	struct page *page = NULL;
page              234 fs/ntfs/file.c 		page = read_mapping_page(mapping, index, NULL);
page              235 fs/ntfs/file.c 		if (IS_ERR(page)) {
page              236 fs/ntfs/file.c 			err = PTR_ERR(page);
page              239 fs/ntfs/file.c 		if (unlikely(PageError(page))) {
page              240 fs/ntfs/file.c 			page_cache_release(page);
page              254 fs/ntfs/file.c 		set_page_dirty(page);
page              255 fs/ntfs/file.c 		page_cache_release(page);
page              498 fs/ntfs/file.c 	struct page *page;
page              534 fs/ntfs/file.c 		page = pages[u];
page              535 fs/ntfs/file.c 		BUG_ON(!page);
page              540 fs/ntfs/file.c 		if (!page_has_buffers(page)) {
page              541 fs/ntfs/file.c 			create_empty_buffers(page, blocksize, 0);
page              542 fs/ntfs/file.c 			if (unlikely(!page_has_buffers(page)))
page              562 fs/ntfs/file.c 	page = pages[u];
page              563 fs/ntfs/file.c 	bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
page              564 fs/ntfs/file.c 	bh = head = page_buffers(page);
page              587 fs/ntfs/file.c 			if (PageUptodate(page)) {
page              610 fs/ntfs/file.c 					zero_user(page, bh_offset(bh),
page              645 fs/ntfs/file.c 			if (PageUptodate(page)) {
page              686 fs/ntfs/file.c 						zero_user(page, bh_offset(bh),
page              705 fs/ntfs/file.c 					zero_user(page, bh_offset(bh),
page              718 fs/ntfs/file.c 				kaddr = kmap_atomic(page, KM_USER0);
page              728 fs/ntfs/file.c 				flush_dcache_page(page);
page              741 fs/ntfs/file.c 			if (PageUptodate(page)) {
page              745 fs/ntfs/file.c 				zero_user(page, bh_offset(bh), blocksize);
page              865 fs/ntfs/file.c 				if (PageUptodate(page)) {
page              869 fs/ntfs/file.c 					zero_user(page, bh_offset(bh),
page             1117 fs/ntfs/file.c 			page = bh->b_page;
page             1118 fs/ntfs/file.c 			bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) +
page             1129 fs/ntfs/file.c 				zero_user_segment(page, bh_offset(bh) + ofs,
page             1256 fs/ntfs/file.c 		page = pages[u];
page             1257 fs/ntfs/file.c 		bh = head = page_buffers(page);
page             1260 fs/ntfs/file.c 					((s64)page->index << PAGE_CACHE_SHIFT) +
page             1267 fs/ntfs/file.c 				if (PageUptodate(page))
page             1270 fs/ntfs/file.c 					zero_user(page, bh_offset(bh),
page             1291 fs/ntfs/file.c 	struct page **last_page = pages + nr_pages;
page             1408 fs/ntfs/file.c 	struct page **last_page = pages + nr_pages;
page             1503 fs/ntfs/file.c 		struct page *page;
page             1506 fs/ntfs/file.c 		page = pages[u];
page             1507 fs/ntfs/file.c 		bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
page             1508 fs/ntfs/file.c 		bh = head = page_buffers(page);
page             1526 fs/ntfs/file.c 		if (!partial && !PageUptodate(page))
page             1527 fs/ntfs/file.c 			SetPageUptodate(page);
page             1643 fs/ntfs/file.c 	struct page *page;
page             1654 fs/ntfs/file.c 	page = pages[0];
page             1655 fs/ntfs/file.c 	BUG_ON(!page);
page             1656 fs/ntfs/file.c 	vi = page->mapping->host;
page             1660 fs/ntfs/file.c 			vi->i_ino, ni->type, page->index, nr_pages,
page             1706 fs/ntfs/file.c 	kaddr = kmap_atomic(page, KM_USER0);
page             1718 fs/ntfs/file.c 	if (!PageUptodate(page)) {
page             1725 fs/ntfs/file.c 		flush_dcache_page(page);
page             1726 fs/ntfs/file.c 		SetPageUptodate(page);
page             1752 fs/ntfs/file.c 		if (PageUptodate(page)) {
page             1760 fs/ntfs/file.c 			__set_page_dirty_nobuffers(page);
page             1791 fs/ntfs/file.c 	struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
page             1792 fs/ntfs/file.c 	struct page *cached_page = NULL;
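
The fs/ntfs/file.c block above (lines 534-563) prepares pages for a non-resident write by attaching buffer heads on demand and then walking the circular per-page list. The attach step uses only the generic buffer-head API; a sketch, assuming the blocksize is already known:

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/buffer_head.h>

	static int attach_buffers(struct page *page, unsigned int blocksize)
	{
		struct buffer_head *bh, *head;

		if (!page_has_buffers(page)) {
			create_empty_buffers(page, blocksize, 0);
			if (unlikely(!page_has_buffers(page)))
				return -ENOMEM;	/* allocation failed */
		}
		bh = head = page_buffers(page);
		do {
			/* ... map, zero or read each block as needed ... */
		} while ((bh = bh->b_this_page) != head);
		return 0;
	}
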
page               64 fs/ntfs/index.c 			struct page *page = ictx->page;
page               65 fs/ntfs/index.c 			if (page) {
page               66 fs/ntfs/index.c 				BUG_ON(!PageLocked(page));
page               67 fs/ntfs/index.c 				unlock_page(page);
page               68 fs/ntfs/index.c 				ntfs_unmap_page(page);
page              132 fs/ntfs/index.c 	struct page *page;
page              212 fs/ntfs/index.c 			ictx->page = NULL;
page              276 fs/ntfs/index.c 	page = ntfs_map_page(ia_mapping, vcn <<
page              278 fs/ntfs/index.c 	if (IS_ERR(page)) {
page              280 fs/ntfs/index.c 				-PTR_ERR(page));
page              281 fs/ntfs/index.c 		err = PTR_ERR(page);
page              284 fs/ntfs/index.c 	lock_page(page);
page              285 fs/ntfs/index.c 	kaddr = (u8*)page_address(page);
page              381 fs/ntfs/index.c 			ictx->page = page;
page              432 fs/ntfs/index.c 		unlock_page(page);
page              433 fs/ntfs/index.c 		ntfs_unmap_page(page);
page              439 fs/ntfs/index.c 	unlock_page(page);
page              440 fs/ntfs/index.c 	ntfs_unmap_page(page);
page               85 fs/ntfs/index.h 	struct page *page;
page              116 fs/ntfs/index.h 		flush_dcache_page(ictx->page);
page              142 fs/ntfs/index.h 		mark_ntfs_record_dirty(ictx->page,
page              143 fs/ntfs/index.h 				(u8*)ictx->ia - (u8*)page_address(ictx->page));
page              339 fs/ntfs/inode.c 	BUG_ON(ni->page);
page              362 fs/ntfs/inode.c 	BUG_ON(ni->page);
page              396 fs/ntfs/inode.c 	ni->page = NULL;
page             3052 fs/ntfs/inode.c 			mark_ntfs_record_dirty(ctx->ntfs_ino->page,
page               87 fs/ntfs/inode.h 	struct page *page;	/* The page containing the mft record of the
page              158 fs/ntfs/lcnalloc.c 	struct page *page = NULL;
page              276 fs/ntfs/lcnalloc.c 		if (likely(page)) {
page              279 fs/ntfs/lcnalloc.c 				flush_dcache_page(page);
page              280 fs/ntfs/lcnalloc.c 				set_page_dirty(page);
page              283 fs/ntfs/lcnalloc.c 			ntfs_unmap_page(page);
page              285 fs/ntfs/lcnalloc.c 		page = ntfs_map_page(mapping, last_read_pos >>
page              287 fs/ntfs/lcnalloc.c 		if (IS_ERR(page)) {
page              288 fs/ntfs/lcnalloc.c 			err = PTR_ERR(page);
page              293 fs/ntfs/lcnalloc.c 		buf = page_address(page) + buf_size;
page              744 fs/ntfs/lcnalloc.c 	if (likely(page && !IS_ERR(page))) {
page              747 fs/ntfs/lcnalloc.c 			flush_dcache_page(page);
page              748 fs/ntfs/lcnalloc.c 			set_page_dirty(page);
page              751 fs/ntfs/lcnalloc.c 		ntfs_unmap_page(page);
page              388 fs/ntfs/logfile.c 		struct page *page;
page              399 fs/ntfs/logfile.c 			page = ntfs_map_page(vi->i_mapping, idx);
page              400 fs/ntfs/logfile.c 			if (IS_ERR(page)) {
page              403 fs/ntfs/logfile.c 				err = PTR_ERR(page);
page              409 fs/ntfs/logfile.c 			memcpy((u8*)trp + have_read, page_address(page), size);
page              410 fs/ntfs/logfile.c 			ntfs_unmap_page(page);
page              489 fs/ntfs/logfile.c 	struct page *page = NULL;
page              542 fs/ntfs/logfile.c 		if (!page || page->index != idx) {
page              543 fs/ntfs/logfile.c 			if (page)
page              544 fs/ntfs/logfile.c 				ntfs_unmap_page(page);
page              545 fs/ntfs/logfile.c 			page = ntfs_map_page(mapping, idx);
page              546 fs/ntfs/logfile.c 			if (IS_ERR(page)) {
page              552 fs/ntfs/logfile.c 		kaddr = (u8*)page_address(page) + (pos & ~PAGE_CACHE_MASK);
page              605 fs/ntfs/logfile.c 			ntfs_unmap_page(page);
page              612 fs/ntfs/logfile.c 	if (page)
page              613 fs/ntfs/logfile.c 		ntfs_unmap_page(page);
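
The fs/ntfs/logfile.c loop above (lines 542-553) avoids remapping on every record: the currently mapped page is cached and only exchanged when the next record lies on a different page. A condensed sketch of that loop, again assuming the ntfs_map_page()/ntfs_unmap_page() helpers:

	#include <linux/pagemap.h>
	#include <linux/err.h>

	static int scan_records(struct address_space *mapping, s64 size,
			unsigned int rec_size)
	{
		struct page *page = NULL;
		s64 pos;

		for (pos = 0; pos < size; pos += rec_size) {
			pgoff_t idx = pos >> PAGE_CACHE_SHIFT;

			/* Remap only when crossing a page boundary. */
			if (!page || page->index != idx) {
				if (page)
					ntfs_unmap_page(page);
				page = ntfs_map_page(mapping, idx);
				if (IS_ERR(page))
					return PTR_ERR(page);
			}
			/* record at page_address(page) + (pos & ~PAGE_CACHE_MASK) */
		}
		if (page)
			ntfs_unmap_page(page);
		return 0;
	}
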
page               51 fs/ntfs/mft.c  	struct page *page;
page               55 fs/ntfs/mft.c  	BUG_ON(ni->page);
page               74 fs/ntfs/mft.c  			page = ERR_PTR(-ENOENT);
page               83 fs/ntfs/mft.c  	page = ntfs_map_page(mft_vi->i_mapping, index);
page               84 fs/ntfs/mft.c  	if (likely(!IS_ERR(page))) {
page               86 fs/ntfs/mft.c  		if (likely(ntfs_is_mft_recordp((le32*)(page_address(page) +
page               88 fs/ntfs/mft.c  			ni->page = page;
page               90 fs/ntfs/mft.c  			return page_address(page) + ofs;
page               94 fs/ntfs/mft.c  		ntfs_unmap_page(page);
page               95 fs/ntfs/mft.c  		page = ERR_PTR(-EIO);
page               99 fs/ntfs/mft.c  	ni->page = NULL;
page              101 fs/ntfs/mft.c  	return (void*)page;
page              192 fs/ntfs/mft.c  	BUG_ON(!ni->page);
page              195 fs/ntfs/mft.c  	ntfs_unmap_page(ni->page);
page              196 fs/ntfs/mft.c  	ni->page = NULL;
page              214 fs/ntfs/mft.c  	struct page *page = ni->page;
page              216 fs/ntfs/mft.c  	BUG_ON(!page);
page              401 fs/ntfs/mft.c  	mark_ntfs_record_dirty(ni->page, ni->page_ofs);
page              468 fs/ntfs/mft.c  	struct page *page;
page              489 fs/ntfs/mft.c  	page = ntfs_map_page(vol->mftmirr_ino->i_mapping, mft_no >>
page              491 fs/ntfs/mft.c  	if (IS_ERR(page)) {
page              493 fs/ntfs/mft.c  		err = PTR_ERR(page);
page              496 fs/ntfs/mft.c  	lock_page(page);
page              497 fs/ntfs/mft.c  	BUG_ON(!PageUptodate(page));
page              498 fs/ntfs/mft.c  	ClearPageUptodate(page);
page              502 fs/ntfs/mft.c  	kmirr = page_address(page) + page_ofs;
page              506 fs/ntfs/mft.c  	if (unlikely(!page_has_buffers(page))) {
page              509 fs/ntfs/mft.c  		bh = head = alloc_page_buffers(page, blocksize, 1);
page              516 fs/ntfs/mft.c  		attach_page_buffers(page, head);
page              518 fs/ntfs/mft.c  	bh = head = page_buffers(page);
page              523 fs/ntfs/mft.c  	m_start = kmirr - (u8*)page_address(page);
page              619 fs/ntfs/mft.c  	flush_dcache_page(page);
page              620 fs/ntfs/mft.c  	SetPageUptodate(page);
page              621 fs/ntfs/mft.c  	unlock_page(page);
page              622 fs/ntfs/mft.c  	ntfs_unmap_page(page);
page              672 fs/ntfs/mft.c  	struct page *page = ni->page;
page              685 fs/ntfs/mft.c  	BUG_ON(!PageLocked(page));
page              694 fs/ntfs/mft.c  	bh = head = page_buffers(page);
page              804 fs/ntfs/mft.c  			if (PageUptodate(page))
page             1138 fs/ntfs/mft.c  	struct page *page;
page             1192 fs/ntfs/mft.c  			page = ntfs_map_page(mftbmp_mapping,
page             1194 fs/ntfs/mft.c  			if (IS_ERR(page)) {
page             1197 fs/ntfs/mft.c  				return PTR_ERR(page);
page             1199 fs/ntfs/mft.c  			buf = (u8*)page_address(page) + page_ofs;
page             1214 fs/ntfs/mft.c  						ntfs_unmap_page(page);
page             1218 fs/ntfs/mft.c  					flush_dcache_page(page);
page             1219 fs/ntfs/mft.c  					set_page_dirty(page);
page             1220 fs/ntfs/mft.c  					ntfs_unmap_page(page);
page             1232 fs/ntfs/mft.c  			ntfs_unmap_page(page);
page             1283 fs/ntfs/mft.c  	struct page *page;
page             1330 fs/ntfs/mft.c  	page = ntfs_map_page(vol->lcnbmp_ino->i_mapping,
page             1332 fs/ntfs/mft.c  	if (IS_ERR(page)) {
page             1335 fs/ntfs/mft.c  		return PTR_ERR(page);
page             1337 fs/ntfs/mft.c  	b = (u8*)page_address(page) + (ll & ~PAGE_CACHE_MASK);
page             1343 fs/ntfs/mft.c  		flush_dcache_page(page);
page             1344 fs/ntfs/mft.c  		set_page_dirty(page);
page             1346 fs/ntfs/mft.c  		ntfs_unmap_page(page);
page             1354 fs/ntfs/mft.c  		ntfs_unmap_page(page);
page             2095 fs/ntfs/mft.c  	struct page *page;
page             2120 fs/ntfs/mft.c  	page = ntfs_map_page(mft_vi->i_mapping, index);
page             2121 fs/ntfs/mft.c  	if (IS_ERR(page)) {
page             2124 fs/ntfs/mft.c  		return PTR_ERR(page);
page             2126 fs/ntfs/mft.c  	lock_page(page);
page             2127 fs/ntfs/mft.c  	BUG_ON(!PageUptodate(page));
page             2128 fs/ntfs/mft.c  	ClearPageUptodate(page);
page             2129 fs/ntfs/mft.c  	m = (MFT_RECORD*)((u8*)page_address(page) + ofs);
page             2134 fs/ntfs/mft.c  		SetPageUptodate(page);
page             2135 fs/ntfs/mft.c  		unlock_page(page);
page             2136 fs/ntfs/mft.c  		ntfs_unmap_page(page);
page             2139 fs/ntfs/mft.c  	flush_dcache_page(page);
page             2140 fs/ntfs/mft.c  	SetPageUptodate(page);
page             2141 fs/ntfs/mft.c  	unlock_page(page);
page             2147 fs/ntfs/mft.c  	mark_ntfs_record_dirty(page, ofs);
page             2148 fs/ntfs/mft.c  	ntfs_unmap_page(page);
page             2249 fs/ntfs/mft.c  	struct page *page;
page             2521 fs/ntfs/mft.c  	page = ntfs_map_page(vol->mft_ino->i_mapping, index);
page             2522 fs/ntfs/mft.c  	if (IS_ERR(page)) {
page             2525 fs/ntfs/mft.c  		err = PTR_ERR(page);
page             2528 fs/ntfs/mft.c  	lock_page(page);
page             2529 fs/ntfs/mft.c  	BUG_ON(!PageUptodate(page));
page             2530 fs/ntfs/mft.c  	ClearPageUptodate(page);
page             2531 fs/ntfs/mft.c  	m = (MFT_RECORD*)((u8*)page_address(page) + ofs);
page             2543 fs/ntfs/mft.c  			SetPageUptodate(page);
page             2544 fs/ntfs/mft.c  			unlock_page(page);
page             2545 fs/ntfs/mft.c  			ntfs_unmap_page(page);
page             2562 fs/ntfs/mft.c  			SetPageUptodate(page);
page             2563 fs/ntfs/mft.c  			unlock_page(page);
page             2564 fs/ntfs/mft.c  			ntfs_unmap_page(page);
page             2576 fs/ntfs/mft.c  	flush_dcache_page(page);
page             2577 fs/ntfs/mft.c  	SetPageUptodate(page);
page             2599 fs/ntfs/mft.c  			flush_dcache_page(page);
page             2601 fs/ntfs/mft.c  			mark_ntfs_record_dirty(page, ofs);
page             2602 fs/ntfs/mft.c  			unlock_page(page);
page             2603 fs/ntfs/mft.c  			ntfs_unmap_page(page);
page             2613 fs/ntfs/mft.c  		mark_ntfs_record_dirty(page, ofs);
page             2614 fs/ntfs/mft.c  		unlock_page(page);
page             2619 fs/ntfs/mft.c  		ntfs_unmap_page(page);
page             2632 fs/ntfs/mft.c  			flush_dcache_page(page);
page             2634 fs/ntfs/mft.c  			mark_ntfs_record_dirty(page, ofs);
page             2635 fs/ntfs/mft.c  			unlock_page(page);
page             2636 fs/ntfs/mft.c  			ntfs_unmap_page(page);
page             2708 fs/ntfs/mft.c  		ni->page = page;
page             2720 fs/ntfs/mft.c  		mark_ntfs_record_dirty(page, ofs);
page             2721 fs/ntfs/mft.c  		unlock_page(page);
page               57 fs/ntfs/mft.h  	flush_dcache_page(ni->page);
page              104 fs/ntfs/mft.h  	struct page *page = ni->page;
page              107 fs/ntfs/mft.h  	BUG_ON(!page);
page              108 fs/ntfs/mft.h  	lock_page(page);
page              110 fs/ntfs/mft.h  	unlock_page(page);
page             1061 fs/ntfs/super.c 	struct page *mft_page, *mirr_page;
page             1241 fs/ntfs/super.c 	struct page *page;
page             1292 fs/ntfs/super.c 	page = ntfs_map_page(vi->i_mapping, 0);
page             1293 fs/ntfs/super.c 	if (IS_ERR(page)) {
page             1295 fs/ntfs/super.c 		ret = PTR_ERR(page);
page             1298 fs/ntfs/super.c 	kaddr = (u32*)page_address(page);
page             1322 fs/ntfs/super.c 	ntfs_unmap_page(page);
page             1416 fs/ntfs/super.c 	struct page *page;
page             1512 fs/ntfs/super.c 	page = ntfs_map_page(vol->usnjrnl_max_ino->i_mapping, 0);
page             1513 fs/ntfs/super.c 	if (IS_ERR(page)) {
page             1518 fs/ntfs/super.c 	uh = (USN_HEADER*)page_address(page);
page             1526 fs/ntfs/super.c 		ntfs_unmap_page(page);
page             1537 fs/ntfs/super.c 			ntfs_unmap_page(page);
page             1550 fs/ntfs/super.c 		ntfs_unmap_page(page);
page             1553 fs/ntfs/super.c 	ntfs_unmap_page(page);
page             1569 fs/ntfs/super.c 	struct page *page;
page             1595 fs/ntfs/super.c 		page = ntfs_map_page(ino->i_mapping, index);
page             1596 fs/ntfs/super.c 		if (IS_ERR(page))
page             1599 fs/ntfs/super.c 				page_address(page), size);
page             1600 fs/ntfs/super.c 		ntfs_unmap_page(page);
page             1634 fs/ntfs/super.c 	struct page *page;
page             1664 fs/ntfs/super.c 		page = ntfs_map_page(ino->i_mapping, index);
page             1665 fs/ntfs/super.c 		if (IS_ERR(page))
page             1668 fs/ntfs/super.c 				page_address(page), size);
page             1669 fs/ntfs/super.c 		ntfs_unmap_page(page);
page             2474 fs/ntfs/super.c 	struct page *page;
page             2496 fs/ntfs/super.c 		page = read_mapping_page(mapping, index, NULL);
page             2498 fs/ntfs/super.c 		if (IS_ERR(page)) {
page             2504 fs/ntfs/super.c 		kaddr = (u32*)kmap_atomic(page, KM_USER0);
page             2515 fs/ntfs/super.c 		page_cache_release(page);
page             2554 fs/ntfs/super.c 	struct page *page;
page             2567 fs/ntfs/super.c 		page = read_mapping_page(mapping, index, NULL);
page             2569 fs/ntfs/super.c 		if (IS_ERR(page)) {
page             2575 fs/ntfs/super.c 		kaddr = (u32*)kmap_atomic(page, KM_USER0);
page             2586 fs/ntfs/super.c 		page_cache_release(page);
page               52 fs/ntfs/usnjrnl.c 		struct page *page;
page               55 fs/ntfs/usnjrnl.c 		page = ntfs_map_page(vol->usnjrnl_max_ino->i_mapping, 0);
page               56 fs/ntfs/usnjrnl.c 		if (IS_ERR(page)) {
page               61 fs/ntfs/usnjrnl.c 		uh = (USN_HEADER*)page_address(page);
page               74 fs/ntfs/usnjrnl.c 		flush_dcache_page(page);
page               75 fs/ntfs/usnjrnl.c 		set_page_dirty(page);
page               76 fs/ntfs/usnjrnl.c 		ntfs_unmap_page(page);
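
The fs/ntfs/usnjrnl.c lines above are the canonical read-modify-write of a metadata page: map it, update the structure through page_address(), then flush_dcache_page() and set_page_dirty() before unmapping so writeback picks the change up. A sketch, with the USN_HEADER reduced to a single __le64 stamp:

	#include <linux/mm.h>
	#include <linux/pagemap.h>
	#include <linux/highmem.h>
	#include <linux/err.h>

	static int stamp_first_page(struct address_space *mapping, __le64 stamp)
	{
		struct page *page;

		page = ntfs_map_page(mapping, 0);
		if (IS_ERR(page))
			return PTR_ERR(page);
		*(__le64 *)page_address(page) = stamp;	/* modify in place */
		flush_dcache_page(page);	/* data-cache coherency */
		set_page_dirty(page);		/* schedule for writeback */
		ntfs_unmap_page(page);
		return 0;
	}
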
page             6420 fs/ocfs2/alloc.c 	ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0);
page             6425 fs/ocfs2/alloc.c 		zero_user_segment(page, from, to);
page             6432 fs/ocfs2/alloc.c 	ret = walk_page_buffers(handle, page_buffers(page),
page             6440 fs/ocfs2/alloc.c 		ret = walk_page_buffers(handle, page_buffers(page),
page             6449 fs/ocfs2/alloc.c 		SetPageUptodate(page);
page             6451 fs/ocfs2/alloc.c 	flush_dcache_page(page);
page             6459 fs/ocfs2/alloc.c 	struct page *page;
page             6470 fs/ocfs2/alloc.c 		page = pages[i];
page             6473 fs/ocfs2/alloc.c 		if ((end >> PAGE_CACHE_SHIFT) == page->index)
page             6479 fs/ocfs2/alloc.c 		ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
page             6482 fs/ocfs2/alloc.c 		start = (page->index + 1) << PAGE_CACHE_SHIFT;
page             6543 fs/ocfs2/alloc.c 	struct page **pages = NULL;
page             6556 fs/ocfs2/alloc.c 			sizeof(struct page *), GFP_NOFS);
page             6663 fs/ocfs2/alloc.c 	struct page **pages = NULL;
page             6671 fs/ocfs2/alloc.c 				sizeof(struct page *), GFP_NOFS);
page              244 fs/ocfs2/aops.c 	kaddr = kmap_atomic(page, KM_USER0);
page              249 fs/ocfs2/aops.c 	flush_dcache_page(page);
page              252 fs/ocfs2/aops.c 	SetPageUptodate(page);
page              262 fs/ocfs2/aops.c 	BUG_ON(!PageLocked(page));
page              271 fs/ocfs2/aops.c 	ret = ocfs2_read_inline_data(inode, page, di_bh);
page              273 fs/ocfs2/aops.c 	unlock_page(page);
page              281 fs/ocfs2/aops.c 	struct inode *inode = page->mapping->host;
page              283 fs/ocfs2/aops.c 	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
page              286 fs/ocfs2/aops.c 	mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));
page              288 fs/ocfs2/aops.c 	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
page              312 fs/ocfs2/aops.c 		zero_user(page, 0, PAGE_SIZE);
page              313 fs/ocfs2/aops.c 		SetPageUptodate(page);
page              319 fs/ocfs2/aops.c 		ret = ocfs2_readpage_inline(inode, page);
page              321 fs/ocfs2/aops.c 		ret = block_read_full_page(page, ocfs2_get_block);
page              330 fs/ocfs2/aops.c 		unlock_page(page);
page              351 fs/ocfs2/aops.c 	struct page *last;
page              377 fs/ocfs2/aops.c 	last = list_entry(pages->prev, struct page, lru);
page              406 fs/ocfs2/aops.c 	mlog_entry("(0x%p)\n", page);
page              408 fs/ocfs2/aops.c 	ret = block_write_full_page(page, ocfs2_get_block, wbc);
page              425 fs/ocfs2/aops.c 	ret = block_prepare_write(page, from, to, ocfs2_get_block);
page              486 fs/ocfs2/aops.c 					page_buffers(page),
page              668 fs/ocfs2/aops.c 	journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;
page              670 fs/ocfs2/aops.c 	jbd2_journal_invalidatepage(journal, page, offset);
page              675 fs/ocfs2/aops.c 	journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;
page              677 fs/ocfs2/aops.c 	if (!page_has_buffers(page))
page              679 fs/ocfs2/aops.c 	return jbd2_journal_try_to_free_buffers(journal, page, wait);
page              755 fs/ocfs2/aops.c 	kaddr = kmap_atomic(page, KM_USER0);
page              779 fs/ocfs2/aops.c 	u64 offset = page_offset(page) + block_start;
page              806 fs/ocfs2/aops.c 	if (!page_has_buffers(page))
page              807 fs/ocfs2/aops.c 		create_empty_buffers(page, bsize, 0);
page              809 fs/ocfs2/aops.c 	head = page_buffers(page);
page              821 fs/ocfs2/aops.c 			if (PageUptodate(page))
page              838 fs/ocfs2/aops.c 		if (PageUptodate(page)) {
page              843 fs/ocfs2/aops.c 			   ocfs2_should_read_blk(inode, page, block_start) &&
page              877 fs/ocfs2/aops.c 		zero_user(page, block_start, bh->b_size);
page              943 fs/ocfs2/aops.c 	struct page			*w_pages[OCFS2_MAX_CTXT_PAGES];
page              945 fs/ocfs2/aops.c 	struct page			*w_target_page;
page             1025 fs/ocfs2/aops.c 	BUG_ON(!PageLocked(page));
page             1026 fs/ocfs2/aops.c 	if (!page_has_buffers(page))
page             1029 fs/ocfs2/aops.c 	bh = head = page_buffers(page);
page             1036 fs/ocfs2/aops.c 				if (!PageUptodate(page)) {
page             1042 fs/ocfs2/aops.c 					zero_user_segment(page, start, end);
page             1067 fs/ocfs2/aops.c 	struct page *tmppage;
page             1104 fs/ocfs2/aops.c 	if (page == wc->w_target_page) {
page             1109 fs/ocfs2/aops.c 			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
page             1113 fs/ocfs2/aops.c 			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
page             1137 fs/ocfs2/aops.c 		ret = ocfs2_map_page_blocks(page, p_blkno, inode,
page             1155 fs/ocfs2/aops.c 	if (new && !PageUptodate(page))
page             1156 fs/ocfs2/aops.c 		ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
page             1159 fs/ocfs2/aops.c 	flush_dcache_page(page);
page             1510 fs/ocfs2/aops.c 	struct page *page;
page             1514 fs/ocfs2/aops.c 	page = find_or_create_page(mapping, 0, GFP_NOFS);
page             1515 fs/ocfs2/aops.c 	if (!page) {
page             1524 fs/ocfs2/aops.c 	wc->w_pages[0] = wc->w_target_page = page;
page             1546 fs/ocfs2/aops.c 	if (!PageUptodate(page)) {
page             1547 fs/ocfs2/aops.c 		ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
page             1885 fs/ocfs2/aops.c 	struct page *tmppage;
page             1964 fs/ocfs2/aops.c 	ret = ocfs2_write_end_nolock(mapping, pos, len, copied, page, fsdata);
page              127 fs/ocfs2/cluster/heartbeat.c 	struct page             **hr_slot_data;
page              246 fs/ocfs2/cluster/heartbeat.c 	struct page *page;
page              268 fs/ocfs2/cluster/heartbeat.c 		page = reg->hr_slot_data[current_page];
page              276 fs/ocfs2/cluster/heartbeat.c 		len = bio_add_page(bio, page, vec_len, vec_start);
page              963 fs/ocfs2/cluster/heartbeat.c 	struct page *page;
page              971 fs/ocfs2/cluster/heartbeat.c 			page = reg->hr_slot_data[i];
page              972 fs/ocfs2/cluster/heartbeat.c 			if (page)
page              973 fs/ocfs2/cluster/heartbeat.c 				__free_page(page);
page              998 fs/ocfs2/cluster/heartbeat.c 	char *p = (char *)page;
page             1021 fs/ocfs2/cluster/heartbeat.c 	return sprintf(page, "%u\n", reg->hr_block_bytes);
page             1035 fs/ocfs2/cluster/heartbeat.c 	status = o2hb_read_block_input(reg, page, count,
page             1049 fs/ocfs2/cluster/heartbeat.c 	return sprintf(page, "%llu\n", reg->hr_start_block);
page             1057 fs/ocfs2/cluster/heartbeat.c 	char *p = (char *)page;
page             1074 fs/ocfs2/cluster/heartbeat.c 	return sprintf(page, "%d\n", reg->hr_blocks);
page             1082 fs/ocfs2/cluster/heartbeat.c 	char *p = (char *)page;
page             1105 fs/ocfs2/cluster/heartbeat.c 		ret = sprintf(page, "%s\n", reg->hr_dev_name);
page             1128 fs/ocfs2/cluster/heartbeat.c 	struct page *page;
page             1157 fs/ocfs2/cluster/heartbeat.c 	reg->hr_slot_data = kcalloc(reg->hr_num_pages, sizeof(struct page *),
page             1165 fs/ocfs2/cluster/heartbeat.c 		page = alloc_page(GFP_KERNEL);
page             1166 fs/ocfs2/cluster/heartbeat.c 		if (!page) {
page             1171 fs/ocfs2/cluster/heartbeat.c 		reg->hr_slot_data[i] = page;
page             1174 fs/ocfs2/cluster/heartbeat.c 		raw = page_address(page);
page             1235 fs/ocfs2/cluster/heartbeat.c 	char *p = (char *)page;
page             1381 fs/ocfs2/cluster/heartbeat.c 	return sprintf(page, "%u\n", pid);
page             1448 fs/ocfs2/cluster/heartbeat.c 		ret = o2hb_region_attr->show(reg, page);
page             1462 fs/ocfs2/cluster/heartbeat.c 		ret = o2hb_region_attr->store(reg, page, count);
page             1553 fs/ocfs2/cluster/heartbeat.c 		ret = o2hb_heartbeat_group_attr->show(reg, page);
page             1567 fs/ocfs2/cluster/heartbeat.c 		ret = o2hb_heartbeat_group_attr->store(reg, page, count);
page             1574 fs/ocfs2/cluster/heartbeat.c 	return sprintf(page, "%u\n", o2hb_dead_threshold);
page             1582 fs/ocfs2/cluster/heartbeat.c 	char *p = (char *)page;
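
In the o2hb entries above and the o2nm entries that follow, `page` is not a struct page at all: it is the PAGE_SIZE text buffer configfs passes to the ->show() and ->store() methods. Reads sprintf() into it; writes parse it in place with simple_strtoul(). A sketch of such an attribute pair; the struct and field names here are made up for illustration:

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/types.h>

	struct demo_region { unsigned int blocks; };

	static ssize_t demo_blocks_read(struct demo_region *reg, char *page)
	{
		return sprintf(page, "%u\n", reg->blocks);
	}

	static ssize_t demo_blocks_write(struct demo_region *reg,
			const char *page, size_t count)
	{
		char *p = (char *)page;
		unsigned long tmp = simple_strtoul(p, &p, 0);

		if (*p && *p != '\n')
			return -EINVAL;	/* trailing junk */
		reg->blocks = tmp;
		return count;		/* whole write consumed */
	}
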
page              173 fs/ocfs2/cluster/nodemanager.c 	return sprintf(page, "%d\n", node->nd_num);
page              195 fs/ocfs2/cluster/nodemanager.c 	char *p = (char *)page;
page              228 fs/ocfs2/cluster/nodemanager.c 	return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port));
page              235 fs/ocfs2/cluster/nodemanager.c 	char *p = (char *)page;
page              253 fs/ocfs2/cluster/nodemanager.c 	return sprintf(page, "%u.%u.%u.%u\n", NIPQUAD(node->nd_ipv4_address));
page              266 fs/ocfs2/cluster/nodemanager.c 	ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
page              296 fs/ocfs2/cluster/nodemanager.c 	return sprintf(page, "%d\n", node->nd_local);
page              304 fs/ocfs2/cluster/nodemanager.c 	char *p = (char *)page;
page              415 fs/ocfs2/cluster/nodemanager.c 		ret = o2nm_node_attr->show(node, page);
page              437 fs/ocfs2/cluster/nodemanager.c 	ret = o2nm_node_attr->store(node, page, count);
page              484 fs/ocfs2/cluster/nodemanager.c 	char *p = (char *)page;
page              503 fs/ocfs2/cluster/nodemanager.c 	return sprintf(page, "%u\n", cluster->cl_idle_timeout_ms);
page              512 fs/ocfs2/cluster/nodemanager.c 	ret =  o2nm_cluster_attr_write(page, count, &val);
page              538 fs/ocfs2/cluster/nodemanager.c 	return sprintf(page, "%u\n", cluster->cl_keepalive_delay_ms);
page              547 fs/ocfs2/cluster/nodemanager.c 	ret =  o2nm_cluster_attr_write(page, count, &val);
page              573 fs/ocfs2/cluster/nodemanager.c 	return sprintf(page, "%u\n", cluster->cl_reconnect_delay_ms);
page              579 fs/ocfs2/cluster/nodemanager.c 	return o2nm_cluster_attr_write(page, count,
page              622 fs/ocfs2/cluster/nodemanager.c 		ret = o2nm_cluster_attr->show(cluster, page);
page              640 fs/ocfs2/cluster/nodemanager.c 	ret = o2nm_cluster_attr->store(cluster, page, count);
page              378 fs/ocfs2/cluster/tcp.c 	struct page *page = NULL;
page              380 fs/ocfs2/cluster/tcp.c 	page = alloc_page(GFP_NOFS);
page              382 fs/ocfs2/cluster/tcp.c 	if (sc == NULL || page == NULL)
page              401 fs/ocfs2/cluster/tcp.c 	sc->sc_page = page;
page              404 fs/ocfs2/cluster/tcp.c 	page = NULL;
page              407 fs/ocfs2/cluster/tcp.c 	if (page)
page              408 fs/ocfs2/cluster/tcp.c 		__free_page(page);
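
The fs/ocfs2/cluster/tcp.c entries (lines 378-408 above) allocate a page alongside the connection object and hand it off by NULLing the local pointer on success, so the shared exit path frees the page only when the function failed. A sketch of that ownership-transfer idiom; the struct is hypothetical:

	#include <linux/mm.h>
	#include <linux/slab.h>

	struct demo_conn { struct page *sc_page; };

	static struct demo_conn *demo_conn_alloc(void)
	{
		struct demo_conn *sc = kzalloc(sizeof(*sc), GFP_NOFS);
		struct page *page = alloc_page(GFP_NOFS);

		if (sc == NULL || page == NULL)
			goto out;
		sc->sc_page = page;
		page = NULL;		/* ownership moved into sc */
		return sc;
	out:
		if (page)
			__free_page(page);
		kfree(sc);		/* kfree(NULL) is a no-op */
		return NULL;
	}
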
page              163 fs/ocfs2/cluster/tcp_internal.h 	struct page 		*sc_page;
page             2239 fs/ocfs2/dlmglue.c 		unlock_page(page);
page              688 fs/ocfs2/file.c 	struct page *page;
page              704 fs/ocfs2/file.c 	page = grab_cache_page(mapping, index);
page              705 fs/ocfs2/file.c 	if (!page) {
page              711 fs/ocfs2/file.c 	ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
page              718 fs/ocfs2/file.c 		handle = ocfs2_start_walk_page_trans(inode, page, offset,
page              728 fs/ocfs2/file.c 	ret = block_commit_write(page, offset, offset);
page              737 fs/ocfs2/file.c 	unlock_page(page);
page              738 fs/ocfs2/file.c 	page_cache_release(page);
page               83 fs/ocfs2/mmap.c 	mlog_exit_ptr(vmf->page);
page               92 fs/ocfs2/mmap.c 	loff_t pos = page_offset(page);
page               95 fs/ocfs2/mmap.c 	struct page *locked_page = NULL;
page              104 fs/ocfs2/mmap.c 	if (page->index > last_index) {
page              115 fs/ocfs2/mmap.c 	if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
page              130 fs/ocfs2/mmap.c 	if (page->index == last_index)
page              134 fs/ocfs2/mmap.c 				       &fsdata, di_bh, page);
page              184 fs/ocfs2/mmap.c 	ret = __ocfs2_page_mkwrite(inode, di_bh, page);
page               66 fs/ocfs2/symlink.c 	struct page * page;
page               68 fs/ocfs2/symlink.c 	page = read_mapping_page(mapping, 0, NULL);
page               69 fs/ocfs2/symlink.c 	if (IS_ERR(page))
page               71 fs/ocfs2/symlink.c 	*ppage = page;
page               72 fs/ocfs2/symlink.c 	return kmap(page);
page               75 fs/ocfs2/symlink.c 	return (char*)page;
page              137 fs/ocfs2/symlink.c 	struct page *page = NULL;
page              143 fs/ocfs2/symlink.c 		link = ocfs2_page_getlink(dentry, &page);
page              153 fs/ocfs2/symlink.c 	if (page) {
page              154 fs/ocfs2/symlink.c 		kunmap(page);
page              155 fs/ocfs2/symlink.c 		page_cache_release(page);
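
The fs/ocfs2/symlink.c getlink helper above reads the first page of the symlink body through the page cache and returns it kmap()ed; the caller's cleanup (lines 153-155) kunmap()s and releases it. Both halves, sketched:

	#include <linux/pagemap.h>
	#include <linux/highmem.h>
	#include <linux/err.h>

	static char *demo_getlink(struct address_space *mapping,
			struct page **ppage)
	{
		struct page *page = read_mapping_page(mapping, 0, NULL);

		if (IS_ERR(page))
			return (char *)page;	/* propagate the ERR_PTR */
		*ppage = page;
		return kmap(page);		/* link text for the caller */
	}

	static void demo_putlink(struct page *page)
	{
		kunmap(page);
		page_cache_release(page);
	}
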
page              306 fs/omfs/file.c 	return block_read_full_page(page, omfs_get_block);
page              317 fs/omfs/file.c 	return block_write_full_page(page, omfs_get_block, wbc);
page              564 fs/partitions/check.c 	struct page *page;
page              566 fs/partitions/check.c 	page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
page              568 fs/partitions/check.c 	if (!IS_ERR(page)) {
page              569 fs/partitions/check.c 		if (PageError(page))
page              571 fs/partitions/check.c 		p->v = page;
page              572 fs/partitions/check.c 		return (unsigned char *)page_address(page) +  ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9);
page              574 fs/partitions/check.c 		page_cache_release(page);
page              154 fs/pipe.c      	struct page *page = buf->page;
page              161 fs/pipe.c      	if (page_count(page) == 1 && !pipe->tmp_page)
page              162 fs/pipe.c      		pipe->tmp_page = page;
page              164 fs/pipe.c      		page_cache_release(page);
page              186 fs/pipe.c      		return kmap_atomic(buf->page, KM_USER0);
page              189 fs/pipe.c      	return kmap(buf->page);
page              208 fs/pipe.c      		kunmap(buf->page);
page              226 fs/pipe.c      	struct page *page = buf->page;
page              233 fs/pipe.c      	if (page_count(page) == 1) {
page              234 fs/pipe.c      		lock_page(page);
page              253 fs/pipe.c      	page_cache_get(buf->page);
page              476 fs/pipe.c      			struct page *page = pipe->tmp_page;
page              480 fs/pipe.c      			if (!page) {
page              481 fs/pipe.c      				page = alloc_page(GFP_HIGHUSER);
page              482 fs/pipe.c      				if (unlikely(!page)) {
page              486 fs/pipe.c      				pipe->tmp_page = page;
page              501 fs/pipe.c      				src = kmap_atomic(page, KM_USER0);
page              503 fs/pipe.c      				src = kmap(page);
page              510 fs/pipe.c      				kunmap(page);
page              524 fs/pipe.c      			buf->page = page;
page              697 fs/proc/base.c 	unsigned long page;
page              709 fs/proc/base.c 	if (!(page = __get_free_page(GFP_TEMPORARY)))
page              712 fs/proc/base.c 	length = PROC_I(inode)->op.proc_read(task, (char*)page);
page              715 fs/proc/base.c 		length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
page              716 fs/proc/base.c 	free_page(page);
page              776 fs/proc/base.c 	char *page;
page              788 fs/proc/base.c 	page = (char *)__get_free_page(GFP_TEMPORARY);
page              789 fs/proc/base.c 	if (!page)
page              809 fs/proc/base.c 		retval = access_process_vm(task, src, page, this_len, 0);
page              816 fs/proc/base.c 		if (copy_to_user(buf, page, retval)) {
page              831 fs/proc/base.c 	free_page((unsigned long) page);
page              846 fs/proc/base.c 	char *page;
page              858 fs/proc/base.c 	page = (char *)__get_free_page(GFP_TEMPORARY);
page              859 fs/proc/base.c 	if (!page)
page              867 fs/proc/base.c 		if (copy_from_user(page, buf, this_len)) {
page              871 fs/proc/base.c 		retval = access_process_vm(task, dst, page, this_len, 1);
page              883 fs/proc/base.c 	free_page((unsigned long) page);
page              918 fs/proc/base.c 	char *page;
page              930 fs/proc/base.c 	page = (char *)__get_free_page(GFP_TEMPORARY);
page              931 fs/proc/base.c 	if (!page)
page              952 fs/proc/base.c 			page, this_len, 0);
page              959 fs/proc/base.c 		if (copy_to_user(buf, page, retval)) {
page              973 fs/proc/base.c 	free_page((unsigned long) page);
page             1061 fs/proc/base.c 	char *page, *tmp;
page             1078 fs/proc/base.c 	page = (char*)__get_free_page(GFP_TEMPORARY);
page             1079 fs/proc/base.c 	if (!page)
page             1082 fs/proc/base.c 	if (copy_from_user(page, buf, count))
page             1085 fs/proc/base.c 	page[count] = '\0';
page             1086 fs/proc/base.c 	loginuid = simple_strtoul(page, &tmp, 10);
page             1087 fs/proc/base.c 	if (tmp == page) {
page             1097 fs/proc/base.c 	free_page((unsigned long) page);
page             2084 fs/proc/base.c 	char *page;
page             2100 fs/proc/base.c 	page = (char*)__get_free_page(GFP_TEMPORARY);
page             2101 fs/proc/base.c 	if (!page)
page             2105 fs/proc/base.c 	if (copy_from_user(page, buf, count))
page             2110 fs/proc/base.c 				      (void*)page, count);
page             2112 fs/proc/base.c 	free_page((unsigned long) page);
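
The fs/proc/base.c handlers above use a bare page from __get_free_page() as a bounce buffer between user space and the kernel; note that it is freed by address with free_page(), not put_page(). A sketch of the write-side pattern:

	#include <linux/errno.h>
	#include <linux/mm.h>
	#include <linux/uaccess.h>

	static ssize_t demo_write(const char __user *buf, size_t count)
	{
		char *page;
		ssize_t ret;

		if (count > PAGE_SIZE - 1)	/* keep room for '\0' */
			count = PAGE_SIZE - 1;
		page = (char *)__get_free_page(GFP_TEMPORARY);
		if (!page)
			return -ENOMEM;
		ret = -EFAULT;
		if (copy_from_user(page, buf, count))
			goto out;
		page[count] = '\0';
		/* ... parse, e.g. with simple_strtoul(page, ..., 10) ... */
		ret = count;
	out:
		free_page((unsigned long)page);
		return ret;
	}
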
page               45 fs/proc/generic.c 	char 	*page;
page               65 fs/proc/generic.c 	if (!(page = (char*) __get_free_page(GFP_TEMPORARY)))
page              120 fs/proc/generic.c 			n = dp->read_proc(page, &start, *ppos,
page              144 fs/proc/generic.c 			start = page + *ppos;
page              145 fs/proc/generic.c 		} else if (start < page) {
page              160 fs/proc/generic.c 			unsigned long startoff = (unsigned long)(start - page);
page              170 fs/proc/generic.c  		n -= copy_to_user(buf, start < page ? page : start, n);
page              177 fs/proc/generic.c 		*ppos += start < page ? (unsigned long)start : n;
page              182 fs/proc/generic.c 	free_page((unsigned long) page);
page               41 fs/proc/proc_devtree.c 	memcpy(page, (char *)pp->value + off, n);
page               42 fs/proc/proc_devtree.c 	*start = page;
page               76 fs/proc/proc_misc.c 	*start = page + off;
page               97 fs/proc/proc_misc.c 	len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
page              103 fs/proc/proc_misc.c 	return proc_calc_metrics(page, start, off, count, eof, len);
page              117 fs/proc/proc_misc.c 	len = sprintf(page,"%lu.%02lu %lu.%02lu\n",
page              123 fs/proc/proc_misc.c 	return proc_calc_metrics(page, start, off, count, eof, len);
page              161 fs/proc/proc_misc.c 	len = sprintf(page,
page              233 fs/proc/proc_misc.c 		len += hugetlb_report_meminfo(page + len);
page              235 fs/proc/proc_misc.c 	len += arch_report_meminfo(page + len);
page              237 fs/proc/proc_misc.c 	return proc_calc_metrics(page, start, off, count, eof, len);
page              283 fs/proc/proc_misc.c 	len = snprintf(page, PAGE_SIZE, linux_proc_banner,
page              287 fs/proc/proc_misc.c 	return proc_calc_metrics(page, start, off, count, eof, len);
page              377 fs/proc/proc_misc.c 	int len = get_hardware_list(page);
page              378 fs/proc/proc_misc.c 	return proc_calc_metrics(page, start, off, count, eof, len);
page              386 fs/proc/proc_misc.c 	int len = get_stram_list(page);
page              387 fs/proc/proc_misc.c 	return proc_calc_metrics(page, start, off, count, eof, len);
page              673 fs/proc/proc_misc.c 	int len = get_filesystem_list(page);
page              674 fs/proc/proc_misc.c 	return proc_calc_metrics(page, start, off, count, eof, len);
page              682 fs/proc/proc_misc.c 	len = sprintf(page, "%s\n", saved_command_line);
page              683 fs/proc/proc_misc.c 	return proc_calc_metrics(page, start, off, count, eof, len);
page              703 fs/proc/proc_misc.c 	int len = get_exec_domain_list(page);
page              704 fs/proc/proc_misc.c 	return proc_calc_metrics(page, start, off, count, eof, len);
page              741 fs/proc/proc_misc.c 	struct page *ppage;
page              807 fs/proc/proc_misc.c 	struct page *ppage;
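
In fs/proc/proc_misc.c `page` is again a text buffer: each legacy read_proc handler sprintf()s into it and delegates the *start/*eof bookkeeping to proc_calc_metrics(), the small helper defined near the top of that file (line 76 above sets *start = page + off). A sketch of the helper's logic plus one handler; treat the helper body as a reconstruction rather than a quote:

	#include <linux/kernel.h>

	static int proc_calc_metrics(char *page, char **start, off_t off,
			int count, int *eof, int len)
	{
		if (len <= off + count)
			*eof = 1;
		*start = page + off;
		len -= off;
		if (len > count)
			len = count;
		if (len < 0)
			len = 0;
		return len;
	}

	static int demo_read_proc(char *page, char **start, off_t off,
			int count, int *eof, void *data)
	{
		int len = sprintf(page, "demo\n");
		return proc_calc_metrics(page, start, off, count, eof, len);
	}
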
page              321 fs/proc/task_mmu.c 	struct page *page;
page              338 fs/proc/task_mmu.c 		page = vm_normal_page(vma, addr, ptent);
page              339 fs/proc/task_mmu.c 		if (!page)
page              343 fs/proc/task_mmu.c 		if (pte_young(ptent) || PageReferenced(page))
page              345 fs/proc/task_mmu.c 		mapcount = page_mapcount(page);
page              433 fs/proc/task_mmu.c 	struct page *page;
page              441 fs/proc/task_mmu.c 		page = vm_normal_page(vma, addr, ptent);
page              442 fs/proc/task_mmu.c 		if (!page)
page              447 fs/proc/task_mmu.c 		ClearPageReferenced(page);
page              630 fs/proc/task_mmu.c 	struct page **pages, *page;
page              666 fs/proc/task_mmu.c 	pages = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
page              718 fs/proc/task_mmu.c 		page = pages[pagecount-1];
page              719 fs/proc/task_mmu.c 		if (!PageReserved(page))
page              720 fs/proc/task_mmu.c 			SetPageDirty(page);
page              721 fs/proc/task_mmu.c 		page_cache_release(page);
page              435 fs/qnx4/inode.c 	return block_write_full_page(page,qnx4_get_block, wbc);
page              440 fs/qnx4/inode.c 	return block_read_full_page(page,qnx4_get_block);
page               65 fs/ramfs/file-nommu.c 	struct page *pages;
page              108 fs/ramfs/file-nommu.c 		struct page *page = pages + loop;
page              110 fs/ramfs/file-nommu.c 		ret = add_to_page_cache(page, inode->i_mapping, loop, GFP_KERNEL);
page              114 fs/ramfs/file-nommu.c 		if (!pagevec_add(&lru_pvec, page))
page              117 fs/ramfs/file-nommu.c 		unlock_page(page);
page              243 fs/ramfs/file-nommu.c 	struct page **pages = NULL, **ptr, *page;
page              263 fs/ramfs/file-nommu.c 	pages = kzalloc(lpages * sizeof(struct page *), GFP_KERNEL);
page              273 fs/ramfs/file-nommu.c 	page = *ptr++;
page              274 fs/ramfs/file-nommu.c 	page++;
page              276 fs/ramfs/file-nommu.c 		if (*ptr++ != page++)
page              182 fs/reiserfs/file.c 	for (bh = head = page_buffers(page), block_start = 0;
page              203 fs/reiserfs/file.c 				    (new || page->index >= i_size_index)) {
page              221 fs/reiserfs/file.c 		SetPageUptodate(page);
page              162 fs/reiserfs/inode.c 	if (page && page_has_buffers(page)) {
page              163 fs/reiserfs/inode.c 		head = page_buffers(page);
page              520 fs/reiserfs/inode.c 	struct page *tail_page;
page              521 fs/reiserfs/inode.c 	struct page *hole_page = bh_result->b_page;
page             2012 fs/reiserfs/inode.c 	struct page *page;
page             2023 fs/reiserfs/inode.c 	page = grab_cache_page(p_s_inode->i_mapping, index);
page             2025 fs/reiserfs/inode.c 	if (!page) {
page             2031 fs/reiserfs/inode.c 	error = block_prepare_write(page, start, offset,
page             2036 fs/reiserfs/inode.c 	head = page_buffers(page);
page             2060 fs/reiserfs/inode.c 	*page_result = page;
page             2066 fs/reiserfs/inode.c 	unlock_page(page);
page             2067 fs/reiserfs/inode.c 	page_cache_release(page);
page             2084 fs/reiserfs/inode.c 	struct page *page = NULL;
page             2092 fs/reiserfs/inode.c 		if ((error = grab_tail_page(p_s_inode, &page, &bh))) {
page             2100 fs/reiserfs/inode.c 			page = NULL;
page             2125 fs/reiserfs/inode.c 	err2 = reiserfs_do_truncate(&th, p_s_inode, page, update_timestamps);
page             2143 fs/reiserfs/inode.c 	if (page) {
page             2148 fs/reiserfs/inode.c 			zero_user(page, offset, length);
page             2153 fs/reiserfs/inode.c 		unlock_page(page);
page             2154 fs/reiserfs/inode.c 		page_cache_release(page);
page             2160 fs/reiserfs/inode.c 	if (page) {
page             2161 fs/reiserfs/inode.c 		unlock_page(page);
page             2162 fs/reiserfs/inode.c 		page_cache_release(page);
page             2328 fs/reiserfs/inode.c 	struct inode *inode = page->mapping->host;
page             2336 fs/reiserfs/inode.c 	int checked = PageChecked(page);
page             2344 fs/reiserfs/inode.c 		redirty_page_for_writepage(wbc, page);
page             2345 fs/reiserfs/inode.c 		unlock_page(page);
page             2354 fs/reiserfs/inode.c 	if (!page_has_buffers(page)) {
page             2355 fs/reiserfs/inode.c 		create_empty_buffers(page, s->s_blocksize,
page             2358 fs/reiserfs/inode.c 	head = page_buffers(page);
page             2363 fs/reiserfs/inode.c 	if (page->index >= end_index) {
page             2368 fs/reiserfs/inode.c 		if (page->index >= end_index + 1 || !last_offset) {
page             2369 fs/reiserfs/inode.c 			unlock_page(page);
page             2372 fs/reiserfs/inode.c 		zero_user_segment(page, last_offset, PAGE_CACHE_SIZE);
page             2375 fs/reiserfs/inode.c 	block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits);
page             2410 fs/reiserfs/inode.c 		ClearPageChecked(page);
page             2439 fs/reiserfs/inode.c 				redirty_page_for_writepage(wbc, page);
page             2456 fs/reiserfs/inode.c 	BUG_ON(PageWriteback(page));
page             2457 fs/reiserfs/inode.c 	set_page_writeback(page);
page             2458 fs/reiserfs/inode.c 	unlock_page(page);
page             2493 fs/reiserfs/inode.c 			SetPageUptodate(page);
page             2494 fs/reiserfs/inode.c 		end_page_writeback(page);
page             2503 fs/reiserfs/inode.c 	ClearPageUptodate(page);
page             2519 fs/reiserfs/inode.c 	SetPageError(page);
page             2520 fs/reiserfs/inode.c 	BUG_ON(PageWriteback(page));
page             2521 fs/reiserfs/inode.c 	set_page_writeback(page);
page             2522 fs/reiserfs/inode.c 	unlock_page(page);
page             2538 fs/reiserfs/inode.c 	return block_read_full_page(page, reiserfs_get_block);
page             2543 fs/reiserfs/inode.c 	struct inode *inode = page->mapping->host;
page             2545 fs/reiserfs/inode.c 	return reiserfs_write_full_page(page, wbc);
page             2554 fs/reiserfs/inode.c 	struct page *page;
page             2568 fs/reiserfs/inode.c 	page = __grab_cache_page(mapping, index);
page             2569 fs/reiserfs/inode.c 	if (!page)
page             2571 fs/reiserfs/inode.c 	*pagep = page;
page             2574 fs/reiserfs/inode.c 	fix_tail_page_for_writing(page);
page             2612 fs/reiserfs/inode.c 		unlock_page(page);
page             2613 fs/reiserfs/inode.c 		page_cache_release(page);
page             2621 fs/reiserfs/inode.c 	struct inode *inode = page->mapping->host;
page             2626 fs/reiserfs/inode.c 	fix_tail_page_for_writing(page);
page             2637 fs/reiserfs/inode.c 	ret = block_prepare_write(page, from, to, reiserfs_get_block);
page             2676 fs/reiserfs/inode.c 	struct inode *inode = page->mapping->host;
page             2693 fs/reiserfs/inode.c 		if (!PageUptodate(page))
page             2696 fs/reiserfs/inode.c 		page_zero_new_buffers(page, start + copied, start + len);
page             2698 fs/reiserfs/inode.c 	flush_dcache_page(page);
page             2700 fs/reiserfs/inode.c 	reiserfs_commit_page(inode, page, start, start + copied);
page             2750 fs/reiserfs/inode.c 	unlock_page(page);
page             2751 fs/reiserfs/inode.c 	page_cache_release(page);
page             2769 fs/reiserfs/inode.c 	struct inode *inode = page->mapping->host;
page             2770 fs/reiserfs/inode.c 	loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + to;
page             2779 fs/reiserfs/inode.c 	reiserfs_commit_page(inode, page, from, to);
page             2948 fs/reiserfs/inode.c 	struct inode *inode = page->mapping->host;
page             2952 fs/reiserfs/inode.c 	BUG_ON(!PageLocked(page));
page             2955 fs/reiserfs/inode.c 		ClearPageChecked(page);
page             2957 fs/reiserfs/inode.c 	if (!page_has_buffers(page))
page             2960 fs/reiserfs/inode.c 	head = page_buffers(page);
page             2985 fs/reiserfs/inode.c 		ret = try_to_release_page(page, 0);
page             2994 fs/reiserfs/inode.c 	struct inode *inode = page->mapping->host;
page             2996 fs/reiserfs/inode.c 		SetPageChecked(page);
page             2997 fs/reiserfs/inode.c 		return __set_page_dirty_nobuffers(page);
page             2999 fs/reiserfs/inode.c 	return __set_page_dirty_buffers(page);
page             3013 fs/reiserfs/inode.c 	struct inode *inode = page->mapping->host;
page             3019 fs/reiserfs/inode.c 	WARN_ON(PageChecked(page));
page             3021 fs/reiserfs/inode.c 	head = page_buffers(page);
page             3035 fs/reiserfs/inode.c 		ret = try_to_free_buffers(page);
page              164 fs/reiserfs/ioctl.c 	struct page *page;
page              197 fs/reiserfs/ioctl.c 	page = grab_cache_page(mapping, index);
page              199 fs/reiserfs/ioctl.c 	if (!page) {
page              202 fs/reiserfs/ioctl.c 	retval = reiserfs_prepare_write(NULL, page, write_from, write_from);
page              207 fs/reiserfs/ioctl.c 	flush_dcache_page(page);
page              208 fs/reiserfs/ioctl.c 	retval = reiserfs_commit_write(NULL, page, write_from, write_from);
page              212 fs/reiserfs/ioctl.c 	unlock_page(page);
page              213 fs/reiserfs/ioctl.c 	page_cache_release(page);
page              629 fs/reiserfs/journal.c 	struct page *page = bh->b_page;
page              630 fs/reiserfs/journal.c 	if (!page->mapping && trylock_page(page)) {
page              631 fs/reiserfs/journal.c 		page_cache_get(page);
page              633 fs/reiserfs/journal.c 		if (!page->mapping)
page              634 fs/reiserfs/journal.c 			try_to_free_buffers(page);
page              635 fs/reiserfs/journal.c 		unlock_page(page);
page              636 fs/reiserfs/journal.c 		page_cache_release(page);
page             4117 fs/reiserfs/journal.c 			struct page *page;
page             4125 fs/reiserfs/journal.c 			page = cn->bh->b_page;
page             4126 fs/reiserfs/journal.c 			addr = kmap(page);
page             4130 fs/reiserfs/journal.c 			kunmap(page);
page             1439 fs/reiserfs/stree.c 	if (page) {
page             1440 fs/reiserfs/stree.c 		if (page_has_buffers(page)) {
page             1443 fs/reiserfs/stree.c 			head = page_buffers(page);
page             1483 fs/reiserfs/stree.c 	    !page || (REISERFS_I(p_s_inode)->i_flags & i_nopack_mask)) {
page             1493 fs/reiserfs/stree.c 	return indirect2direct(th, p_s_inode, page, p_s_path, p_s_item_key,
page             1588 fs/reiserfs/stree.c 			    maybe_indirect_to_direct(th, p_s_inode, page,
page             1729 fs/reiserfs/stree.c 		unmap_buffers(page, tail_pos);
page             1851 fs/reiserfs/stree.c 					   p_s_inode, page, n_new_file_size);
page              211 fs/reiserfs/tail_conversion.c 	tail = (char *)kmap(page);	/* this can schedule */
page              257 fs/reiserfs/tail_conversion.c 		kunmap(page);
page              260 fs/reiserfs/tail_conversion.c 	kunmap(page);
page              374 fs/reiserfs/xattr.c 	kunmap(page);
page              375 fs/reiserfs/xattr.c 	page_cache_release(page);
page              378 fs/reiserfs/xattr.c static struct page *reiserfs_get_page(struct inode *dir, unsigned long n)
page              381 fs/reiserfs/xattr.c 	struct page *page;
page              385 fs/reiserfs/xattr.c 	page = read_mapping_page(mapping, n, NULL);
page              386 fs/reiserfs/xattr.c 	if (!IS_ERR(page)) {
page              387 fs/reiserfs/xattr.c 		kmap(page);
page              388 fs/reiserfs/xattr.c 		if (PageError(page))
page              391 fs/reiserfs/xattr.c 	return page;
page              394 fs/reiserfs/xattr.c 	reiserfs_put_page(page);
page              420 fs/reiserfs/xattr.c 	struct page *page;
page              476 fs/reiserfs/xattr.c 		page = reiserfs_get_page(xinode, file_pos >> PAGE_CACHE_SHIFT);
page              477 fs/reiserfs/xattr.c 		if (IS_ERR(page)) {
page              478 fs/reiserfs/xattr.c 			err = PTR_ERR(page);
page              482 fs/reiserfs/xattr.c 		lock_page(page);
page              483 fs/reiserfs/xattr.c 		data = page_address(page);
page              495 fs/reiserfs/xattr.c 		err = reiserfs_prepare_write(NULL, page, page_offset,
page              500 fs/reiserfs/xattr.c 			err = reiserfs_commit_write(NULL, page, page_offset,
page              504 fs/reiserfs/xattr.c 		unlock_page(page);
page              505 fs/reiserfs/xattr.c 		reiserfs_put_page(page);
page              542 fs/reiserfs/xattr.c 	struct page *page;
page              584 fs/reiserfs/xattr.c 		page = reiserfs_get_page(xinode, file_pos >> PAGE_CACHE_SHIFT);
page              585 fs/reiserfs/xattr.c 		if (IS_ERR(page)) {
page              586 fs/reiserfs/xattr.c 			err = PTR_ERR(page);
page              590 fs/reiserfs/xattr.c 		lock_page(page);
page              591 fs/reiserfs/xattr.c 		data = page_address(page);
page              599 fs/reiserfs/xattr.c 				unlock_page(page);
page              600 fs/reiserfs/xattr.c 				reiserfs_put_page(page);
page              611 fs/reiserfs/xattr.c 		unlock_page(page);
page              612 fs/reiserfs/xattr.c 		reiserfs_put_page(page);
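
reiserfs_get_page() above is the whole read-and-map helper: read through the page cache, kmap(), and only then check PageError() so the failure path can use the common put helper (lines 374-375, which kunmap() and release). Reassembled from the entries listed, omitting the GFP-mask tweak the real function also performs:

	#include <linux/fs.h>
	#include <linux/pagemap.h>
	#include <linux/highmem.h>
	#include <linux/err.h>

	static struct page *reiserfs_get_page(struct inode *dir, unsigned long n)
	{
		struct address_space *mapping = dir->i_mapping;
		struct page *page = read_mapping_page(mapping, n, NULL);

		if (!IS_ERR(page)) {
			kmap(page);
			if (PageError(page))
				goto fail;
		}
		return page;
	fail:
		reiserfs_put_page(page);	/* kunmap + page_cache_release */
		return ERR_PTR(-EIO);
	}
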
page              420 fs/romfs/inode.c 	struct inode *inode = page->mapping->host;
page              426 fs/romfs/inode.c 	page_cache_get(page);
page              428 fs/romfs/inode.c 	buf = kmap(page);
page              433 fs/romfs/inode.c 	offset = page_offset(page);
page              446 fs/romfs/inode.c 			SetPageError(page);
page              456 fs/romfs/inode.c 		SetPageUptodate(page);
page              457 fs/romfs/inode.c 	flush_dcache_page(page);
page              459 fs/romfs/inode.c 	unlock_page(page);
page              461 fs/romfs/inode.c 	kunmap(page);
page              463 fs/romfs/inode.c 	page_cache_release(page);
page               34 fs/smbfs/cache.c 	struct page *page = NULL;
page               36 fs/smbfs/cache.c 	page = grab_cache_page(&dir->i_data, 0);
page               37 fs/smbfs/cache.c 	if (!page)
page               40 fs/smbfs/cache.c 	if (!PageUptodate(page))
page               43 fs/smbfs/cache.c 	cache = kmap(page);
page               46 fs/smbfs/cache.c 	kunmap(page);
page               47 fs/smbfs/cache.c 	SetPageUptodate(page);
page               49 fs/smbfs/cache.c 	unlock_page(page);
page               50 fs/smbfs/cache.c 	page_cache_release(page);
page              172 fs/smbfs/cache.c 		if (ctl.page) {
page              173 fs/smbfs/cache.c 			kunmap(ctl.page);
page              174 fs/smbfs/cache.c 			SetPageUptodate(ctl.page);
page              175 fs/smbfs/cache.c 			unlock_page(ctl.page);
page              176 fs/smbfs/cache.c 			page_cache_release(ctl.page);
page              181 fs/smbfs/cache.c 		ctl.page  = grab_cache_page(&inode->i_data, ctl.ofs);
page              182 fs/smbfs/cache.c 		if (ctl.page)
page              183 fs/smbfs/cache.c 			ctl.cache = kmap(ctl.page);
page               87 fs/smbfs/dir.c 	struct page *page = NULL;
page               90 fs/smbfs/dir.c 	ctl.page  = NULL;
page              120 fs/smbfs/dir.c 	page = grab_cache_page(&dir->i_data, 0);
page              121 fs/smbfs/dir.c 	if (!page)
page              124 fs/smbfs/dir.c 	ctl.cache = cache = kmap(page);
page              127 fs/smbfs/dir.c 	if (!PageUptodate(page) || !ctl.head.eof) {
page              129 fs/smbfs/dir.c 			 DENTRY_PATH(dentry), PageUptodate(page),ctl.head.eof);
page              157 fs/smbfs/dir.c 			ctl.page = find_lock_page(&dir->i_data, ctl.ofs);
page              158 fs/smbfs/dir.c 			if (!ctl.page)
page              160 fs/smbfs/dir.c 			ctl.cache = kmap(ctl.page);
page              161 fs/smbfs/dir.c 			if (!PageUptodate(ctl.page))
page              184 fs/smbfs/dir.c 		if (ctl.page) {
page              185 fs/smbfs/dir.c 			kunmap(ctl.page);
page              186 fs/smbfs/dir.c 			SetPageUptodate(ctl.page);
page              187 fs/smbfs/dir.c 			unlock_page(ctl.page);
page              188 fs/smbfs/dir.c 			page_cache_release(ctl.page);
page              189 fs/smbfs/dir.c 			ctl.page = NULL;
page              195 fs/smbfs/dir.c 	if (ctl.page) {
page              196 fs/smbfs/dir.c 		kunmap(ctl.page);
page              197 fs/smbfs/dir.c 		unlock_page(ctl.page);
page              198 fs/smbfs/dir.c 		page_cache_release(ctl.page);
page              199 fs/smbfs/dir.c 		ctl.page = NULL;
page              213 fs/smbfs/dir.c 	if (result == -ERESTARTSYS && page)
page              214 fs/smbfs/dir.c 		ClearPageUptodate(page);
page              220 fs/smbfs/dir.c 	if (page) {
page              222 fs/smbfs/dir.c 		kunmap(page);
page              224 fs/smbfs/dir.c 			SetPageUptodate(page);
page              225 fs/smbfs/dir.c 		unlock_page(page);
page              226 fs/smbfs/dir.c 		page_cache_release(page);
page              228 fs/smbfs/dir.c 	if (ctl.page) {
page              229 fs/smbfs/dir.c 		kunmap(ctl.page);
page              230 fs/smbfs/dir.c 		SetPageUptodate(ctl.page);
page              231 fs/smbfs/dir.c 		unlock_page(ctl.page);
page              232 fs/smbfs/dir.c 		page_cache_release(ctl.page);
page               57 fs/smbfs/file.c 	char *buffer = kmap(page);
page               58 fs/smbfs/file.c 	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
page               89 fs/smbfs/file.c 	flush_dcache_page(page);
page               90 fs/smbfs/file.c 	SetPageUptodate(page);
page               94 fs/smbfs/file.c 	kunmap(page);
page               95 fs/smbfs/file.c 	unlock_page(page);
page              108 fs/smbfs/file.c 	page_cache_get(page);
page              109 fs/smbfs/file.c 	error = smb_readpage_sync(dentry, page);
page              110 fs/smbfs/file.c 	page_cache_release(page);
page              123 fs/smbfs/file.c 	char *buffer = kmap(page) + pageoffset;
page              128 fs/smbfs/file.c 	offset = ((loff_t)page->index << PAGE_CACHE_SHIFT) + pageoffset;
page              163 fs/smbfs/file.c 	kunmap(page);
page              176 fs/smbfs/file.c 	struct address_space *mapping = page->mapping;
page              189 fs/smbfs/file.c 	if (page->index < end_index)
page              194 fs/smbfs/file.c 	if (page->index >= end_index+1 || !offset)
page              197 fs/smbfs/file.c 	page_cache_get(page);
page              198 fs/smbfs/file.c 	err = smb_writepage_sync(inode, page, 0, offset);
page              199 fs/smbfs/file.c 	SetPageUptodate(page);
page              200 fs/smbfs/file.c 	unlock_page(page);
page              201 fs/smbfs/file.c 	page_cache_release(page);
page              212 fs/smbfs/file.c 		((unsigned long long)page->index << PAGE_CACHE_SHIFT) + offset);
page              214 fs/smbfs/file.c 	return smb_writepage_sync(dentry->d_inode, page, offset, count);
page              314 fs/smbfs/file.c 	status = smb_updatepage(file, page, offset, copied);
page              318 fs/smbfs/file.c 		if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
page              319 fs/smbfs/file.c 			SetPageUptodate(page);
page              323 fs/smbfs/file.c 	unlock_page(page);
page              324 fs/smbfs/file.c 	page_cache_release(page);
page               42 fs/splice.c    	struct page *page = buf->page;
page               45 fs/splice.c    	lock_page(page);
page               47 fs/splice.c    	mapping = page_mapping(page);
page               49 fs/splice.c    		WARN_ON(!PageUptodate(page));
page               59 fs/splice.c    		wait_on_page_writeback(page);
page               61 fs/splice.c    		if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
page               68 fs/splice.c    		if (remove_mapping(mapping, page)) {
page               79 fs/splice.c    	unlock_page(page);
page               86 fs/splice.c    	page_cache_release(buf->page);
page               97 fs/splice.c    	struct page *page = buf->page;
page              100 fs/splice.c    	if (!PageUptodate(page)) {
page              101 fs/splice.c    		lock_page(page);
page              107 fs/splice.c    		if (!page->mapping) {
page              115 fs/splice.c    		if (!PageUptodate(page)) {
page              123 fs/splice.c    		unlock_page(page);
page              128 fs/splice.c    	unlock_page(page);
page              198 fs/splice.c    			buf->page = spd->pages[page_nr];
page              275 fs/splice.c    	struct page *pages[PIPE_BUFFERS];
page              277 fs/splice.c    	struct page *page;
page              314 fs/splice.c    		page = find_get_page(mapping, index);
page              315 fs/splice.c    		if (!page) {
page              319 fs/splice.c    			page = page_cache_alloc_cold(mapping);
page              320 fs/splice.c    			if (!page)
page              323 fs/splice.c    			error = add_to_page_cache_lru(page, mapping, index,
page              326 fs/splice.c    				page_cache_release(page);
page              335 fs/splice.c    			unlock_page(page);
page              338 fs/splice.c    		pages[spd.nr_pages++] = page;
page              359 fs/splice.c    		page = pages[page_nr];
page              361 fs/splice.c    		if (PageReadahead(page))
page              363 fs/splice.c    					page, index, req_pages - page_nr);
page              368 fs/splice.c    		if (!PageUptodate(page)) {
page              374 fs/splice.c    				if (!trylock_page(page)) {
page              379 fs/splice.c    				lock_page(page);
page              387 fs/splice.c    			if (!page->mapping) {
page              388 fs/splice.c    				unlock_page(page);
page              389 fs/splice.c    				page = find_or_create_page(mapping, index,
page              392 fs/splice.c    				if (!page) {
page              397 fs/splice.c    				pages[page_nr] = page;
page              402 fs/splice.c    			if (PageUptodate(page)) {
page              403 fs/splice.c    				unlock_page(page);
page              410 fs/splice.c    			error = mapping->a_ops->readpage(in, page);
page              529 fs/splice.c    		ret = file->f_op->sendpage(file, buf->page, buf->offset,
page              562 fs/splice.c    	struct page *page;
page              580 fs/splice.c    				AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
page              584 fs/splice.c    	if (buf->page != page) {
page              589 fs/splice.c    		char *dst = kmap_atomic(page, KM_USER1);
page              592 fs/splice.c    		flush_dcache_page(page);
page              597 fs/splice.c    				page, fsdata);
page             1399 fs/splice.c    	struct page *pages[PIPE_BUFFERS];
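The splice pipe-buffer code above (fs/splice.c lines 97-128) revalidates a page that may still be under asynchronous read I/O: take the page lock, confirm the page was not truncated (page->mapping still set), and confirm the read succeeded. A sketch of that confirm step, assuming the caller already holds a reference on the page:

#include <linux/errno.h>
#include <linux/pagemap.h>

static int example_confirm(struct page *page)
{
	if (PageUptodate(page))
		return 0;		/* fast path: data already valid */

	lock_page(page);		/* wait for any read in flight */
	if (!page->mapping) {		/* truncated while unlocked */
		unlock_page(page);
		return -ENODATA;
	}
	if (!PageUptodate(page)) {	/* the read itself failed */
		unlock_page(page);
		return -EIO;
	}
	unlock_page(page);
	return 0;
}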
page               47 fs/sysfs/file.c 	char			* page;
page               74 fs/sysfs/file.c 	if (!buffer->page)
page               75 fs/sysfs/file.c 		buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
page               76 fs/sysfs/file.c 	if (!buffer->page)
page               84 fs/sysfs/file.c 	count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
page              139 fs/sysfs/file.c 		 __func__, count, *ppos, buffer->page);
page              140 fs/sysfs/file.c 	retval = simple_read_from_buffer(buf, count, ppos, buffer->page,
page              162 fs/sysfs/file.c 	if (!buffer->page)
page              163 fs/sysfs/file.c 		buffer->page = (char *)get_zeroed_page(GFP_KERNEL);
page              164 fs/sysfs/file.c 	if (!buffer->page)
page              169 fs/sysfs/file.c 	error = copy_from_user(buffer->page,buf,count);
page              173 fs/sysfs/file.c 	buffer->page[count] = 0;
page              201 fs/sysfs/file.c 	rc = ops->store(kobj, attr_sd->s_attr.attr, buffer->page, count);
page              399 fs/sysfs/file.c 	if (buffer->page)
page              400 fs/sysfs/file.c 		free_page((unsigned long)buffer->page);
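sysfs above caps every attribute read and write at one page, allocated lazily with get_zeroed_page() on first use and freed with free_page() on release. A sketch of the same buffer lifecycle; struct example_buffer is a hypothetical stand-in for sysfs_buffer:

#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical stand-in for sysfs_buffer. */
struct example_buffer {
	char *page;			/* one zeroed page, allocated lazily */
};

static int example_fill(struct example_buffer *buffer)
{
	if (!buffer->page)
		buffer->page = (char *)get_zeroed_page(GFP_KERNEL);
	if (!buffer->page)
		return -ENOMEM;
	return 0;			/* at most PAGE_SIZE of data fits */
}

static void example_release(struct example_buffer *buffer)
{
	if (buffer->page)
		free_page((unsigned long)buffer->page);
}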
page              194 fs/sysfs/symlink.c 	unsigned long page = get_zeroed_page(GFP_KERNEL);
page              195 fs/sysfs/symlink.c 	if (page)
page              196 fs/sysfs/symlink.c 		error = sysfs_getlink(dentry, (char *) page); 
page              197 fs/sysfs/symlink.c 	nd_set_link(nd, error ? ERR_PTR(error) : (char *)page);
page              203 fs/sysfs/symlink.c 	char *page = nd_get_link(nd);
page              204 fs/sysfs/symlink.c 	if (!IS_ERR(page))
page              205 fs/sysfs/symlink.c 		free_page((unsigned long)page);
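The symlink code above shows the ->follow_link()/->put_link() contract: the page allocated in follow_link travels through nd_set_link() and is reclaimed in put_link via nd_get_link(). A sketch of that pairing using the 2.6-era signatures; the body that fills the buffer with the target path is elided:

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/namei.h>

static void *example_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);
	int error = page ? 0 : -ENOMEM;

	/* on success the real code copies the link target into the page */
	nd_set_link(nd, error ? ERR_PTR(error) : (char *)page);
	return NULL;
}

static void example_put_link(struct dentry *dentry, struct nameidata *nd,
			     void *cookie)
{
	char *page = nd_get_link(nd);

	if (!IS_ERR(page))		/* only free what follow_link made */
		free_page((unsigned long)page);
}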
page               32 fs/sysv/dir.c  	kunmap(page);
page               33 fs/sysv/dir.c  	page_cache_release(page);
page               43 fs/sysv/dir.c  	struct address_space *mapping = page->mapping;
page               47 fs/sysv/dir.c  	block_write_end(NULL, mapping, pos, len, len, page, NULL);
page               53 fs/sysv/dir.c  		err = write_one_page(page, 1);
page               55 fs/sysv/dir.c  		unlock_page(page);
page               59 fs/sysv/dir.c  static struct page * dir_get_page(struct inode *dir, unsigned long n)
page               62 fs/sysv/dir.c  	struct page *page = read_mapping_page(mapping, n, NULL);
page               63 fs/sysv/dir.c  	if (!IS_ERR(page))
page               64 fs/sysv/dir.c  		kmap(page);
page               65 fs/sysv/dir.c  	return page;
page               86 fs/sysv/dir.c  		struct page *page = dir_get_page(inode, n);
page               88 fs/sysv/dir.c  		if (IS_ERR(page))
page               90 fs/sysv/dir.c  		kaddr = (char *)page_address(page);
page              107 fs/sysv/dir.c  				dir_put_page(page);
page              111 fs/sysv/dir.c  		dir_put_page(page);
page              146 fs/sysv/dir.c  	struct page *page = NULL;
page              158 fs/sysv/dir.c  		page = dir_get_page(dir, n);
page              159 fs/sysv/dir.c  		if (!IS_ERR(page)) {
page              160 fs/sysv/dir.c  			kaddr = (char*)page_address(page);
page              171 fs/sysv/dir.c  		dir_put_page(page);
page              181 fs/sysv/dir.c  	*res_page = page;
page              190 fs/sysv/dir.c  	struct page *page = NULL;
page              200 fs/sysv/dir.c  		page = dir_get_page(dir, n);
page              201 fs/sysv/dir.c  		err = PTR_ERR(page);
page              202 fs/sysv/dir.c  		if (IS_ERR(page))
page              204 fs/sysv/dir.c  		kaddr = (char*)page_address(page);
page              215 fs/sysv/dir.c  		dir_put_page(page);
page              221 fs/sysv/dir.c  	pos = page_offset(page) +
page              222 fs/sysv/dir.c  			(char*)de - (char*)page_address(page);
page              223 fs/sysv/dir.c  	lock_page(page);
page              224 fs/sysv/dir.c  	err = __sysv_write_begin(NULL, page->mapping, pos, SYSV_DIRSIZE,
page              225 fs/sysv/dir.c  				AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
page              231 fs/sysv/dir.c  	err = dir_commit_chunk(page, pos, SYSV_DIRSIZE);
page              235 fs/sysv/dir.c  	dir_put_page(page);
page              239 fs/sysv/dir.c  	unlock_page(page);
page              245 fs/sysv/dir.c  	struct address_space *mapping = page->mapping;
page              247 fs/sysv/dir.c  	char *kaddr = (char*)page_address(page);
page              248 fs/sysv/dir.c  	loff_t pos = page_offset(page) + (char *)de - kaddr;
page              251 fs/sysv/dir.c  	lock_page(page);
page              253 fs/sysv/dir.c  				AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
page              256 fs/sysv/dir.c  	err = dir_commit_chunk(page, pos, SYSV_DIRSIZE);
page              257 fs/sysv/dir.c  	dir_put_page(page);
page              266 fs/sysv/dir.c  	struct page *page = grab_cache_page(mapping, 0);
page              271 fs/sysv/dir.c  	if (!page)
page              274 fs/sysv/dir.c  				AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
page              276 fs/sysv/dir.c  		unlock_page(page);
page              279 fs/sysv/dir.c  	kmap(page);
page              281 fs/sysv/dir.c  	base = (char*)page_address(page);
page              291 fs/sysv/dir.c  	kunmap(page);
page              292 fs/sysv/dir.c  	err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE);
page              294 fs/sysv/dir.c  	page_cache_release(page);
page              304 fs/sysv/dir.c  	struct page *page = NULL;
page              310 fs/sysv/dir.c  		page = dir_get_page(inode, i);
page              312 fs/sysv/dir.c  		if (IS_ERR(page))
page              315 fs/sysv/dir.c  		kaddr = (char *)page_address(page);
page              334 fs/sysv/dir.c  		dir_put_page(page);
page              339 fs/sysv/dir.c  	dir_put_page(page);
page              347 fs/sysv/dir.c  	struct address_space *mapping = page->mapping;
page              349 fs/sysv/dir.c  	loff_t pos = page_offset(page) +
page              350 fs/sysv/dir.c  			(char *)de-(char*)page_address(page);
page              353 fs/sysv/dir.c  	lock_page(page);
page              355 fs/sysv/dir.c  				AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
page              358 fs/sysv/dir.c  	err = dir_commit_chunk(page, pos, SYSV_DIRSIZE);
page              359 fs/sysv/dir.c  	dir_put_page(page);
page              366 fs/sysv/dir.c  	struct page *page = dir_get_page(dir, 0);
page              369 fs/sysv/dir.c  	if (!IS_ERR(page)) {
page              370 fs/sysv/dir.c  		de = (struct sysv_dir_entry*) page_address(page) + 1;
page              371 fs/sysv/dir.c  		*p = page;
page              378 fs/sysv/dir.c  	struct page *page;
page              379 fs/sysv/dir.c  	struct sysv_dir_entry *de = sysv_find_entry (dentry, &page);
page              384 fs/sysv/dir.c  		dir_put_page(page);
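fs/sysv/dir.c above walks directories one page-cache page at a time through dir_get_page()/dir_put_page(), which pair read_mapping_page() with kmap()/kunmap(). A sketch of that walk using the same helpers; scan_block() is a hypothetical predicate:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical predicate deciding whether a directory block matches. */
static int scan_block(char *kaddr);

static int example_scan_dir(struct inode *dir)
{
	unsigned long npages = (dir->i_size + PAGE_CACHE_SIZE - 1)
						>> PAGE_CACHE_SHIFT;
	unsigned long n;

	for (n = 0; n < npages; n++) {
		struct page *page = dir_get_page(dir, n);

		if (IS_ERR(page))
			return PTR_ERR(page);
		/* page_address() is valid: dir_get_page() kmap()ed it */
		if (scan_block((char *)page_address(page))) {
			dir_put_page(page);	/* kunmap + release */
			return 1;		/* found */
		}
		dir_put_page(page);
	}
	return 0;
}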
page              454 fs/sysv/itree.c 	return block_write_full_page(page,get_block,wbc);
page              459 fs/sysv/itree.c 	return block_read_full_page(page,get_block);
page              176 fs/sysv/namei.c 	struct page * page;
page              180 fs/sysv/namei.c 	de = sysv_find_entry(dentry, &page);
page              184 fs/sysv/namei.c 	err = sysv_delete_entry (de, page);
page              219 fs/sysv/namei.c 	struct page * dir_page = NULL;
page              221 fs/sysv/namei.c 	struct page * old_page;
page              237 fs/sysv/namei.c 		struct page * new_page;
page              111 fs/ubifs/file.c 	struct inode *inode = page->mapping->host;
page              115 fs/ubifs/file.c 		inode->i_ino, page->index, i_size, page->flags);
page              116 fs/ubifs/file.c 	ubifs_assert(!PageChecked(page));
page              117 fs/ubifs/file.c 	ubifs_assert(!PagePrivate(page));
page              119 fs/ubifs/file.c 	addr = kmap(page);
page              121 fs/ubifs/file.c 	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
page              125 fs/ubifs/file.c 		SetPageChecked(page);
page              160 fs/ubifs/file.c 			SetPageChecked(page);
page              165 fs/ubifs/file.c 			  page->index, inode->i_ino, err);
page              172 fs/ubifs/file.c 	SetPageUptodate(page);
page              173 fs/ubifs/file.c 	ClearPageError(page);
page              174 fs/ubifs/file.c 	flush_dcache_page(page);
page              175 fs/ubifs/file.c 	kunmap(page);
page              180 fs/ubifs/file.c 	ClearPageUptodate(page);
page              181 fs/ubifs/file.c 	SetPageError(page);
page              182 fs/ubifs/file.c 	flush_dcache_page(page);
page              183 fs/ubifs/file.c 	kunmap(page);
page              223 fs/ubifs/file.c 	struct page *page;
page              244 fs/ubifs/file.c 	page = __grab_cache_page(mapping, index);
page              245 fs/ubifs/file.c 	if (unlikely(!page)) {
page              250 fs/ubifs/file.c 	if (!PageUptodate(page)) {
page              252 fs/ubifs/file.c 			SetPageChecked(page);
page              254 fs/ubifs/file.c 			err = do_readpage(page);
page              256 fs/ubifs/file.c 				unlock_page(page);
page              257 fs/ubifs/file.c 				page_cache_release(page);
page              262 fs/ubifs/file.c 		SetPageUptodate(page);
page              263 fs/ubifs/file.c 		ClearPageError(page);
page              266 fs/ubifs/file.c 	if (PagePrivate(page))
page              278 fs/ubifs/file.c 	else if (!PageChecked(page))
page              304 fs/ubifs/file.c 	*pagep = page;
page              326 fs/ubifs/file.c 	if (PagePrivate(page)) {
page              353 fs/ubifs/file.c 		if (PageChecked(page))
page              426 fs/ubifs/file.c 	struct page *page;
page              435 fs/ubifs/file.c 	page = __grab_cache_page(mapping, index);
page              436 fs/ubifs/file.c 	if (unlikely(!page))
page              439 fs/ubifs/file.c 	if (!PageUptodate(page)) {
page              449 fs/ubifs/file.c 			SetPageChecked(page);
page              451 fs/ubifs/file.c 			err = do_readpage(page);
page              453 fs/ubifs/file.c 				unlock_page(page);
page              454 fs/ubifs/file.c 				page_cache_release(page);
page              459 fs/ubifs/file.c 		SetPageUptodate(page);
page              460 fs/ubifs/file.c 		ClearPageError(page);
page              463 fs/ubifs/file.c 	err = allocate_budget(c, page, ui, appending);
page              477 fs/ubifs/file.c 		unlock_page(page);
page              478 fs/ubifs/file.c 		page_cache_release(page);
page              489 fs/ubifs/file.c 	*pagep = page;
page              512 fs/ubifs/file.c 	if (!PagePrivate(page)) {
page              513 fs/ubifs/file.c 		if (PageChecked(page))
page              531 fs/ubifs/file.c 		inode->i_ino, pos, page->index, len, copied, inode->i_size);
page              545 fs/ubifs/file.c 		cancel_budget(c, page, ui, appending);
page              551 fs/ubifs/file.c 		copied = do_readpage(page);
page              555 fs/ubifs/file.c 	if (!PagePrivate(page)) {
page              556 fs/ubifs/file.c 		SetPagePrivate(page);
page              558 fs/ubifs/file.c 		__set_page_dirty_nobuffers(page);
page              575 fs/ubifs/file.c 	unlock_page(page);
page              576 fs/ubifs/file.c 	page_cache_release(page);
page              582 fs/ubifs/file.c 	do_readpage(page);
page              583 fs/ubifs/file.c 	unlock_page(page);
page              593 fs/ubifs/file.c 	struct inode *inode = page->mapping->host;
page              598 fs/ubifs/file.c 	ubifs_assert(page->index <= ui->synced_i_size >> PAGE_CACHE_SHIFT);
page              603 fs/ubifs/file.c 	set_page_writeback(page);
page              605 fs/ubifs/file.c 	addr = kmap(page);
page              606 fs/ubifs/file.c 	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
page              621 fs/ubifs/file.c 		SetPageError(page);
page              623 fs/ubifs/file.c 			  page->index, inode->i_ino, err);
page              627 fs/ubifs/file.c 	ubifs_assert(PagePrivate(page));
page              628 fs/ubifs/file.c 	if (PageChecked(page))
page              634 fs/ubifs/file.c 	ClearPagePrivate(page);
page              635 fs/ubifs/file.c 	ClearPageChecked(page);
page              637 fs/ubifs/file.c 	kunmap(page);
page              638 fs/ubifs/file.c 	unlock_page(page);
page              639 fs/ubifs/file.c 	end_page_writeback(page);
page              687 fs/ubifs/file.c 	struct inode *inode = page->mapping->host;
page              695 fs/ubifs/file.c 		inode->i_ino, page->index, page->flags);
page              696 fs/ubifs/file.c 	ubifs_assert(PagePrivate(page));
page              699 fs/ubifs/file.c 	if (page->index > end_index || (page->index == end_index && !len)) {
page              709 fs/ubifs/file.c 	if (page->index < end_index) {
page              710 fs/ubifs/file.c 		if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) {
page              723 fs/ubifs/file.c 		return do_writepage(page, PAGE_CACHE_SIZE);
page              733 fs/ubifs/file.c 	kaddr = kmap_atomic(page, KM_USER0);
page              735 fs/ubifs/file.c 	flush_dcache_page(page);
page              744 fs/ubifs/file.c 	return do_writepage(page, len);
page              747 fs/ubifs/file.c 	unlock_page(page);
page              830 fs/ubifs/file.c 		struct page *page;
page              832 fs/ubifs/file.c 		page = find_lock_page(inode->i_mapping, index);
page              833 fs/ubifs/file.c 		if (page) {
page              834 fs/ubifs/file.c 			if (PageDirty(page)) {
page              843 fs/ubifs/file.c 				ubifs_assert(PagePrivate(page));
page              845 fs/ubifs/file.c 				clear_page_dirty_for_io(page);
page              849 fs/ubifs/file.c 				err = do_writepage(page, offset);
page              850 fs/ubifs/file.c 				page_cache_release(page);
page              863 fs/ubifs/file.c 				unlock_page(page);
page              864 fs/ubifs/file.c 				page_cache_release(page);
page              977 fs/ubifs/file.c 	struct inode *inode = page->mapping->host;
page              980 fs/ubifs/file.c 	ubifs_assert(PagePrivate(page));
page              985 fs/ubifs/file.c 	if (PageChecked(page))
page              991 fs/ubifs/file.c 	ClearPagePrivate(page);
page              992 fs/ubifs/file.c 	ClearPageChecked(page);
page             1114 fs/ubifs/file.c 	ret = __set_page_dirty_nobuffers(page);
page             1129 fs/ubifs/file.c 	if (PageWriteback(page))
page             1131 fs/ubifs/file.c 	ubifs_assert(PagePrivate(page));
page             1133 fs/ubifs/file.c 	ClearPagePrivate(page);
page             1134 fs/ubifs/file.c 	ClearPageChecked(page);
page             1150 fs/ubifs/file.c 	dbg_gen("ino %lu, pg %lu, i_size %lld",	inode->i_ino, page->index,
page             1191 fs/ubifs/file.c 	lock_page(page);
page             1192 fs/ubifs/file.c 	if (unlikely(page->mapping != inode->i_mapping ||
page             1193 fs/ubifs/file.c 		     page_offset(page) > i_size_read(inode))) {
page             1199 fs/ubifs/file.c 	if (PagePrivate(page))
page             1202 fs/ubifs/file.c 		if (!PageChecked(page))
page             1204 fs/ubifs/file.c 		SetPagePrivate(page);
page             1206 fs/ubifs/file.c 		__set_page_dirty_nobuffers(page);
page             1222 fs/ubifs/file.c 	unlock_page(page);
page             1226 fs/ubifs/file.c 	unlock_page(page);
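The tail of fs/ubifs/file.c above (lines 1191-1199) is the page_mkwrite-style revalidation: once the page lock is taken, both truncation and a shrunken i_size must be rechecked before dirtying the page. A sketch of just that check, assuming the caller supplies a referenced page:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

static int example_mkwrite_check(struct inode *inode, struct page *page)
{
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
		     page_offset(page) > i_size_read(inode))) {
		unlock_page(page);	/* truncated or shrunk under us */
		return -EINVAL;
	}
	return 0;			/* caller proceeds, page still locked */
}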
page               45 fs/udf/file.c  	struct inode *inode = page->mapping->host;
page               49 fs/udf/file.c  	BUG_ON(!PageLocked(page));
page               51 fs/udf/file.c  	kaddr = kmap(page);
page               54 fs/udf/file.c  	flush_dcache_page(page);
page               55 fs/udf/file.c  	SetPageUptodate(page);
page               56 fs/udf/file.c  	kunmap(page);
page               57 fs/udf/file.c  	unlock_page(page);
page               65 fs/udf/file.c  	struct inode *inode = page->mapping->host;
page               69 fs/udf/file.c  	BUG_ON(!PageLocked(page));
page               71 fs/udf/file.c  	kaddr = kmap(page);
page               74 fs/udf/file.c  	SetPageUptodate(page);
page               75 fs/udf/file.c  	kunmap(page);
page               76 fs/udf/file.c  	unlock_page(page);
page               91 fs/udf/file.c  	kaddr = kmap_atomic(page, KM_USER0);
page               96 fs/udf/file.c  	return simple_write_end(file, mapping, pos, len, copied, page, fsdata);
page              117 fs/udf/inode.c 	return block_write_full_page(page, udf_get_block, wbc);
page              122 fs/udf/inode.c 	return block_read_full_page(page, udf_get_block);
page              150 fs/udf/inode.c 	struct page *page;
page              170 fs/udf/inode.c 	page = grab_cache_page(inode->i_mapping, 0);
page              171 fs/udf/inode.c 	BUG_ON(!PageLocked(page));
page              173 fs/udf/inode.c 	if (!PageUptodate(page)) {
page              174 fs/udf/inode.c 		kaddr = kmap(page);
page              179 fs/udf/inode.c 		flush_dcache_page(page);
page              180 fs/udf/inode.c 		SetPageUptodate(page);
page              181 fs/udf/inode.c 		kunmap(page);
page              191 fs/udf/inode.c 	inode->i_data.a_ops->writepage(page, &udf_wbc);
page              192 fs/udf/inode.c 	page_cache_release(page);
page               76 fs/udf/symlink.c 	struct inode *inode = page->mapping->host;
page               80 fs/udf/symlink.c 	char *p = kmap(page);
page              100 fs/udf/symlink.c 	SetPageUptodate(page);
page              101 fs/udf/symlink.c 	kunmap(page);
page              102 fs/udf/symlink.c 	unlock_page(page);
page              107 fs/udf/symlink.c 	SetPageError(page);
page              108 fs/udf/symlink.c 	kunmap(page);
page              109 fs/udf/symlink.c 	unlock_page(page);
page              259 fs/ufs/balloc.c 	struct page *page;
page              276 fs/ufs/balloc.c 			page = ufs_get_locked_page(mapping, index);
page              277 fs/ufs/balloc.c 			if (!page) /* it was truncated */
page              279 fs/ufs/balloc.c 			if (IS_ERR(page)) {/* or EIO */
page              286 fs/ufs/balloc.c 			page = locked_page;
page              288 fs/ufs/balloc.c 		head = page_buffers(page);
page              330 fs/ufs/balloc.c 			ufs_put_locked_page(page);
page               45 fs/ufs/dir.c   	struct address_space *mapping = page->mapping;
page               50 fs/ufs/dir.c   	block_write_end(NULL, mapping, pos, len, len, page, NULL);
page               56 fs/ufs/dir.c   		err = write_one_page(page, 1);
page               58 fs/ufs/dir.c   		unlock_page(page);
page               64 fs/ufs/dir.c   	kunmap(page);
page               65 fs/ufs/dir.c   	page_cache_release(page);
page               77 fs/ufs/dir.c   	struct page *page;
page               79 fs/ufs/dir.c   	de = ufs_find_entry(dir, dentry, &page);
page               82 fs/ufs/dir.c   		ufs_put_page(page);
page               92 fs/ufs/dir.c   	loff_t pos = page_offset(page) +
page               93 fs/ufs/dir.c   			(char *) de - (char *) page_address(page);
page               97 fs/ufs/dir.c   	lock_page(page);
page               98 fs/ufs/dir.c   	err = __ufs_write_begin(NULL, page->mapping, pos, len,
page               99 fs/ufs/dir.c   				AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
page              105 fs/ufs/dir.c   	err = ufs_commit_chunk(page, pos, len);
page              106 fs/ufs/dir.c   	ufs_put_page(page);
page              114 fs/ufs/dir.c   	struct inode *dir = page->mapping->host;
page              116 fs/ufs/dir.c   	char *kaddr = page_address(page);
page              123 fs/ufs/dir.c   	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
page              149 fs/ufs/dir.c   	SetPageChecked(page);
page              177 fs/ufs/dir.c   		   dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
page              185 fs/ufs/dir.c   		   dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs);
page              187 fs/ufs/dir.c   	SetPageChecked(page);
page              188 fs/ufs/dir.c   	SetPageError(page);
page              191 fs/ufs/dir.c   static struct page *ufs_get_page(struct inode *dir, unsigned long n)
page              194 fs/ufs/dir.c   	struct page *page = read_mapping_page(mapping, n, NULL);
page              195 fs/ufs/dir.c   	if (!IS_ERR(page)) {
page              196 fs/ufs/dir.c   		kmap(page);
page              197 fs/ufs/dir.c   		if (!PageChecked(page))
page              198 fs/ufs/dir.c   			ufs_check_page(page);
page              199 fs/ufs/dir.c   		if (PageError(page))
page              202 fs/ufs/dir.c   	return page;
page              205 fs/ufs/dir.c   	ufs_put_page(page);
page              233 fs/ufs/dir.c   	struct page *page = ufs_get_page(dir, 0);
page              236 fs/ufs/dir.c   	if (!IS_ERR(page)) {
page              238 fs/ufs/dir.c   				    (struct ufs_dir_entry *)page_address(page));
page              239 fs/ufs/dir.c   		*p = page;
page              261 fs/ufs/dir.c   	struct page *page = NULL;
page              280 fs/ufs/dir.c   		page = ufs_get_page(dir, n);
page              281 fs/ufs/dir.c   		if (!IS_ERR(page)) {
page              282 fs/ufs/dir.c   			kaddr = page_address(page);
page              289 fs/ufs/dir.c   					ufs_put_page(page);
page              296 fs/ufs/dir.c   			ufs_put_page(page);
page              305 fs/ufs/dir.c   	*res_page = page;
page              322 fs/ufs/dir.c   	struct page *page = NULL;
page              340 fs/ufs/dir.c   		page = ufs_get_page(dir, n);
page              341 fs/ufs/dir.c   		err = PTR_ERR(page);
page              342 fs/ufs/dir.c   		if (IS_ERR(page))
page              344 fs/ufs/dir.c   		lock_page(page);
page              345 fs/ufs/dir.c   		kaddr = page_address(page);
page              375 fs/ufs/dir.c   		unlock_page(page);
page              376 fs/ufs/dir.c   		ufs_put_page(page);
page              382 fs/ufs/dir.c   	pos = page_offset(page) +
page              383 fs/ufs/dir.c   			(char*)de - (char*)page_address(page);
page              384 fs/ufs/dir.c   	err = __ufs_write_begin(NULL, page->mapping, pos, rec_len,
page              385 fs/ufs/dir.c   				AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
page              402 fs/ufs/dir.c   	err = ufs_commit_chunk(page, pos, rec_len);
page              408 fs/ufs/dir.c   	ufs_put_page(page);
page              412 fs/ufs/dir.c   	unlock_page(page);
page              456 fs/ufs/dir.c   		struct page *page = ufs_get_page(inode, n);
page              458 fs/ufs/dir.c   		if (IS_ERR(page)) {
page              465 fs/ufs/dir.c   		kaddr = page_address(page);
page              480 fs/ufs/dir.c   				ufs_put_page(page);
page              501 fs/ufs/dir.c   					ufs_put_page(page);
page              507 fs/ufs/dir.c   		ufs_put_page(page);
page              521 fs/ufs/dir.c   	struct address_space *mapping = page->mapping;
page              522 fs/ufs/dir.c   	char *kaddr = page_address(page);
page              548 fs/ufs/dir.c   		from = (char*)pde - (char*)page_address(page);
page              550 fs/ufs/dir.c   	pos = page_offset(page) + from;
page              551 fs/ufs/dir.c   	lock_page(page);
page              553 fs/ufs/dir.c   				AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
page              558 fs/ufs/dir.c   	err = ufs_commit_chunk(page, pos, to - from);
page              562 fs/ufs/dir.c   	ufs_put_page(page);
page              571 fs/ufs/dir.c   	struct page *page = grab_cache_page(mapping, 0);
page              577 fs/ufs/dir.c   	if (!page)
page              581 fs/ufs/dir.c   				AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
page              583 fs/ufs/dir.c   		unlock_page(page);
page              587 fs/ufs/dir.c   	kmap(page);
page              588 fs/ufs/dir.c   	base = (char*)page_address(page);
page              605 fs/ufs/dir.c   	kunmap(page);
page              607 fs/ufs/dir.c   	err = ufs_commit_chunk(page, 0, chunk_size);
page              609 fs/ufs/dir.c   	page_cache_release(page);
page              619 fs/ufs/dir.c   	struct page *page = NULL;
page              625 fs/ufs/dir.c   		page = ufs_get_page(inode, i);
page              627 fs/ufs/dir.c   		if (IS_ERR(page))
page              630 fs/ufs/dir.c   		kaddr = page_address(page);
page              657 fs/ufs/dir.c   		ufs_put_page(page);
page              662 fs/ufs/dir.c   	ufs_put_page(page);
page              560 fs/ufs/inode.c 	return block_write_full_page(page,ufs_getfrag_block,wbc);
page              565 fs/ufs/inode.c 	return block_read_full_page(page,ufs_getfrag_block);
page              237 fs/ufs/namei.c 	struct page *page;
page              240 fs/ufs/namei.c 	de = ufs_find_entry(dir, dentry, &page);
page              244 fs/ufs/namei.c 	err = ufs_delete_entry(dir, de, page);
page              278 fs/ufs/namei.c 	struct page *dir_page = NULL;
page              280 fs/ufs/namei.c 	struct page *old_page;
page              296 fs/ufs/namei.c 		struct page *new_page;
page              388 fs/ufs/truncate.c 	struct page *lastpage;
page              248 fs/ufs/util.c  struct page *ufs_get_locked_page(struct address_space *mapping,
page              251 fs/ufs/util.c  	struct page *page;
page              253 fs/ufs/util.c  	page = find_lock_page(mapping, index);
page              254 fs/ufs/util.c  	if (!page) {
page              255 fs/ufs/util.c  		page = read_mapping_page(mapping, index, NULL);
page              257 fs/ufs/util.c  		if (IS_ERR(page)) {
page              264 fs/ufs/util.c  		lock_page(page);
page              266 fs/ufs/util.c  		if (unlikely(page->mapping == NULL)) {
page              268 fs/ufs/util.c  			unlock_page(page);
page              269 fs/ufs/util.c  			page_cache_release(page);
page              270 fs/ufs/util.c  			page = NULL;
page              274 fs/ufs/util.c  		if (!PageUptodate(page) || PageError(page)) {
page              275 fs/ufs/util.c  			unlock_page(page);
page              276 fs/ufs/util.c  			page_cache_release(page);
page              282 fs/ufs/util.c  			page = ERR_PTR(-EIO);
page              286 fs/ufs/util.c  	return page;
page              284 fs/ufs/util.h  extern struct page *ufs_get_locked_page(struct address_space *mapping,
page              288 fs/ufs/util.h         unlock_page(page);
page              289 fs/ufs/util.h         page_cache_release(page);
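ufs_get_locked_page() above returns a locked page, NULL if the page was truncated away, or ERR_PTR(-EIO) if the backing read failed; ufs_put_locked_page() (fs/ufs/util.h) undoes it. A sketch of that calling convention, mirroring the use in fs/ufs/balloc.c:

#include <linux/err.h>
#include <linux/pagemap.h>

static int example_touch_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page = ufs_get_locked_page(mapping, index);

	if (!page)			/* truncated away: nothing to do */
		return 0;
	if (IS_ERR(page))		/* the backing read hit -EIO */
		return PTR_ERR(page);

	/* ... walk page_buffers(page) here; the page is locked ... */

	ufs_put_locked_page(page);	/* unlock_page + page_cache_release */
	return 0;
}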
page               56 fs/xfs/linux-2.6/xfs_aops.c 	bh = head = page_buffers(page);
page               77 fs/xfs/linux-2.6/xfs_aops.c 	loff_t		offset = page_offset(page);
page               80 fs/xfs/linux-2.6/xfs_aops.c 	if (page_has_buffers(page))
page               81 fs/xfs/linux-2.6/xfs_aops.c 		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
page               91 fs/xfs/linux-2.6/xfs_aops.c 		(void *)page,
page              414 fs/xfs/linux-2.6/xfs_aops.c 	ASSERT(PageLocked(page));
page              415 fs/xfs/linux-2.6/xfs_aops.c 	ASSERT(!PageWriteback(page));
page              417 fs/xfs/linux-2.6/xfs_aops.c 		clear_page_dirty_for_io(page);
page              418 fs/xfs/linux-2.6/xfs_aops.c 	set_page_writeback(page);
page              419 fs/xfs/linux-2.6/xfs_aops.c 	unlock_page(page);
page              422 fs/xfs/linux-2.6/xfs_aops.c 		end_page_writeback(page);
page              605 fs/xfs/linux-2.6/xfs_aops.c 	if (PageWriteback(page))
page              608 fs/xfs/linux-2.6/xfs_aops.c 	if (page->mapping && PageDirty(page)) {
page              609 fs/xfs/linux-2.6/xfs_aops.c 		if (page_has_buffers(page)) {
page              612 fs/xfs/linux-2.6/xfs_aops.c 			bh = head = page_buffers(page);
page              664 fs/xfs/linux-2.6/xfs_aops.c 			struct page *page = pvec.pages[i];
page              677 fs/xfs/linux-2.6/xfs_aops.c 			if (page->index == tindex && trylock_page(page)) {
page              678 fs/xfs/linux-2.6/xfs_aops.c 				pg_len = xfs_probe_page(page, pg_offset, mapped);
page              679 fs/xfs/linux-2.6/xfs_aops.c 				unlock_page(page);
page              707 fs/xfs/linux-2.6/xfs_aops.c 	if (PageWriteback(page))
page              710 fs/xfs/linux-2.6/xfs_aops.c 	if (page->mapping && page_has_buffers(page)) {
page              714 fs/xfs/linux-2.6/xfs_aops.c 		bh = head = page_buffers(page);
page              757 fs/xfs/linux-2.6/xfs_aops.c  	xfs_off_t		offset = page_offset(page);
page              759 fs/xfs/linux-2.6/xfs_aops.c 	if (page->index != tindex)
page              761 fs/xfs/linux-2.6/xfs_aops.c 	if (!trylock_page(page))
page              763 fs/xfs/linux-2.6/xfs_aops.c 	if (PageWriteback(page))
page              765 fs/xfs/linux-2.6/xfs_aops.c 	if (page->mapping != inode->i_mapping)
page              767 fs/xfs/linux-2.6/xfs_aops.c 	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
page              784 fs/xfs/linux-2.6/xfs_aops.c 			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
page              793 fs/xfs/linux-2.6/xfs_aops.c 	bh = head = page_buffers(page);
page              799 fs/xfs/linux-2.6/xfs_aops.c 		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
page              844 fs/xfs/linux-2.6/xfs_aops.c 		SetPageUptodate(page);
page              859 fs/xfs/linux-2.6/xfs_aops.c 		xfs_start_page_writeback(page, !page_dirty, count);
page              864 fs/xfs/linux-2.6/xfs_aops.c 	unlock_page(page);
page              956 fs/xfs/linux-2.6/xfs_aops.c 	if (page->index >= end_index) {
page              957 fs/xfs/linux-2.6/xfs_aops.c 		if ((page->index >= end_index + 1) ||
page              960 fs/xfs/linux-2.6/xfs_aops.c 				unlock_page(page);
page              979 fs/xfs/linux-2.6/xfs_aops.c 			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
page              986 fs/xfs/linux-2.6/xfs_aops.c 	bh = head = page_buffers(page);
page              987 fs/xfs/linux-2.6/xfs_aops.c 	offset = page_offset(page);
page              998 fs/xfs/linux-2.6/xfs_aops.c 		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
page             1021 fs/xfs/linux-2.6/xfs_aops.c 		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
page             1054 fs/xfs/linux-2.6/xfs_aops.c 							page, bh, head, 0);
page             1088 fs/xfs/linux-2.6/xfs_aops.c 				size = xfs_probe_cluster(inode, page, bh,
page             1117 fs/xfs/linux-2.6/xfs_aops.c 		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
page             1128 fs/xfs/linux-2.6/xfs_aops.c 		SetPageUptodate(page);
page             1131 fs/xfs/linux-2.6/xfs_aops.c 		xfs_start_page_writeback(page, 1, count);
page             1137 fs/xfs/linux-2.6/xfs_aops.c 		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
page             1157 fs/xfs/linux-2.6/xfs_aops.c 			block_invalidatepage(page, 0);
page             1158 fs/xfs/linux-2.6/xfs_aops.c 		ClearPageUptodate(page);
page             1191 fs/xfs/linux-2.6/xfs_aops.c 	struct inode		*inode = page->mapping->host;
page             1193 fs/xfs/linux-2.6/xfs_aops.c 	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
page             1203 fs/xfs/linux-2.6/xfs_aops.c 	if (!page_has_buffers(page)) {
page             1207 fs/xfs/linux-2.6/xfs_aops.c 		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
page             1208 fs/xfs/linux-2.6/xfs_aops.c 		if (!PageUptodate(page))
page             1226 fs/xfs/linux-2.6/xfs_aops.c 	if (!page_has_buffers(page))
page             1227 fs/xfs/linux-2.6/xfs_aops.c 		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
page             1233 fs/xfs/linux-2.6/xfs_aops.c 	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
page             1242 fs/xfs/linux-2.6/xfs_aops.c 	redirty_page_for_writepage(wbc, page);
page             1243 fs/xfs/linux-2.6/xfs_aops.c 	unlock_page(page);
page             1246 fs/xfs/linux-2.6/xfs_aops.c 	unlock_page(page);
page             1283 fs/xfs/linux-2.6/xfs_aops.c 	struct inode		*inode = page->mapping->host;
page             1290 fs/xfs/linux-2.6/xfs_aops.c 	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);
page             1292 fs/xfs/linux-2.6/xfs_aops.c 	if (!page_has_buffers(page))
page             1295 fs/xfs/linux-2.6/xfs_aops.c 	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
page             1314 fs/xfs/linux-2.6/xfs_aops.c 	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
page             1320 fs/xfs/linux-2.6/xfs_aops.c 	return try_to_free_buffers(page);
page             1552 fs/xfs/linux-2.6/xfs_aops.c 	return mpage_readpage(page, xfs_get_blocks);
page             1571 fs/xfs/linux-2.6/xfs_aops.c 			page->mapping->host, page, offset);
page             1572 fs/xfs/linux-2.6/xfs_aops.c 	block_invalidatepage(page, offset);
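xfs_start_page_writeback() above (fs/xfs/linux-2.6/xfs_aops.c lines 414-422) moves a locked page into writeback state and drops the lock, so I/O completion rather than the submitter finishes the page. A sketch of that transition with generic BUG_ON() checks standing in for XFS's ASSERT():

#include <linux/mm.h>
#include <linux/pagemap.h>

static void example_start_page_writeback(struct page *page, int clear_dirty,
					 int buffers)
{
	BUG_ON(!PageLocked(page));	/* generic stand-ins for ASSERT() */
	BUG_ON(PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);		/* completion now owns the page */
	if (!buffers)
		end_page_writeback(page);	/* nothing was submitted */
}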
page              151 fs/xfs/linux-2.6/xfs_buf.c 	set_page_private(page,
page              152 fs/xfs/linux-2.6/xfs_buf.c 		page_private(page) | page_region_mask(offset, length));
page              153 fs/xfs/linux-2.6/xfs_buf.c 	if (page_private(page) == ~0UL)
page              154 fs/xfs/linux-2.6/xfs_buf.c 		SetPageUptodate(page);
page              165 fs/xfs/linux-2.6/xfs_buf.c 	return (mask && (page_private(page) & mask) == mask);
page              295 fs/xfs/linux-2.6/xfs_buf.c 			bp->b_pages = kmem_alloc(sizeof(struct page *) *
page              300 fs/xfs/linux-2.6/xfs_buf.c 		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
page              339 fs/xfs/linux-2.6/xfs_buf.c 			struct page	*page = bp->b_pages[i];
page              342 fs/xfs/linux-2.6/xfs_buf.c 				ASSERT(!PagePrivate(page));
page              343 fs/xfs/linux-2.6/xfs_buf.c 			page_cache_release(page);
page              381 fs/xfs/linux-2.6/xfs_buf.c 		struct page	*page;
page              385 fs/xfs/linux-2.6/xfs_buf.c 		page = find_or_create_page(mapping, first + i, gfp_mask);
page              386 fs/xfs/linux-2.6/xfs_buf.c 		if (unlikely(page == NULL)) {
page              417 fs/xfs/linux-2.6/xfs_buf.c 		ASSERT(!PagePrivate(page));
page              418 fs/xfs/linux-2.6/xfs_buf.c 		if (!PageUptodate(page)) {
page              423 fs/xfs/linux-2.6/xfs_buf.c 			} else if (!PagePrivate(page)) {
page              424 fs/xfs/linux-2.6/xfs_buf.c 				if (test_page_region(page, offset, nbytes))
page              429 fs/xfs/linux-2.6/xfs_buf.c 		bp->b_pages[i] = page;
page              707 fs/xfs/linux-2.6/xfs_buf.c static inline struct page *
page             1121 fs/xfs/linux-2.6/xfs_buf.c 		struct page	*page = bvec->bv_page;
page             1123 fs/xfs/linux-2.6/xfs_buf.c 		ASSERT(!PagePrivate(page));
page             1126 fs/xfs/linux-2.6/xfs_buf.c 				ClearPageUptodate(page);
page             1128 fs/xfs/linux-2.6/xfs_buf.c 			SetPageUptodate(page);
page             1129 fs/xfs/linux-2.6/xfs_buf.c 		} else if (!PagePrivate(page) &&
page             1131 fs/xfs/linux-2.6/xfs_buf.c 			set_page_region(page, bvec->bv_offset, bvec->bv_len);
page             1138 fs/xfs/linux-2.6/xfs_buf.c 			unlock_page(page);
page             1285 fs/xfs/linux-2.6/xfs_buf.c 	struct page		*page;
page             1291 fs/xfs/linux-2.6/xfs_buf.c 	page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
page             1292 fs/xfs/linux-2.6/xfs_buf.c 	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
page             1307 fs/xfs/linux-2.6/xfs_buf.c 	struct page		*page;
page             1311 fs/xfs/linux-2.6/xfs_buf.c 		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
page             1320 fs/xfs/linux-2.6/xfs_buf.c 			memset(page_address(page) + cpoff, 0, csize);
page             1323 fs/xfs/linux-2.6/xfs_buf.c 			memcpy(data, page_address(page) + cpoff, csize);
page             1326 fs/xfs/linux-2.6/xfs_buf.c 			memcpy(page_address(page) + cpoff, data, csize);
page              175 fs/xfs/linux-2.6/xfs_buf.h 	struct page		**b_pages;	/* array of page pointers */
page              176 fs/xfs/linux-2.6/xfs_buf.h 	struct page		*b_page_array[XB_PAGES]; /* inline pages */
page              432 fs/xfs/linux-2.6/xfs_file.c 	return block_page_mkwrite(vma, page, xfs_get_blocks);
page              136 fs/xfs/linux-2.6/xfs_lrw.c 	struct page		*page;
page              152 fs/xfs/linux-2.6/xfs_lrw.c 					&page, &fsdata);
page              156 fs/xfs/linux-2.6/xfs_lrw.c 		zero_user(page, offset, bytes);
page              159 fs/xfs/linux-2.6/xfs_lrw.c 					page, fsdata);
page               68 include/asm-cris/dma-mapping.h 	return page_to_phys(page) + offset;
page               12 include/asm-cris/page.h #define clear_page(page)        memset((void *)(page), 0, PAGE_SIZE)
page               15 include/asm-cris/page.h #define clear_user_page(page, vaddr, pg)    clear_page(page)
page               29 include/asm-cris/page.h typedef struct page *pgtable_t;
page               54 include/asm-cris/page.h #define VALID_PAGE(page)       (((page) - mem_map) < max_mapnr)
page               61 include/asm-cris/page.h #define page_to_phys(page)     __pa((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
page               33 include/asm-cris/pgalloc.h 	struct page *pte;
page              183 include/asm-cris/pgtable.h 	pte_val(pte) = __pa(page) | pgprot_val(pgprot);
page              187 include/asm-cris/pgtable.h #define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))
page              216 include/asm-cris/pgtable.h #define __page_address(page)    (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
page               55 include/asm-frv/cacheflush.h 	unsigned long addr = page_to_phys(page);
page               62 include/asm-frv/cacheflush.h 	flush_dcache_page(page);
page               88 include/asm-frv/cacheflush.h 	flush_icache_user_range(vma, page, page_to_phys(page), PAGE_SIZE);
page               98 include/asm-frv/cacheflush.h 	flush_icache_user_range((vma), (page), (vaddr), (len));	\
page               64 include/asm-frv/highmem.h extern struct page *kmap_atomic_to_page(void *ptr);
page              119 include/asm-frv/highmem.h 	paddr = page_to_phys(page);
page               28 include/asm-frv/page.h typedef struct page *pgtable_t;
page              423 include/asm-frv/pgtable.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
page               37 include/asm-frv/virtconvert.h #define __page_address(page)	(PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
page               38 include/asm-frv/virtconvert.h #define page_to_phys(page)	virt_to_phys((void *)__page_address(page))
page               78 include/asm-generic/dma-mapping.h 	return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
page               31 include/asm-generic/memory_model.h #define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
page               42 include/asm-generic/memory_model.h ({	struct page *__pg = (pg);					\
page               52 include/asm-generic/memory_model.h #define __page_to_pfn(page)	((page) - vmemmap)
page               60 include/asm-generic/memory_model.h ({	struct page *__pg = (pg);				\
page               73 include/asm-generic/memory_model.h struct page;
page               75 include/asm-generic/memory_model.h extern struct page *pfn_to_page(unsigned long pfn);
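The FLATMEM branch of asm-generic/memory_model.h above reduces pfn/struct page translation to pointer arithmetic over a single contiguous mem_map[], biased by the first valid pfn. A sketch with the offset spelled out; EXAMPLE_ARCH_PFN_OFFSET is hypothetical (real arches define ARCH_PFN_OFFSET):

#include <linux/mm.h>

#define EXAMPLE_ARCH_PFN_OFFSET	0UL	/* hypothetical; arch-specific */

static inline unsigned long example_page_to_pfn(struct page *page)
{
	return (unsigned long)(page - mem_map) + EXAMPLE_ARCH_PFN_OFFSET;
}

static inline struct page *example_pfn_to_page(unsigned long pfn)
{
	return mem_map + (pfn - EXAMPLE_ARCH_PFN_OFFSET);
}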
page               49 include/asm-generic/pci-dma-compat.h 	return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
page               44 include/asm-generic/tlb.h 	struct page *		pages[FREE_PTE_NR];
page              105 include/asm-generic/tlb.h 		free_page_and_swap_cache(page);
page              108 include/asm-generic/tlb.h 	tlb->pages[tlb->nr++] = page;
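asm-generic/tlb.h above batches pages freed during unmap: in fast mode they are freed immediately, otherwise they are queued in tlb->pages[] and released only after the TLB flush, so no CPU can still reach them through a stale translation. A sketch mirroring that logic under the same struct layout:

#include <linux/swap.h>		/* free_page_and_swap_cache() */

static inline void example_tlb_remove_page(struct mmu_gather *tlb,
					   struct page *page)
{
	tlb->need_flush = 1;		/* a translation may still be live */
	if (tlb_fast_mode(tlb)) {	/* UP: no other CPU to shoot down */
		free_page_and_swap_cache(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;	/* defer the free */
	if (tlb->nr >= FREE_PTE_NR)	/* batch full: flush, then free */
		tlb_flush_mmu(tlb, 0, 0);
}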
page               64 include/asm-m32r/cacheflush.h 	flush_icache_user_range(vma, page, vaddr, len);		\
page               74 include/asm-m32r/io.h #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
page               14 include/asm-m32r/page.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
page               31 include/asm-m32r/page.h typedef struct page *pgtable_t;
page               44 include/asm-m32r/pgalloc.h 	struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
page              290 include/asm-m32r/pgtable.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), pgprot)
page               27 include/asm-m32r/tlbflush.h #define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
page               46 include/asm-m32r/tlbflush.h #define flush_tlb_page(vma, page)	smp_flush_tlb_page(vma, page)
page               71 include/asm-m32r/tlbflush.h 		: "r" (page), "i" (MMU_REG_BASE), "i" (MSVA_offset),
page              131 include/asm-m68k/cacheflush.h #define flush_dcache_page(page)		__flush_page_to_ram(page_address(page))
page              134 include/asm-m68k/cacheflush.h #define flush_icache_page(vma, page)	__flush_page_to_ram(page_address(page))
page              144 include/asm-m68k/cacheflush.h 	flush_cache_page(vma, vaddr, page_to_pfn(page));
page              146 include/asm-m68k/cacheflush.h 	flush_icache_user_range(vma, page, vaddr, len);
page              152 include/asm-m68k/cacheflush.h 	flush_cache_page(vma, vaddr, page_to_pfn(page));
page               32 include/asm-m68k/motorola_pgalloc.h 	struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
page               35 include/asm-m68k/motorola_pgalloc.h 	if(!page)
page               38 include/asm-m68k/motorola_pgalloc.h 	pte = kmap(page);
page               45 include/asm-m68k/motorola_pgalloc.h 	pgtable_page_ctor(page);
page               46 include/asm-m68k/motorola_pgalloc.h 	return page;
page               51 include/asm-m68k/motorola_pgalloc.h 	pgtable_page_dtor(page);
page               52 include/asm-m68k/motorola_pgalloc.h 	cache_page(kmap(page));
page               53 include/asm-m68k/motorola_pgalloc.h 	kunmap(page);
page               54 include/asm-m68k/motorola_pgalloc.h 	__free_page(page);
page               59 include/asm-m68k/motorola_pgalloc.h 	pgtable_page_dtor(page);
page               60 include/asm-m68k/motorola_pgalloc.h 	cache_page(kmap(page));
page               61 include/asm-m68k/motorola_pgalloc.h 	kunmap(page);
page               62 include/asm-m68k/motorola_pgalloc.h 	__free_page(page);
page              100 include/asm-m68k/motorola_pgalloc.h 	pmd_set(pmd, page_address(page));
page              100 include/asm-m68k/motorola_pgtable.h #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
page               54 include/asm-m68k/page.h 	unsigned long *sp = page;
page               69 include/asm-m68k/page.h 			     : "a" (page), "0" (sp),
page               74 include/asm-m68k/page.h #define clear_page(page)	memset((page), 0, PAGE_SIZE)
page               80 include/asm-m68k/page.h 		flush_dcache_page(page);	\
page               84 include/asm-m68k/page.h 		flush_dcache_page(page);	\
page               94 include/asm-m68k/page.h typedef struct page *pgtable_t;
page              202 include/asm-m68k/page.h 	pfn_to_virt(page_to_pfn(page));					\
page              212 include/asm-m68k/page.h 	struct page *__p = (_page);					\
page               31 include/asm-m68k/sun3_pgalloc.h 	pgtable_page_dtor(page);
page               32 include/asm-m68k/sun3_pgalloc.h         __free_page(page);
page               44 include/asm-m68k/sun3_pgalloc.h 	unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT);
page               46 include/asm-m68k/sun3_pgalloc.h 	if (!page)
page               49 include/asm-m68k/sun3_pgalloc.h 	memset((void *)page, 0, PAGE_SIZE);
page               50 include/asm-m68k/sun3_pgalloc.h 	return (pte_t *) (page);
page               56 include/asm-m68k/sun3_pgalloc.h         struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
page               58 include/asm-m68k/sun3_pgalloc.h 	if (page == NULL)
page               61 include/asm-m68k/sun3_pgalloc.h 	clear_highpage(page);
page               62 include/asm-m68k/sun3_pgalloc.h 	pgtable_page_ctor(page);
page               63 include/asm-m68k/sun3_pgalloc.h 	return page;
page               74 include/asm-m68k/sun3_pgalloc.h 	pmd_val(*pmd) = __pa((unsigned long)page_address(page));
page              104 include/asm-m68k/sun3_pgtable.h #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
page               31 include/asm-m68k/virtconvert.h 	__pa(PAGE_OFFSET + (((page) - pg_data_map[0].node_mem_map) << PAGE_SHIFT))
page               34 include/asm-m68k/virtconvert.h 	struct page *__page = _page;					\
page               54 include/asm-mn10300/cacheflush.h 		flush_icache_page(vma, page);	\
page              116 include/asm-mn10300/dma-mapping.h 	return page_to_bus(page) + offset;
page               52 include/asm-mn10300/highmem.h 	if (page < highmem_start_page)
page               53 include/asm-mn10300/highmem.h 		return page_address(page);
page               54 include/asm-mn10300/highmem.h 	return kmap_high(page);
page               61 include/asm-mn10300/highmem.h 	if (page < highmem_start_page)
page               63 include/asm-mn10300/highmem.h 	kunmap_high(page);
page               77 include/asm-mn10300/highmem.h 	if (page < highmem_start_page)
page               78 include/asm-mn10300/highmem.h 		return page_address(page);
page               86 include/asm-mn10300/highmem.h 	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
page               29 include/asm-mn10300/page.h #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
page               41 include/asm-mn10300/page.h typedef struct page *pgtable_t;
page              109 include/asm-mn10300/page.h #define page_to_pfn(page)	((unsigned long)((page) - mem_map) + __pfn_disp)
page              119 include/asm-mn10300/page.h #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
page               20 include/asm-mn10300/pgalloc.h struct page;
page               41 include/asm-mn10300/pgalloc.h extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
page               44 include/asm-mn10300/pgtable.h extern struct page *pgd_list;
page              384 include/asm-mn10300/pgtable.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
page              395 include/asm-mn10300/pgtable.h #define page_pte(page)	page_pte_prot((page), __pgprot(0))
page               53 include/asm-parisc/cacheflush.h 	flush_kernel_dcache_page(page);			\
page               54 include/asm-parisc/cacheflush.h 	flush_kernel_icache_page(page_address(page)); 	\
page               64 include/asm-parisc/cacheflush.h 	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
page               71 include/asm-parisc/cacheflush.h 	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
page               83 include/asm-parisc/cacheflush.h 	if (PageAnon(page))
page               91 include/asm-parisc/cacheflush.h 	flush_kernel_dcache_page_addr(page_address(page));
page              107 include/asm-parisc/cacheflush.h 	return page_address(page);
page              110 include/asm-parisc/cacheflush.h #define kunmap(page)			kunmap_parisc(page_address(page))
page              112 include/asm-parisc/cacheflush.h #define kmap_atomic(page, idx)		page_address(page)
page              109 include/asm-parisc/dma-mapping.h 	return dma_map_single(dev, (page_address(page) + (offset)), size, direction);
page               24 include/asm-parisc/page.h #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
page               27 include/asm-parisc/page.h struct page;
page               94 include/asm-parisc/page.h typedef struct page *pgtable_t;
page              164 include/asm-parisc/page.h #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
page              123 include/asm-parisc/pgalloc.h 	struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
page              124 include/asm-parisc/pgalloc.h 	if (page)
page              125 include/asm-parisc/pgalloc.h 		pgtable_page_ctor(page);
page              126 include/asm-parisc/pgalloc.h 	return page;
page              349 include/asm-parisc/pgtable.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
page               19 include/asm-um/page.h struct page;
page               28 include/asm-um/page.h #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
page               31 include/asm-um/page.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
page               86 include/asm-um/page.h typedef struct page *pgtable_t;
page              275 include/asm-um/pgtable.h #define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
page              281 include/asm-um/pgtable.h 	pte_set_val(pte, page_to_phys(page), (pgprot));	\
page              102 include/asm-um/tlb.h 	free_page_and_swap_cache(page);
page               15 include/asm-x86/agp.h #define map_page_into_agp(page) set_pages_uc(page, 1)
page               16 include/asm-x86/agp.h #define unmap_page_from_agp(page) set_pages_wb(page, 1)
page              220 include/asm-x86/dma-mapping.h 	return ops->map_single(dev, page_to_phys(page) + offset,
page               69 include/asm-x86/highmem.h struct page *kmap_atomic_to_page(void *ptr);
page               72 include/asm-x86/highmem.h #define kmap_atomic_pte(page, type)	kmap_atomic(page, type)
page               95 include/asm-x86/io_32.h #define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
page              164 include/asm-x86/io_64.h #define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
page              209 include/asm-x86/kvm_host.h 			      struct kvm_mmu_page *page);
page              294 include/asm-x86/kvm_host.h 	struct page *time_page;
page              325 include/asm-x86/kvm_host.h 	struct page *apic_access_page;
page              329 include/asm-x86/kvm_host.h 	struct page *ept_identity_pagetable;
page              568 include/asm-x86/kvm_host.h 	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
page              570 include/asm-x86/kvm_host.h 	return (struct kvm_mmu_page *)page_private(page);
page               70 include/asm-x86/page.h struct page;
page               75 include/asm-x86/page.h 	clear_page(page);
page               74 include/asm-x86/page_32.h typedef struct page *pgtable_t;
page              116 include/asm-x86/page_32.h 	mmx_clear_page(page);
page              128 include/asm-x86/page_32.h 	memset(page, 0, PAGE_SIZE);
page               84 include/asm-x86/page_64.h typedef struct page *pgtable_t;
page               88 include/asm-x86/page_64.h #define vmemmap ((struct page *)VMEMMAP_START)
page               36 include/asm-x86/paravirt.h struct page;
page              313 include/asm-x86/paravirt.h 	void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
page             1040 include/asm-x86/paravirt.h 	ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
page              123 include/asm-x86/pgtable-3level.h #define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_PFN_MASK))
page              116 include/asm-x86/pgtable_32.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
page              236 include/asm-x86/pgtable_64.h #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn((page)), (pgprot))
page               75 include/asm-xtensa/dma-mapping.h 	return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
page               99 include/asm-xtensa/page.h typedef struct page *pgtable_t;
page              129 include/asm-xtensa/page.h struct page;
page              142 include/asm-xtensa/page.h # define clear_user_page(page, vaddr, pg)	clear_page(page)
page              161 include/asm-xtensa/page.h #define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
page              163 include/asm-xtensa/page.h #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
page               26 include/asm-xtensa/pgalloc.h 	(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
page               53 include/asm-xtensa/pgalloc.h 	struct page *page;
page               55 include/asm-xtensa/pgalloc.h 	page = virt_to_page(pte_alloc_one_kernel(mm, addr));
page               56 include/asm-xtensa/pgalloc.h 	pgtable_page_ctor(page);
page               57 include/asm-xtensa/pgalloc.h 	return page;
page              239 include/asm-xtensa/pgtable.h #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
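Nearly every architecture above defines mk_pte() the same way: translate the page to its frame number and let pfn_pte() fold in the protection bits. A one-line sketch of that common shape:

/* pfn_pte() builds the hardware PTE; page_to_pfn() supplies the frame. */
#define example_mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))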
page               73 include/crypto/algapi.h 			struct page *page;
page               78 include/crypto/algapi.h 			u8 *page;
page               89 include/crypto/algapi.h 	void *page;
page               28 include/crypto/internal/hash.h 	struct page *pg;
page               42 include/crypto/scatterwalk.h 	return kmap_atomic(page, crypto_kmap_type(out));
page              104 include/crypto/scatterwalk.h static inline struct page *scatterwalk_page(struct scatter_walk *walk)
page              483 include/drm/drmP.h 	struct page **pagelist;
page              170 include/linux/aio.h 	struct page		**ring_pages;
page              176 include/linux/aio.h 	struct page		*internal_pages[AIO_RING_PAGES];
page              392 include/linux/atmdev.h 	int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
page               18 include/linux/backing-dev.h struct page;
page               48 include/linux/backing-dev.h 	void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
page               33 include/linux/binfmts.h 	struct page *page[MAX_ARG_PAGES];
page               47 include/linux/bio.h 	struct page	*bv_page;
page              671 include/linux/blkdev.h 	struct page **pages;
page              771 include/linux/blkdev.h 		bdi->unplug_io_fn(bdi, page);
page              960 include/linux/blkdev.h typedef struct {struct page *v;} Sector;
page               46 include/linux/buffer_head.h struct page;
page               63 include/linux/buffer_head.h 	struct page *b_page;		/* the page this bh is mapped to */
page              137 include/linux/buffer_head.h 		BUG_ON(!PagePrivate(page));			\
page              138 include/linux/buffer_head.h 		((struct buffer_head *)page_private(page));	\
page              140 include/linux/buffer_head.h #define page_has_buffers(page)	PagePrivate(page)
page              250 include/linux/buffer_head.h 	page_cache_get(page);
page              251 include/linux/buffer_head.h 	SetPagePrivate(page);
page              252 include/linux/buffer_head.h 	set_page_private(page, (unsigned long)head);
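attach_page_buffers() above shows how a buffer ring is hung off a page: take a page reference on behalf of the buffers, set PG_private so page_has_buffers() fires, and stash the head in page->private. A sketch of the same three steps:

#include <linux/buffer_head.h>
#include <linux/pagemap.h>

static inline void example_attach_buffers(struct page *page,
					  struct buffer_head *head)
{
	page_cache_get(page);		/* the buffer ring pins the page */
	SetPagePrivate(page);		/* makes page_has_buffers() true */
	set_page_private(page, (unsigned long)head);
}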
page              185 include/linux/configfs.h 				 char *page)				\
page              193 include/linux/configfs.h 		ret = _item##_attr->show(_item, page);			\
page              198 include/linux/configfs.h 				  const char *page, size_t count)	\
page              206 include/linux/configfs.h 		ret = _item##_attr->store(_item, page, count);		\
page               25 include/linux/dm-io.h 	struct page *page;
page              489 include/linux/dmaengine.h 	struct page **pages;
page              415 include/linux/fs.h struct page;
page              474 include/linux/fs.h 	int (*writepage)(struct page *page, struct writeback_control *wbc);
page              475 include/linux/fs.h 	int (*readpage)(struct file *, struct page *);
page              476 include/linux/fs.h 	void (*sync_page)(struct page *);
page              482 include/linux/fs.h 	int (*set_page_dirty)(struct page *page);
page              491 include/linux/fs.h 	int (*prepare_write)(struct file *, struct page *, unsigned, unsigned);
page              492 include/linux/fs.h 	int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
page              496 include/linux/fs.h 				struct page **pagep, void **fsdata);
page              499 include/linux/fs.h 				struct page *page, void *fsdata);
page              503 include/linux/fs.h 	void (*invalidatepage) (struct page *, unsigned long);
page              504 include/linux/fs.h 	int (*releasepage) (struct page *, gfp_t);
page              511 include/linux/fs.h 			struct page *, struct page *);
page              512 include/linux/fs.h 	int (*launder_page) (struct page *);
page              513 include/linux/fs.h 	int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
page             1315 include/linux/fs.h 	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
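
The fs.h entries above are the address_space_operations page-cache hooks. As a sketch of the contract, a trivial ->readpage in the style of simple_readpage() fills the locked page, marks it uptodate, and unlocks it (example_readpage is a hypothetical name; a real filesystem copies data from backing store instead of zeroing):

	static int example_readpage(struct file *file, struct page *page)
	{
		void *kaddr = kmap_atomic(page, KM_USER0);

		memset(kaddr, 0, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);	/* the VFS locked the page before calling us */
		return 0;
	}
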
page              176 include/linux/gfp.h struct page *
page              180 include/linux/gfp.h static inline struct page *
page              187 include/linux/gfp.h static inline struct page *
page              195 include/linux/gfp.h static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
page              209 include/linux/gfp.h extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
page              211 include/linux/gfp.h static inline struct page *
page              219 include/linux/gfp.h extern struct page *alloc_page_vma(gfp_t gfp_mask,
page              245 include/linux/gfp.h #define __free_page(page) __free_pages((page), 0)
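
A minimal usage sketch for the gfp.h allocators listed above; alloc_pages() and __free_pages() pair by order, with __free_page() being the order-0 shorthand (grab_buffer/drop_buffer are hypothetical names):

	static void *grab_buffer(void)
	{
		struct page *page = alloc_pages(GFP_KERNEL, 1);	/* two contiguous pages */

		if (!page)
			return NULL;
		return page_address(page);	/* valid for lowmem pages */
	}

	static void drop_buffer(void *buf)
	{
		__free_pages(virt_to_page(buf), 1);	/* order must match the alloc */
	}
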
page               42 include/linux/highmem.h 	return page_address(page);
page               45 include/linux/highmem.h #define kunmap(page) do { (void) (page); } while (0)
page               52 include/linux/highmem.h 	return page_address(page);
page               54 include/linux/highmem.h #define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)
page               68 include/linux/highmem.h 	void *addr = kmap_atomic(page, KM_USER0);
page               69 include/linux/highmem.h 	clear_user_page(addr, vaddr, page);
page               88 include/linux/highmem.h static inline struct page *
page               93 include/linux/highmem.h 	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
page               96 include/linux/highmem.h 	if (page)
page               97 include/linux/highmem.h 		clear_user_highpage(page, vaddr);
page               99 include/linux/highmem.h 	return page;
page              111 include/linux/highmem.h static inline struct page *
page              120 include/linux/highmem.h 	void *kaddr = kmap_atomic(page, KM_USER0);
page              129 include/linux/highmem.h 	void *kaddr = kmap_atomic(page, KM_USER0);
page              140 include/linux/highmem.h 	flush_dcache_page(page);
page              146 include/linux/highmem.h 	zero_user_segments(page, start, end, 0, 0);
page              152 include/linux/highmem.h 	zero_user_segments(page, start, start + size, 0, 0);
page              158 include/linux/highmem.h 	zero_user(page, offset, size);
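
The highmem helpers above let callers touch pages that may have no permanent kernel mapping. The zero_user*() entries boil down to the kmap_atomic() pattern sketched here (zero_tail is a hypothetical name):

	static void zero_tail(struct page *page, unsigned int from)
	{
		void *kaddr = kmap_atomic(page, KM_USER0);

		memset(kaddr + from, 0, PAGE_SIZE - from);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(page);	/* keep virtually-indexed caches coherent */
	}
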
page               50 include/linux/hugetlb.h struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
page               52 include/linux/hugetlb.h struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
page               54 include/linux/hugetlb.h struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
page              263 include/linux/hugetlb.h 	return size_to_hstate(PAGE_SIZE << compound_order(page));
page               87 include/linux/i2o-dev.h 	unsigned int page;	/* HTML page */
page             1043 include/linux/ide.h 	*start = page + off;		\
page               53 include/linux/isdn/capilli.h 	int (*ctr_read_proc)(char *page, char **start, off_t off,
page               85 include/linux/kexec.h 	struct page *control_code_page;
page               86 include/linux/kexec.h 	struct page *swap_page;
page              122 include/linux/kexec.h extern struct page *kimage_alloc_control_pages(struct kimage *image,
page              159 include/linux/kvm_host.h struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
page              161 include/linux/kvm_host.h extern struct page *bad_page;
page              179 include/linux/kvm_host.h struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
page               46 include/linux/kvm_types.h 	struct page *guest_pages[2];
page               38 include/linux/loop.h 				    struct page *raw_page, unsigned raw_off,
page               39 include/linux/loop.h 				    struct page *loop_page, unsigned loop_off,
page              138 include/linux/loop.h 			struct page *raw_page, unsigned raw_off,
page              139 include/linux/loop.h 			struct page *loop_page, unsigned loop_off,
page               25 include/linux/memcontrol.h struct page;
page               30 include/linux/memcontrol.h #define page_reset_bad_cgroup(page)	((page)->page_cgroup = 0)
page                8 include/linux/memory_hotplug.h struct page;
page              220 include/linux/memory_hotplug.h extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
page                7 include/linux/migrate.h typedef struct page *new_page_t(struct page *, unsigned long private, int **);
page               43 include/linux/mm.h #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
page              158 include/linux/mm.h 	struct page *page;		/* ->fault handlers should return a
page              177 include/linux/mm.h 	int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
page              214 include/linux/mm.h #define page_private(page)		((page)->private)
page              215 include/linux/mm.h #define set_page_private(page, v)	((page)->private = (v))
page              241 include/linux/mm.h 	VM_BUG_ON(atomic_read(&page->_count) == 0);
page              242 include/linux/mm.h 	return atomic_dec_and_test(&page->_count);
page              251 include/linux/mm.h 	VM_BUG_ON(PageTail(page));
page              252 include/linux/mm.h 	return atomic_inc_not_zero(&page->_count);
page              256 include/linux/mm.h struct page *vmalloc_to_page(const void *addr);
page              276 include/linux/mm.h static inline struct page *compound_head(struct page *page)
page              278 include/linux/mm.h 	if (unlikely(PageTail(page)))
page              279 include/linux/mm.h 		return page->first_page;
page              280 include/linux/mm.h 	return page;
page              285 include/linux/mm.h 	return atomic_read(&compound_head(page)->_count);
page              290 include/linux/mm.h 	page = compound_head(page);
page              291 include/linux/mm.h 	VM_BUG_ON(atomic_read(&page->_count) == 0);
page              292 include/linux/mm.h 	atomic_inc(&page->_count);
page              295 include/linux/mm.h static inline struct page *virt_to_head_page(const void *x)
page              297 include/linux/mm.h 	struct page *page = virt_to_page(x);
page              298 include/linux/mm.h 	return compound_head(page);
page              307 include/linux/mm.h 	atomic_set(&page->_count, 1);
page              325 include/linux/mm.h 	page[1].lru.next = (void *)dtor;
page              330 include/linux/mm.h 	return (compound_page_dtor *)page[1].lru.next;
page              335 include/linux/mm.h 	if (!PageHead(page))
page              337 include/linux/mm.h 	return (unsigned long)page[1].lru.prev;
page              342 include/linux/mm.h 	page[1].lru.prev = (void *)order;
page              492 include/linux/mm.h 	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
page              505 include/linux/mm.h 	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
page              522 include/linux/mm.h 	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
page              528 include/linux/mm.h 	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
page              534 include/linux/mm.h 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
page              540 include/linux/mm.h 	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
page              541 include/linux/mm.h 	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
page              546 include/linux/mm.h 	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
page              547 include/linux/mm.h 	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
page              552 include/linux/mm.h 	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
page              553 include/linux/mm.h 	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
page              559 include/linux/mm.h 	set_page_zone(page, zone);
page              560 include/linux/mm.h 	set_page_node(page, node);
page              561 include/linux/mm.h 	set_page_section(page, pfn_to_section_nr(pfn));
page              586 include/linux/mm.h 	return __va(page_to_pfn(page) << PAGE_SHIFT);
page              594 include/linux/mm.h #define page_address(page) ((page)->virtual)
page              597 include/linux/mm.h 		(page)->virtual = (address);		\
page              609 include/linux/mm.h #define page_address(page) lowmem_page_address(page)
page              628 include/linux/mm.h 	struct address_space *mapping = page->mapping;
page              630 include/linux/mm.h 	VM_BUG_ON(PageSlab(page));
page              632 include/linux/mm.h 	if (unlikely(PageSwapCache(page)))
page              643 include/linux/mm.h 	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
page              652 include/linux/mm.h 	if (unlikely(PageSwapCache(page)))
page              653 include/linux/mm.h 		return page_private(page);
page              654 include/linux/mm.h 	return page->index;
page              664 include/linux/mm.h 	atomic_set(&(page)->_mapcount, -1);
page              669 include/linux/mm.h 	return atomic_read(&(page)->_mapcount) + 1;
page              677 include/linux/mm.h 	return atomic_read(&(page)->_mapcount) >= 0;
page              739 include/linux/mm.h struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
page              924 include/linux/mm.h #define __pte_lockptr(page)	&((page)->ptl)
page              928 include/linux/mm.h #define pte_lock_deinit(page)	((page)->mapping = NULL)
page              941 include/linux/mm.h 	pte_lock_init(page);
page              942 include/linux/mm.h 	inc_zone_page_state(page, NR_PAGETABLE);
page              947 include/linux/mm.h 	pte_lock_deinit(page);
page              948 include/linux/mm.h 	dec_zone_page_state(page, NR_PAGETABLE);
page             1205 include/linux/mm.h struct page *follow_page(struct vm_area_struct *, unsigned long address,
page             1272 include/linux/mm.h struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
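
The mm.h entries above implement page reference counting; compound (higher-order) pages are counted on their head page, which is why get_page() funnels through compound_head(). A toy lifetime sketch for a plain order-0 page (refcount_demo is a hypothetical name):

	static void refcount_demo(void)
	{
		struct page *page = alloc_page(GFP_KERNEL);	/* _count == 1 */

		if (!page)
			return;
		get_page(page);		/* extra reference: _count == 2 */
		put_page(page);		/* back to 1 */
		put_page(page);		/* last reference: page returns to the allocator */
	}
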
page                4 include/linux/mm_inline.h 	list_add(&page->lru, &zone->active_list);
page               11 include/linux/mm_inline.h 	list_add(&page->lru, &zone->inactive_list);
page               18 include/linux/mm_inline.h 	list_del(&page->lru);
page               25 include/linux/mm_inline.h 	list_del(&page->lru);
page               32 include/linux/mm_inline.h 	list_del(&page->lru);
page               33 include/linux/mm_inline.h 	if (PageActive(page)) {
page               34 include/linux/mm_inline.h 		__ClearPageActive(page);
page               74 include/linux/mm_types.h 	    struct page *first_page;	/* Compound tail pages */
page               56 include/linux/mmzone.h 	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
page              508 include/linux/mmzone.h extern struct page *mem_map;
page              528 include/linux/mmzone.h 	struct page *node_mem_map;
page              856 include/linux/mmzone.h struct page;
page              912 include/linux/mmzone.h static inline struct page *__section_mem_map_addr(struct mem_section *section)
page              916 include/linux/mmzone.h 	return (struct page *)map;
page              289 include/linux/mtd/nand.h 					    int page,
page              293 include/linux/mtd/nand.h 					     int page);
page              387 include/linux/mtd/nand.h 	void		(*erase_cmd)(struct mtd_info *mtd, int page);
page              389 include/linux/mtd/nand.h 	int		(*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state, int status, int page);
page              391 include/linux/mtd/nand.h 				      const uint8_t *buf, int page, int cached, int raw);
page              140 include/linux/net.h struct page;
page              186 include/linux/net.h 	ssize_t		(*sendpage)  (struct socket *sock, struct page *page,
page               40 include/linux/nfs_page.h 	struct page		*wb_page;	/* page to read in/write out */
page              252 include/linux/nfs_xdr.h 	struct page **		pages;
page              271 include/linux/nfs_xdr.h 	struct page **		pages;
page              358 include/linux/nfs_xdr.h 	struct page **			acl_pages;
page              365 include/linux/nfs_xdr.h 	struct page **			acl_pages;
page              384 include/linux/nfs_xdr.h 	struct page **		pages;
page              393 include/linux/nfs_xdr.h 	struct page **		pages;
page              399 include/linux/nfs_xdr.h 	struct page **		pages;
page              407 include/linux/nfs_xdr.h 	struct page **		pages;
page              419 include/linux/nfs_xdr.h 	struct page **		pages;
page              460 include/linux/nfs_xdr.h 	struct page **		pages;
page              496 include/linux/nfs_xdr.h 	struct page **		pages;
page              514 include/linux/nfs_xdr.h 	struct page **		pages;
page              563 include/linux/nfs_xdr.h 			struct page **	pages;
page              642 include/linux/nfs_xdr.h 	struct page **			pages;	/* zero-copy data */
page              656 include/linux/nfs_xdr.h 	struct page **			pages;   /* zero-copy data */
page              730 include/linux/nfs_xdr.h 	struct page *page;
page              748 include/linux/nfs_xdr.h 	struct page		**pagevec;
page              755 include/linux/nfs_xdr.h 	struct page		*page_array[NFS_PAGEVEC_SIZE];
page              767 include/linux/nfs_xdr.h 	struct page		**pagevec;
page              774 include/linux/nfs_xdr.h 	struct page		*page_array[NFS_PAGEVEC_SIZE];
page              800 include/linux/nfs_xdr.h 	int	(*readlink)(struct inode *, struct page *, unsigned int,
page              810 include/linux/nfs_xdr.h 	int	(*symlink) (struct inode *, struct dentry *, struct page *,
page              815 include/linux/nfs_xdr.h 			    u64, struct page *, unsigned int, int);
page              389 include/linux/nfsd/xdr4.h 	struct page **			pagelist;
page              123 include/linux/page-flags.h static inline int Page##uname(struct page *page) 			\
page              124 include/linux/page-flags.h 			{ return test_bit(PG_##lname, &page->flags); }
page              127 include/linux/page-flags.h static inline void SetPage##uname(struct page *page)			\
page              128 include/linux/page-flags.h 			{ set_bit(PG_##lname, &page->flags); }
page              131 include/linux/page-flags.h static inline void ClearPage##uname(struct page *page)			\
page              132 include/linux/page-flags.h 			{ clear_bit(PG_##lname, &page->flags); }
page              135 include/linux/page-flags.h static inline void __SetPage##uname(struct page *page)			\
page              136 include/linux/page-flags.h 			{ __set_bit(PG_##lname, &page->flags); }
page              139 include/linux/page-flags.h static inline void __ClearPage##uname(struct page *page)		\
page              140 include/linux/page-flags.h 			{ __clear_bit(PG_##lname, &page->flags); }
page              143 include/linux/page-flags.h static inline int TestSetPage##uname(struct page *page)			\
page              144 include/linux/page-flags.h 		{ return test_and_set_bit(PG_##lname, &page->flags); }
page              147 include/linux/page-flags.h static inline int TestClearPage##uname(struct page *page)		\
page              148 include/linux/page-flags.h 		{ return test_and_clear_bit(PG_##lname, &page->flags); }
page              158 include/linux/page-flags.h static inline int Page##uname(struct page *page) 			\
page              164 include/linux/page-flags.h struct page;	/* forward declaration */
page              222 include/linux/page-flags.h 	int ret = test_bit(PG_uptodate, &(page)->flags);
page              241 include/linux/page-flags.h 	__set_bit(PG_uptodate, &(page)->flags);
page              247 include/linux/page-flags.h 	if (!test_and_set_bit(PG_uptodate, &page->flags))
page              248 include/linux/page-flags.h 		page_clear_dirty(page);
page              259 include/linux/page-flags.h 	set_bit(PG_uptodate, &(page)->flags);
page              272 include/linux/page-flags.h 	test_set_page_writeback(page);
page              287 include/linux/page-flags.h 	return page->flags & ((1L << PG_head) | (1L << PG_tail));
page              314 include/linux/page-flags.h 	return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
page              319 include/linux/page-flags.h 	page->flags |= PG_head_tail_mask;
page              324 include/linux/page-flags.h 	page->flags &= ~PG_head_tail_mask;
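
The templates above are stamped out once per flag, so a single invocation such as PAGEFLAG(Dirty, dirty) expands to roughly the accessors below (the full PG_dirty set also includes test-and-set/clear and non-atomic variants):

	static inline int PageDirty(struct page *page)
			{ return test_bit(PG_dirty, &page->flags); }
	static inline void SetPageDirty(struct page *page)
			{ set_bit(PG_dirty, &page->flags); }
	static inline void ClearPageDirty(struct page *page)
			{ clear_bit(PG_dirty, &page->flags); }
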
page               60 include/linux/pageblock-flags.h struct page;
page               69 include/linux/pageblock-flags.h 			get_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1)
page               71 include/linux/pageblock-flags.h 			set_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1)
page               63 include/linux/pagemap.h #define page_cache_get(page)		get_page(page)
page               64 include/linux/pagemap.h #define page_cache_release(page)	put_page(page)
page              128 include/linux/pagemap.h 	VM_BUG_ON(page_count(page) == 0);
page              129 include/linux/pagemap.h 	atomic_inc(&page->_count);
page              132 include/linux/pagemap.h 	if (unlikely(!get_page_unless_zero(page))) {
page              141 include/linux/pagemap.h 	VM_BUG_ON(PageTail(page));
page              157 include/linux/pagemap.h 	VM_BUG_ON(page_count(page) == 0);
page              158 include/linux/pagemap.h 	atomic_add(count, &page->_count);
page              161 include/linux/pagemap.h 	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
page              164 include/linux/pagemap.h 	VM_BUG_ON(PageCompound(page) && page != compound_head(page));
page              171 include/linux/pagemap.h 	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
page              176 include/linux/pagemap.h 	VM_BUG_ON(page_count(page) != 0);
page              179 include/linux/pagemap.h 	atomic_set(&page->_count, count);
page              183 include/linux/pagemap.h extern struct page *__page_cache_alloc(gfp_t gfp);
page              185 include/linux/pagemap.h static inline struct page *__page_cache_alloc(gfp_t gfp)
page              191 include/linux/pagemap.h static inline struct page *page_cache_alloc(struct address_space *x)
page              196 include/linux/pagemap.h static inline struct page *page_cache_alloc_cold(struct address_space *x)
page              203 include/linux/pagemap.h extern struct page * find_get_page(struct address_space *mapping,
page              205 include/linux/pagemap.h extern struct page * find_lock_page(struct address_space *mapping,
page              207 include/linux/pagemap.h extern struct page * find_or_create_page(struct address_space *mapping,
page              216 include/linux/pagemap.h struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index);
page              221 include/linux/pagemap.h static inline struct page *grab_cache_page(struct address_space *mapping,
page              227 include/linux/pagemap.h extern struct page * grab_cache_page_nowait(struct address_space *mapping,
page              229 include/linux/pagemap.h extern struct page * read_cache_page_async(struct address_space *mapping,
page              232 include/linux/pagemap.h extern struct page * read_cache_page(struct address_space *mapping,
page              238 include/linux/pagemap.h static inline struct page *read_mapping_page_async(
page              246 include/linux/pagemap.h static inline struct page *read_mapping_page(struct address_space *mapping,
page              258 include/linux/pagemap.h 	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
page              276 include/linux/pagemap.h 	set_bit(PG_locked, &page->flags);
page              281 include/linux/pagemap.h 	clear_bit(PG_locked, &page->flags);
page              286 include/linux/pagemap.h 	return !test_and_set_bit(PG_locked, &page->flags);
page              295 include/linux/pagemap.h 	if (!trylock_page(page))
page              296 include/linux/pagemap.h 		__lock_page(page);
page              307 include/linux/pagemap.h 	if (!trylock_page(page))
page              308 include/linux/pagemap.h 		return __lock_page_killable(page);
page              319 include/linux/pagemap.h 	if (!trylock_page(page))
page              320 include/linux/pagemap.h 		__lock_page_nosync(page);
page              338 include/linux/pagemap.h 	if (PageLocked(page))
page              339 include/linux/pagemap.h 		wait_on_page_bit(page, PG_locked);
page              347 include/linux/pagemap.h 	if (PageWriteback(page))
page              348 include/linux/pagemap.h 		wait_on_page_bit(page, PG_writeback);
page              420 include/linux/pagemap.h 	set_page_locked(page);
page              421 include/linux/pagemap.h 	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
page              423 include/linux/pagemap.h 		clear_page_locked(page);
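
A sketch tying the pagemap.h lookup and locking helpers together: find_get_page() returns the page with a reference held, lock_page() then serializes against I/O and truncation, and page_cache_release() drops the lookup reference (touch_cached_page is a hypothetical name):

	static void touch_cached_page(struct address_space *mapping, pgoff_t index)
	{
		struct page *page = find_get_page(mapping, index);

		if (!page)
			return;
		lock_page(page);	/* may sleep; trylock_page() for atomic context */
		/* ... inspect or modify the page here ... */
		unlock_page(page);
		page_cache_release(page);
	}
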
page               14 include/linux/pagevec.h struct page;
page               20 include/linux/pagevec.h 	struct page *pages[PAGEVEC_SIZE];
page               61 include/linux/pagevec.h 	pvec->pages[pvec->nr++] = page;
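
pagevec batches page references so that releasing them amortizes the zone lock. A minimal sketch (release_batch is a hypothetical name); pagevec_add() returns the space remaining, so zero means the vector just filled up:

	static void release_batch(struct page **pages, int nr)
	{
		struct pagevec pvec;
		int i;

		pagevec_init(&pvec, 0);		/* 0: pages are not cache-cold */
		for (i = 0; i < nr; i++)
			if (!pagevec_add(&pvec, pages[i]))
				pagevec_release(&pvec);	/* flush and reinit */
		pagevec_release(&pvec);			/* flush the remainder */
	}
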
page               12 include/linux/pid_namespace.h        void *page;
page               22 include/linux/pipe_fs_i.h 	struct page *page;
page               48 include/linux/pipe_fs_i.h 	struct page *tmp_page;
page              229 include/linux/pktcdvd.h 	struct page		*pages[PACKET_MAX_SIZE / FRAMES_PER_PAGE];
page              278 include/linux/proc_fs.h 	int (*proc_read)(struct task_struct *task, char *page);
page               17 include/linux/quicklist.h 	void *page;
page               39 include/linux/quicklist.h 	p = q->page;
page               41 include/linux/quicklist.h 		q->page = p[0];
page               61 include/linux/quicklist.h 	*(void **)p = q->page;
page               62 include/linux/quicklist.h 	q->page = p;
page               75 include/linux/quicklist.h 	__quicklist_free(nr, dtor, page_address(page), page);
page              198 include/linux/raid/bitmap.h 	struct page *page;
page              231 include/linux/raid/bitmap.h 	struct page *sb_page; /* cached copy of the bitmap file superblock */
page              232 include/linux/raid/bitmap.h 	struct page **filemap; /* list of cache pages for the file */
page               58 include/linux/raid/md_k.h 	struct page	*sb_page;
page               60 include/linux/raid/raid1.h 	struct page		*tmppage;
page               61 include/linux/raid/raid10.h 	struct page		*tmppage;
page              221 include/linux/raid/raid5.h 		struct page	*page;
page              373 include/linux/raid/raid5.h 	struct page 		*spare_page; /* Used when checking P/Q in raid6 */
page               45 include/linux/relay.h 	struct page **page_array;	/* array of current buffer pages */
page               91 include/linux/rmap.h 	atomic_inc(&page->_mapcount);
page              126 include/linux/rmap.h #define page_referenced(page,l,cnt) TestClearPageReferenced(page)
page               63 include/linux/scatterlist.h 	BUG_ON((unsigned long) page & 0x03);
page               68 include/linux/scatterlist.h 	sg->page_link = page_link | (unsigned long) page;
page               88 include/linux/scatterlist.h 	sg_assign_page(sg, page);
page               93 include/linux/scatterlist.h static inline struct page *sg_page(struct scatterlist *sg)
page               99 include/linux/scatterlist.h 	return (struct page *)((sg)->page_link & ~0x3);
page              248 include/linux/scatterlist.h 	struct page		*page;		/* currently mapped page */
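
sg_assign_page()/sg_page() pack the page pointer together with the chain and end markers into sg->page_link, hence the BUG_ON() above requiring the low two bits to be clear. Typical table construction (fill_sg is a hypothetical name):

	static void fill_sg(struct scatterlist *sg, struct page **pages,
			    unsigned int n, unsigned int len)
	{
		unsigned int i;

		sg_init_table(sg, n);	/* zero the entries, mark the last one */
		for (i = 0; i < n; i++)
			sg_set_page(&sg[i], pages[i], len, 0);
	}
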
page               18 include/linux/shmem_fs.h 	struct page		*i_indirect;	/* top indirect blocks page */
page              133 include/linux/skbuff.h 	struct page *page;
page              965 include/linux/skbuff.h 	frag->page		  = page;
page             1388 include/linux/skbuff.h extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
page             1398 include/linux/skbuff.h static inline struct page *netdev_alloc_page(struct net_device *dev)
page             1405 include/linux/skbuff.h 	__free_page(page);
page             1515 include/linux/skbuff.h 		return page == frag->page &&
page               37 include/linux/slub_def.h 	struct page *page;	/* The slab from which we are allocating */
page              114 include/linux/smb_fs.h 	struct  page			*page;
page               51 include/linux/splice.h 	struct page **pages;		/* page map */
page              103 include/linux/sunrpc/gss_api.h 			struct page		**inpages);
page              223 include/linux/sunrpc/svc.h 	struct page *		rq_pages[RPCSVC_MAXPAGES];
page              224 include/linux/sunrpc/svc.h 	struct page *		*rq_respages;	/* points into rq_pages */
page              310 include/linux/sunrpc/svc.h 		struct page **pp = (rqstp->rq_respages +
page               87 include/linux/sunrpc/svc_rdma.h 	struct page *pages[RPCSVC_MAXPAGES];
page              210 include/linux/sunrpc/svc_rdma.h struct page *svc_rdma_get_page(void);
page               71 include/linux/sunrpc/xdr.h 	struct page **	pages;		/* Array of contiguous pages */
page               73 include/linux/sunrpc/xprt.h 	struct page		**rq_enc_pages;	/* scratch pages for use by
page              232 include/linux/swap.h extern struct page *lookup_swap_cache(swp_entry_t);
page              233 include/linux/swap.h extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
page              235 include/linux/swap.h extern struct page *swapin_readahead(swp_entry_t, gfp_t,
page              287 include/linux/swap.h 	page_cache_release(page)
page              308 include/linux/swap.h static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
page              314 include/linux/swap.h static inline struct page *lookup_swap_cache(swp_entry_t swp)
page               82 include/linux/swapops.h 	BUG_ON(!PageLocked(page));
page               84 include/linux/swapops.h 			page_to_pfn(page));
page               98 include/linux/swapops.h static inline struct page *migration_entry_to_page(swp_entry_t entry)
page              100 include/linux/swapops.h 	struct page *p = pfn_to_page(swp_offset(entry));
page              256 include/linux/tty_driver.h 	int (*read_proc)(char *page, char **start, off_t off,
page               86 include/linux/videotext.h 	int page;	/* number of requested page (hexadecimal) */
page               31 include/linux/vmalloc.h 	struct page		**pages;
page              247 include/linux/vmstat.h 	__inc_zone_state(page_zone(page), item);
page              259 include/linux/vmstat.h 	__dec_zone_state(page_zone(page), item);
page               66 include/media/videobuf-dma-sg.h 	struct page         **pages;
page              328 include/net/bluetooth/hci.h 	__u8     page;
page              560 include/net/bluetooth/hci.h 	__u8     page;
page              766 include/net/bluetooth/hci.h 	__u8     page;
page              268 include/net/sock.h 	struct page		*sk_sndmsg_page;
page              556 include/net/sock.h 	int			(*sendpage)(struct sock *sk, struct page *page,
page             1107 include/net/sock.h 						     page_address(page) + off,
page             1112 include/net/sock.h 	} else if (copy_from_user(page_address(page) + off, from, copy))
page             1214 include/net/sock.h static inline struct page *sk_stream_alloc_page(struct sock *sk)
page             1216 include/net/sock.h 	struct page *page = NULL;
page             1218 include/net/sock.h 	page = alloc_pages(sk->sk_allocation, 0);
page             1219 include/net/sock.h 	if (!page) {
page             1223 include/net/sock.h 	return page;
page              944 include/rdma/ib_verbs.h 				    struct page *page, unsigned long offset,
page             1662 include/rdma/ib_verbs.h 		return dev->dma_ops->map_page(dev, page, offset, size, direction);
page             1663 include/rdma/ib_verbs.h 	return dma_map_page(dev->dma_device, page, offset, size, direction);
page               81 include/sound/memalloc.h 	struct page **page_table;	/* page table (for vmap/vunmap) */
page               78 include/sound/pcm.h 	struct page *(*page)(struct snd_pcm_substream *substream,
page              997 include/sound/pcm.h struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
page               96 include/xen/interface/io/fbif.h 	((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
page               98 include/xen/interface/io/fbif.h 	(XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
page              104 include/xen/interface/io/fbif.h 	((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
page              106 include/xen/interface/io/fbif.h 	(XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
page               99 include/xen/interface/io/kbdif.h 	((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
page              101 include/xen/interface/io/kbdif.h 	(XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
page              107 include/xen/interface/io/kbdif.h 	((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
page              109 include/xen/interface/io/kbdif.h 	(XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
page              190 init/do_mounts.c 	char *s = page;
page              193 init/do_mounts.c 		strcpy(page, root_fs_names);
page              199 init/do_mounts.c 		int len = get_filesystem_list(page);
page              202 init/do_mounts.c 		page[len] = '\0';
page              203 init/do_mounts.c 		for (p = page-1; p; p = next) {
page             1481 kernel/cpuset.c 	return cpulist_scnprintf(page, PAGE_SIZE, mask);
page             1492 kernel/cpuset.c 	return nodelist_scnprintf(page, PAGE_SIZE, mask);
page             1503 kernel/cpuset.c 	char *page;
page             1507 kernel/cpuset.c 	if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
page             1510 kernel/cpuset.c 	s = page;
page             1525 kernel/cpuset.c 	retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
page             1527 kernel/cpuset.c 	free_page((unsigned long)page);
page              116 kernel/dma-coherent.c 		int page = bitmap_find_free_region(mem->bitmap, mem->size,
page              118 kernel/dma-coherent.c 		if (page >= 0) {
page              119 kernel/dma-coherent.c 			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
page              120 kernel/dma-coherent.c 			*ret = mem->virt_base + (page << PAGE_SHIFT);
page              148 kernel/dma-coherent.c 		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
page              150 kernel/dma-coherent.c 		bitmap_release_region(mem->bitmap, page, order);
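
The per-device coherent pool above hands out power-of-two page runs tracked in a bitmap; bitmap_find_free_region() and bitmap_release_region() do the bookkeeping. A standalone sketch with a hypothetical 32-page pool:

	#define POOL_PAGES 32
	static DECLARE_BITMAP(pool_map, POOL_PAGES);

	static int pool_grab(int order)
	{
		/* first page index of a free 2^order run, or negative on failure */
		return bitmap_find_free_region(pool_map, POOL_PAGES, order);
	}

	static void pool_put(int page, int order)
	{
		bitmap_release_region(pool_map, page, order);
	}
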
page              184 kernel/exec_domain.c 		len += sprintf(page + len, "%d-%d\t%-16s\t[%s]\n",
page              188 kernel/futex.c 	struct page *page;
page              260 kernel/futex.c 	err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
page              263 kernel/futex.c 			page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
page              264 kernel/futex.c 		put_page(page);
page              136 kernel/irq/proc.c 	return sprintf(page, "count %u\n"
page              112 kernel/kexec.c static struct page *kimage_alloc_page(struct kimage *image,
page              354 kernel/kexec.c static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
page              356 kernel/kexec.c 	struct page *pages;
page              375 kernel/kexec.c 	order = page_private(page);
page              378 kernel/kexec.c 		ClearPageReserved(page + i);
page              379 kernel/kexec.c 	__free_pages(page, order);
page              387 kernel/kexec.c 		struct page *page;
page              389 kernel/kexec.c 		page = list_entry(pos, struct page, lru);
page              390 kernel/kexec.c 		list_del(&page->lru);
page              391 kernel/kexec.c 		kimage_free_pages(page);
page              395 kernel/kexec.c static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
page              412 kernel/kexec.c 	struct page *pages;
page              461 kernel/kexec.c static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
page              486 kernel/kexec.c 	struct page *pages;
page              525 kernel/kexec.c struct page *kimage_alloc_control_pages(struct kimage *image,
page              528 kernel/kexec.c 	struct page *pages = NULL;
page              549 kernel/kexec.c 		struct page *page;
page              551 kernel/kexec.c 		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
page              552 kernel/kexec.c 		if (!page)
page              555 kernel/kexec.c 		ind_page = page_address(page);
page              586 kernel/kexec.c 	page &= PAGE_MASK;
page              587 kernel/kexec.c 	result = kimage_add_entry(image, page | IND_SOURCE);
page              619 kernel/kexec.c 	struct page *page;
page              621 kernel/kexec.c 	page = pfn_to_page(entry >> PAGE_SHIFT);
page              622 kernel/kexec.c 	kimage_free_pages(page);
page              669 kernel/kexec.c 			if (page == destination)
page              678 kernel/kexec.c static struct page *kimage_alloc_page(struct kimage *image,
page              700 kernel/kexec.c 	struct page *page;
page              707 kernel/kexec.c 	list_for_each_entry(page, &image->dest_pages, lru) {
page              708 kernel/kexec.c 		addr = page_to_pfn(page) << PAGE_SHIFT;
page              710 kernel/kexec.c 			list_del(&page->lru);
page              711 kernel/kexec.c 			return page;
page              714 kernel/kexec.c 	page = NULL;
page              719 kernel/kexec.c 		page = kimage_alloc_pages(gfp_mask, 0);
page              720 kernel/kexec.c 		if (!page)
page              723 kernel/kexec.c 		if (page_to_pfn(page) >
page              725 kernel/kexec.c 			list_add(&page->lru, &image->unuseable_pages);
page              728 kernel/kexec.c 		addr = page_to_pfn(page) << PAGE_SHIFT;
page              748 kernel/kexec.c 			struct page *old_page;
page              752 kernel/kexec.c 			copy_highpage(page, old_page);
page              765 kernel/kexec.c 			page = old_page;
page              772 kernel/kexec.c 			list_add(&page->lru, &image->dest_pages);
page              776 kernel/kexec.c 	return page;
page              798 kernel/kexec.c 		struct page *page;
page              802 kernel/kexec.c 		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
page              803 kernel/kexec.c 		if (!page) {
page              807 kernel/kexec.c 		result = kimage_add_page(image, page_to_pfn(page)
page              812 kernel/kexec.c 		ptr = kmap(page);
page              825 kernel/kexec.c 		kunmap(page);
page              857 kernel/kexec.c 		struct page *page;
page              861 kernel/kexec.c 		page = pfn_to_page(maddr >> PAGE_SHIFT);
page              862 kernel/kexec.c 		if (!page) {
page              866 kernel/kexec.c 		ptr = kmap(page);
page              879 kernel/kexec.c 		kexec_flush_icache_page(page);
page              880 kernel/kexec.c 		kunmap(page);
page             1385 kernel/kexec.c 	VMCOREINFO_STRUCT_SIZE(page);
page             1391 kernel/kexec.c 	VMCOREINFO_OFFSET(page, flags);
page             1392 kernel/kexec.c 	VMCOREINFO_OFFSET(page, _count);
page             1393 kernel/kexec.c 	VMCOREINFO_OFFSET(page, mapping);
page             1394 kernel/kexec.c 	VMCOREINFO_OFFSET(page, lru);
page               63 kernel/pid.c   		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
page              121 kernel/pid.c   	clear_bit(offset, map->page);
page              137 kernel/pid.c   		if (unlikely(!map->page)) {
page              138 kernel/pid.c   			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
page              144 kernel/pid.c   			if (map->page)
page              145 kernel/pid.c   				kfree(page);
page              147 kernel/pid.c   				map->page = page;
page              149 kernel/pid.c   			if (unlikely(!map->page))
page              154 kernel/pid.c   				if (!test_and_set_bit(offset, map->page)) {
page              194 kernel/pid.c   		if (unlikely(!map->page))
page              196 kernel/pid.c   		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
page              523 kernel/pid.c   	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
page              525 kernel/pid.c   	set_bit(0, init_pid_ns.pidmap[0].page);
page               79 kernel/pid_namespace.c 	ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
page               80 kernel/pid_namespace.c 	if (!ns->pidmap[0].page)
page               90 kernel/pid_namespace.c 	set_bit(0, ns->pidmap[0].page);
page               99 kernel/pid_namespace.c 	kfree(ns->pidmap[0].page);
page              111 kernel/pid_namespace.c 		kfree(ns->pidmap[i].page);
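
The pidmap code above claims PIDs with atomic bitops so the common path needs no lock. The core pattern, reduced to a sketch (claim_id is a hypothetical name):

	static int claim_id(unsigned long *map, int max, int start)
	{
		int offset;

		for (;;) {
			offset = find_next_zero_bit(map, max, start);
			if (offset >= max)
				return -1;		/* map exhausted */
			if (!test_and_set_bit(offset, map))
				return offset;		/* we won any race */
			start = offset + 1;		/* lost; retry from the next bit */
		}
	}
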
page               92 kernel/power/snapshot.c static struct page *alloc_image_page(gfp_t gfp_mask)
page               94 kernel/power/snapshot.c 	struct page *page;
page               96 kernel/power/snapshot.c 	page = alloc_page(gfp_mask);
page               97 kernel/power/snapshot.c 	if (page) {
page               98 kernel/power/snapshot.c 		swsusp_set_page_forbidden(page);
page               99 kernel/power/snapshot.c 		swsusp_set_page_free(page);
page              101 kernel/power/snapshot.c 	return page;
page              111 kernel/power/snapshot.c 	struct page *page;
page              115 kernel/power/snapshot.c 	page = virt_to_page(addr);
page              117 kernel/power/snapshot.c 	swsusp_unset_page_forbidden(page);
page              119 kernel/power/snapshot.c 		swsusp_unset_page_free(page);
page              121 kernel/power/snapshot.c 	__free_page(page);
page              633 kernel/power/snapshot.c 		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
page              639 kernel/power/snapshot.c 		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
page              645 kernel/power/snapshot.c 		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
page              651 kernel/power/snapshot.c 		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
page              657 kernel/power/snapshot.c 		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
page              663 kernel/power/snapshot.c 		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
page              812 kernel/power/snapshot.c static struct page *saveable_highmem_page(unsigned long pfn)
page              814 kernel/power/snapshot.c 	struct page *page;
page              819 kernel/power/snapshot.c 	page = pfn_to_page(pfn);
page              821 kernel/power/snapshot.c 	BUG_ON(!PageHighMem(page));
page              823 kernel/power/snapshot.c 	if (swsusp_page_is_forbidden(page) ||  swsusp_page_is_free(page) ||
page              824 kernel/power/snapshot.c 	    PageReserved(page))
page              827 kernel/power/snapshot.c 	return page;
page              867 kernel/power/snapshot.c static struct page *saveable_page(unsigned long pfn)
page              869 kernel/power/snapshot.c 	struct page *page;
page              874 kernel/power/snapshot.c 	page = pfn_to_page(pfn);
page              876 kernel/power/snapshot.c 	BUG_ON(PageHighMem(page));
page              878 kernel/power/snapshot.c 	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
page              881 kernel/power/snapshot.c 	if (PageReserved(page)
page              882 kernel/power/snapshot.c 	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
page              885 kernel/power/snapshot.c 	return page;
page              943 kernel/power/snapshot.c static inline struct page *
page              952 kernel/power/snapshot.c 	struct page *s_page, *d_page;
page             1033 kernel/power/snapshot.c 				struct page *page = pfn_to_page(pfn);
page             1035 kernel/power/snapshot.c 				if (swsusp_page_is_forbidden(page) &&
page             1036 kernel/power/snapshot.c 				    swsusp_page_is_free(page)) {
page             1037 kernel/power/snapshot.c 					swsusp_unset_page_forbidden(page);
page             1038 kernel/power/snapshot.c 					swsusp_unset_page_free(page);
page             1039 kernel/power/snapshot.c 					__free_page(page);
page             1122 kernel/power/snapshot.c 		struct page *page;
page             1124 kernel/power/snapshot.c 		page = alloc_image_page(__GFP_HIGHMEM);
page             1125 kernel/power/snapshot.c 		memory_bm_set_bit(bm, page_to_pfn(page));
page             1170 kernel/power/snapshot.c 		struct page *page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
page             1172 kernel/power/snapshot.c 		if (!page)
page             1175 kernel/power/snapshot.c 		memory_bm_set_bit(copy_bm, page_to_pfn(page));
page             1345 kernel/power/snapshot.c 			struct page *page;
page             1347 kernel/power/snapshot.c 			page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
page             1348 kernel/power/snapshot.c 			if (PageHighMem(page)) {
page             1355 kernel/power/snapshot.c 				kaddr = kmap_atomic(page, KM_USER0);
page             1360 kernel/power/snapshot.c 				handle->buffer = page_address(page);
page             1487 kernel/power/snapshot.c 	struct page *copy_page;	/* data is here now */
page             1488 kernel/power/snapshot.c 	struct page *orig_page;	/* data was here before the suspend */
page             1556 kernel/power/snapshot.c 		struct page *page;
page             1558 kernel/power/snapshot.c 		page = alloc_page(__GFP_HIGHMEM);
page             1559 kernel/power/snapshot.c 		if (!swsusp_page_is_free(page)) {
page             1561 kernel/power/snapshot.c 			memory_bm_set_bit(bm, page_to_pfn(page));
page             1565 kernel/power/snapshot.c 		swsusp_set_page_forbidden(page);
page             1566 kernel/power/snapshot.c 		swsusp_set_page_free(page);
page             1590 kernel/power/snapshot.c static struct page *last_highmem_page;
page             1598 kernel/power/snapshot.c 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
page             1602 kernel/power/snapshot.c 		last_highmem_page = page;
page             1613 kernel/power/snapshot.c 	pbe->orig_page = page;
page             1615 kernel/power/snapshot.c 		struct page *tmp;
page             1791 kernel/power/snapshot.c 	struct page *page = pfn_to_page(memory_bm_next_pfn(bm));
page             1793 kernel/power/snapshot.c 	if (PageHighMem(page))
page             1794 kernel/power/snapshot.c 		return get_highmem_page_buffer(page, ca);
page             1796 kernel/power/snapshot.c 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
page             1800 kernel/power/snapshot.c 		return page_address(page);
page             1810 kernel/power/snapshot.c 	pbe->orig_address = page_address(page);
page               72 kernel/power/swap.c 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
page               79 kernel/power/swap.c 	lock_page(page);
page               84 kernel/power/swap.c 		wait_on_page_locked(page);
page               90 kernel/power/swap.c 			get_page(page);	/* These pages are freed later */
page              121 kernel/power/swap.c 		struct page *page;
page              124 kernel/power/swap.c 		page = bio->bi_io_vec[0].bv_page;
page              125 kernel/power/swap.c 		wait_on_page_locked(page);
page              126 kernel/power/swap.c 		if (!PageUptodate(page) || PageError(page))
page              128 kernel/power/swap.c 		put_page(page);
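
The swap-I/O lines above use the page lock as a completion signal: the page is locked before submit_bio(), and the bio end_io handler unlocks it, releasing anyone in wait_on_page_locked(). A hedged sketch of that round trip for a single-page read (example_end_io/read_one_page are hypothetical names; 2.6.27-era bio API assumed):

	static void example_end_io(struct bio *bio, int err)
	{
		struct page *page = bio->bi_io_vec[0].bv_page;

		if (err)
			SetPageError(page);
		unlock_page(page);	/* wakes wait_on_page_locked() */
		bio_put(bio);
	}

	static int read_one_page(struct block_device *bdev, sector_t sector,
				 struct page *page)
	{
		struct bio *bio = bio_alloc(GFP_KERNEL, 1);

		if (!bio)
			return -ENOMEM;
		bio->bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = example_end_io;
		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
			bio_put(bio);
			return -EFAULT;
		}
		lock_page(page);
		submit_bio(READ, bio);
		wait_on_page_locked(page);
		return PageError(page) ? -EIO : 0;
	}
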
page              337 kernel/profile.c 	struct page *page;
page              345 kernel/profile.c 			page = alloc_pages_node(node,
page              348 kernel/profile.c 			if (!page)
page              350 kernel/profile.c 			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
page              353 kernel/profile.c 			page = alloc_pages_node(node,
page              356 kernel/profile.c 			if (!page)
page              358 kernel/profile.c 			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
page              362 kernel/profile.c 		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
page              364 kernel/profile.c 		__free_page(page);
page              376 kernel/profile.c 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
page              378 kernel/profile.c 			__free_page(page);
page              381 kernel/profile.c 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
page              383 kernel/profile.c 			__free_page(page);
page              424 kernel/profile.c 	int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
page              427 kernel/profile.c 	len += sprintf(page + len, "\n");
page              536 kernel/profile.c 		struct page *page;
page              538 kernel/profile.c 		page = alloc_pages_node(node,
page              541 kernel/profile.c 		if (!page)
page              544 kernel/profile.c 				= (struct profile_hit *)page_address(page);
page              545 kernel/profile.c 		page = alloc_pages_node(node,
page              548 kernel/profile.c 		if (!page)
page              551 kernel/profile.c 				= (struct profile_hit *)page_address(page);
page              559 kernel/profile.c 		struct page *page;
page              562 kernel/profile.c 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
page              564 kernel/profile.c 			__free_page(page);
page              567 kernel/profile.c 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
page              569 kernel/profile.c 			__free_page(page);
page              223 kernel/rcutorture.c 	int (*stats)(char *page);
page              478 kernel/rcutorture.c 	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
page              481 kernel/rcutorture.c 		cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
page              485 kernel/rcutorture.c 	cnt += sprintf(&page[cnt], "\n");
page              764 kernel/rcutorture.c 	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
page              765 kernel/rcutorture.c 	cnt += sprintf(&page[cnt],
page              777 kernel/rcutorture.c 		cnt += sprintf(&page[cnt], " !!!");
page              778 kernel/rcutorture.c 	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
page              780 kernel/rcutorture.c 		cnt += sprintf(&page[cnt], "!!! ");
page              784 kernel/rcutorture.c 	cnt += sprintf(&page[cnt], "Reader Pipe: ");
page              786 kernel/rcutorture.c 		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
page              787 kernel/rcutorture.c 	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
page              788 kernel/rcutorture.c 	cnt += sprintf(&page[cnt], "Reader Batch: ");
page              790 kernel/rcutorture.c 		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
page              791 kernel/rcutorture.c 	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
page              792 kernel/rcutorture.c 	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
page              794 kernel/rcutorture.c 		cnt += sprintf(&page[cnt], " %d",
page              797 kernel/rcutorture.c 	cnt += sprintf(&page[cnt], "\n");
page              799 kernel/rcutorture.c 		cnt += cur_ops->stats(&page[cnt]);
page               44 kernel/relay.c 	struct page *page;
page               51 kernel/relay.c 	page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT));
page               52 kernel/relay.c 	if (!page)
page               54 kernel/relay.c 	get_page(page);
page               55 kernel/relay.c 	vmf->page = page;
page               71 kernel/relay.c static struct page **relay_alloc_page_array(unsigned int n_pages)
page               73 kernel/relay.c 	struct page **array;
page               74 kernel/relay.c 	size_t pa_size = n_pages * sizeof(struct page *);
page             1196 kernel/relay.c 	rbuf = (struct rchan_buf *)page_private(buf->page);
page             1233 kernel/relay.c 	struct page *pages[PIPE_BUFFERS];
page             7878 kernel/sched.c 	return sprintf(page, "%u\n", sched_mc_power_savings);
page             7894 kernel/sched.c 	return sprintf(page, "%u\n", sched_smt_power_savings);
page              284 kernel/trace/trace.c 	struct page *page, *tmp;
page              289 kernel/trace/trace.c 	list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
page              290 kernel/trace/trace.c 		CHECK_COND(page->lru.next->prev != &page->lru);
page              291 kernel/trace/trace.c 		CHECK_COND(page->lru.prev->next != &page->lru);
page              306 kernel/trace/trace.c 	struct page *page;
page              311 kernel/trace/trace.c 	page = list_entry(data->trace_pages.next, struct page, lru);
page              312 kernel/trace/trace.c 	BUG_ON(&page->lru == &data->trace_pages);
page              314 kernel/trace/trace.c 	return page_address(page);
page              766 kernel/trace/trace.c 	struct page *page;
page              768 kernel/trace/trace.c 	page = virt_to_page(addr);
page              770 kernel/trace/trace.c 	next = trace_next_list(data, &page->lru);
page              771 kernel/trace/trace.c 	page = list_entry(next, struct page, lru);
page              773 kernel/trace/trace.c 	return page_address(page);
page             1080 kernel/trace/trace.c 	struct page *page;
page             1092 kernel/trace/trace.c 		page = virt_to_page(data->trace_tail);
page             1093 kernel/trace/trace.c 		iter->next_page[cpu] = &page->lru;
page             1097 kernel/trace/trace.c 	page = list_entry(iter->next_page[cpu], struct page, lru);
page             1098 kernel/trace/trace.c 	BUG_ON(&data->trace_pages == &page->lru);
page             1100 kernel/trace/trace.c 	array = page_address(page);
page             2936 kernel/trace/trace.c 	struct page *page, *tmp;
page             2952 kernel/trace/trace.c 		page = virt_to_page(array);
page             2953 kernel/trace/trace.c 		list_add(&page->lru, &pages);
page             2964 kernel/trace/trace.c 		page = virt_to_page(array);
page             2965 kernel/trace/trace.c 		list_add(&page->lru, &pages);
page             2972 kernel/trace/trace.c 		page = list_entry(pages.next, struct page, lru);
page             2973 kernel/trace/trace.c 		list_del_init(&page->lru);
page             2974 kernel/trace/trace.c 		list_add_tail(&page->lru, &data->trace_pages);
page             2975 kernel/trace/trace.c 		ClearPageLRU(page);
page             2979 kernel/trace/trace.c 		page = list_entry(pages.next, struct page, lru);
page             2980 kernel/trace/trace.c 		list_del_init(&page->lru);
page             2981 kernel/trace/trace.c 		list_add_tail(&page->lru, &data->trace_pages);
page             2982 kernel/trace/trace.c 		SetPageLRU(page);
page             2991 kernel/trace/trace.c 	list_for_each_entry_safe(page, tmp, &pages, lru) {
page             2992 kernel/trace/trace.c 		list_del_init(&page->lru);
page             2993 kernel/trace/trace.c 		__free_page(page);
page             3001 kernel/trace/trace.c 	struct page *page;
page             3017 kernel/trace/trace.c 		page = list_entry(p, struct page, lru);
page             3018 kernel/trace/trace.c 		ClearPageLRU(page);
page             3019 kernel/trace/trace.c 		list_del(&page->lru);
page             3022 kernel/trace/trace.c 		__free_page(page);
page             3036 kernel/trace/trace.c 		page = list_entry(p, struct page, lru);
page             3037 kernel/trace/trace.c 		ClearPageLRU(page);
page             3038 kernel/trace/trace.c 		list_del(&page->lru);
page             3039 kernel/trace/trace.c 		__free_page(page);
page             3053 kernel/trace/trace.c 	struct page *page;
page             3076 kernel/trace/trace.c 		page = virt_to_page(array);
page             3077 kernel/trace/trace.c 		list_add(&page->lru, &data->trace_pages);
page             3079 kernel/trace/trace.c 		ClearPageLRU(page);
page             3094 kernel/trace/trace.c 		page = virt_to_page(array);
page             3095 kernel/trace/trace.c 		list_add(&page->lru, &max_tr.data[i]->trace_pages);
page             3096 kernel/trace/trace.c 		SetPageLRU(page);
page             3133 kernel/trace/trace.c 		struct page *page, *tmp;
page             3137 kernel/trace/trace.c 			list_for_each_entry_safe(page, tmp,
page             3139 kernel/trace/trace.c 				list_del_init(&page->lru);
page             3140 kernel/trace/trace.c 				__free_page(page);
page             3147 kernel/trace/trace.c 			list_for_each_entry_safe(page, tmp,
page             3149 kernel/trace/trace.c 				list_del_init(&page->lru);
page             3150 kernel/trace/trace.c 				__free_page(page);
page               23 kernel/trace/trace_selftest.c 	struct page *page;
page               28 kernel/trace/trace_selftest.c 	page = list_entry(data->trace_pages.next, struct page, lru);
page               29 kernel/trace/trace_selftest.c 	entries = page_address(page);
page               51 kernel/trace/trace_selftest.c 			page = virt_to_page(entries);
page               52 kernel/trace/trace_selftest.c 			if (page->lru.next == &data->trace_pages) {
page               58 kernel/trace/trace_selftest.c 				page = list_entry(page->lru.next, struct page, lru);
page               59 kernel/trace/trace_selftest.c 				entries = page_address(page);
page               65 kernel/trace/trace_selftest.c 	page = virt_to_page(entries);
page               66 kernel/trace/trace_selftest.c 	if (page->lru.next != &data->trace_pages) {
page              359 lib/scatterlist.c 	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
page              365 lib/scatterlist.c 		miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
page              367 lib/scatterlist.c 		miter->addr = kmap(miter->page) + off;
page              400 lib/scatterlist.c 		miter->page = NULL;
page              440 lib/scatterlist.c 			flush_kernel_dcache_page(miter.page);
page               26 lib/show_mem.c 			struct page *page;
page               35 lib/show_mem.c 			page = pfn_to_page(pfn);
page               37 lib/show_mem.c 			if (PageHighMem(page))
page               40 lib/show_mem.c 			if (PageReserved(page))
page               42 lib/show_mem.c 			else if (page_count(page) == 1)
page               44 lib/show_mem.c 			else if (page_count(page) > 1)
page               45 lib/show_mem.c 				shared += page_count(page) - 1;
page              108 mm/backing-dev.c 			   struct device_attribute *attr, char *page)	\
page              112 mm/backing-dev.c 	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
page              148 mm/bootmem.c   	struct page *page;
page              183 mm/bootmem.c   					page = pfn_to_page(start + off);
page              184 mm/bootmem.c   					__free_pages_bootmem(page, 0);
page              194 mm/bootmem.c   	page = virt_to_page(bdata->node_bootmem_map);
page              199 mm/bootmem.c   		__free_pages_bootmem(page++, 0);
page              181 mm/bounce.c    	struct page *page;
page              187 mm/bounce.c    		page = from->bv_page;
page              192 mm/bounce.c    		if (page_to_pfn(page) <= q->bounce_pfn)
page               74 mm/dmapool.c   	struct dma_page *page;
page               89 mm/dmapool.c   		list_for_each_entry(page, &pool->page_list, page_list) {
page               91 mm/dmapool.c   			blocks += page->in_use;
page              208 mm/dmapool.c   		*(int *)(page->vaddr + offset) = next;
page              215 mm/dmapool.c   	struct dma_page *page;
page              217 mm/dmapool.c   	page = kmalloc(sizeof(*page), mem_flags);
page              218 mm/dmapool.c   	if (!page)
page              220 mm/dmapool.c   	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
page              221 mm/dmapool.c   					 &page->dma, mem_flags);
page              222 mm/dmapool.c   	if (page->vaddr) {
page              224 mm/dmapool.c   		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
page              226 mm/dmapool.c   		pool_initialise_page(pool, page);
page              227 mm/dmapool.c   		list_add(&page->page_list, &pool->page_list);
page              228 mm/dmapool.c   		page->in_use = 0;
page              229 mm/dmapool.c   		page->offset = 0;
page              231 mm/dmapool.c   		kfree(page);
page              232 mm/dmapool.c   		page = NULL;
page              234 mm/dmapool.c   	return page;
page              239 mm/dmapool.c   	return page->in_use != 0;
page              244 mm/dmapool.c   	dma_addr_t dma = page->dma;
page              247 mm/dmapool.c   	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
page              249 mm/dmapool.c   	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
page              250 mm/dmapool.c   	list_del(&page->page_list);
page              251 mm/dmapool.c   	kfree(page);
page              271 mm/dmapool.c   		struct dma_page *page;
page              272 mm/dmapool.c   		page = list_entry(pool->page_list.next,
page              274 mm/dmapool.c   		if (is_page_busy(page)) {
page              278 mm/dmapool.c   					pool->name, page->vaddr);
page              282 mm/dmapool.c   				       pool->name, page->vaddr);
page              284 mm/dmapool.c   			list_del(&page->page_list);
page              285 mm/dmapool.c   			kfree(page);
page              287 mm/dmapool.c   			pool_free_page(pool, page);
page              308 mm/dmapool.c   	struct dma_page *page;
page              314 mm/dmapool.c   	list_for_each_entry(page, &pool->page_list, page_list) {
page              315 mm/dmapool.c   		if (page->offset < pool->allocation)
page              318 mm/dmapool.c   	page = pool_alloc_page(pool, GFP_ATOMIC);
page              319 mm/dmapool.c   	if (!page) {
page              338 mm/dmapool.c   	page->in_use++;
page              339 mm/dmapool.c   	offset = page->offset;
page              340 mm/dmapool.c   	page->offset = *(int *)(page->vaddr + offset);
page              341 mm/dmapool.c   	retval = offset + page->vaddr;
page              342 mm/dmapool.c   	*handle = offset + page->dma;
page              355 mm/dmapool.c   	struct dma_page *page;
page              358 mm/dmapool.c   	list_for_each_entry(page, &pool->page_list, page_list) {
page              359 mm/dmapool.c   		if (dma < page->dma)
page              361 mm/dmapool.c   		if (dma < (page->dma + pool->allocation))
page              364 mm/dmapool.c   	page = NULL;
page              367 mm/dmapool.c   	return page;
page              381 mm/dmapool.c   	struct dma_page *page;
page              385 mm/dmapool.c   	page = pool_find_page(pool, dma);
page              386 mm/dmapool.c   	if (!page) {
page              397 mm/dmapool.c   	offset = vaddr - page->vaddr;
page              399 mm/dmapool.c   	if ((dma - page->dma) != offset) {
page              411 mm/dmapool.c   		unsigned int chain = page->offset;
page              414 mm/dmapool.c   				chain = *(int *)(page->vaddr + chain);
page              432 mm/dmapool.c   	page->in_use--;
page              433 mm/dmapool.c   	*(int *)vaddr = page->offset;
page              434 mm/dmapool.c   	page->offset = offset;
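The mm/dmapool.c hits show the pool's intrusive free list: every free block inside a DMA page stores the offset of the next free block in its first int (dmapool.c lines 208, 340 and 433 above), so the page needs no separate bookkeeping array. Allocation is a pop, freeing is a push. A standalone illustration of just that chain (names are mine, not dmapool's):

struct blk_page {
	void		*vaddr;		/* CPU address of the page */
	unsigned int	offset;		/* offset of the first free block */
};

static void *blk_alloc(struct blk_page *p, unsigned int page_size)
{
	unsigned int offset = p->offset;

	if (offset >= page_size)
		return NULL;			/* page is full */
	p->offset = *(int *)(p->vaddr + offset); /* pop: follow stored link */
	return p->vaddr + offset;
}

static void blk_free(struct blk_page *p, void *vaddr)
{
	unsigned int offset = vaddr - p->vaddr;

	*(int *)vaddr = p->offset;		/* push: link in old head */
	p->offset = offset;
}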
page              116 mm/filemap.c   	struct address_space *mapping = page->mapping;
page              118 mm/filemap.c   	mem_cgroup_uncharge_cache_page(page);
page              119 mm/filemap.c   	radix_tree_delete(&mapping->page_tree, page->index);
page              120 mm/filemap.c   	page->mapping = NULL;
page              122 mm/filemap.c   	__dec_zone_page_state(page, NR_FILE_PAGES);
page              123 mm/filemap.c   	BUG_ON(page_mapped(page));
page              132 mm/filemap.c   	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
page              133 mm/filemap.c   		dec_zone_page_state(page, NR_FILE_DIRTY);
page              140 mm/filemap.c   	struct address_space *mapping = page->mapping;
page              142 mm/filemap.c   	BUG_ON(!PageLocked(page));
page              145 mm/filemap.c   	__remove_from_page_cache(page);
page              152 mm/filemap.c   	struct page *page;
page              154 mm/filemap.c   	page = container_of((unsigned long *)word, struct page, flags);
page              178 mm/filemap.c   	mapping = page_mapping(page);
page              180 mm/filemap.c   		mapping->a_ops->sync_page(page);
page              285 mm/filemap.c   			struct page *page = pvec.pages[i];
page              288 mm/filemap.c   			if (page->index > end)
page              291 mm/filemap.c   			wait_on_page_writeback(page);
page              292 mm/filemap.c   			if (PageError(page))
page              459 mm/filemap.c   	VM_BUG_ON(!PageLocked(page));
page              461 mm/filemap.c   	error = mem_cgroup_cache_charge(page, current->mm,
page              468 mm/filemap.c   		page_cache_get(page);
page              469 mm/filemap.c   		page->mapping = mapping;
page              470 mm/filemap.c   		page->index = offset;
page              473 mm/filemap.c   		error = radix_tree_insert(&mapping->page_tree, offset, page);
page              476 mm/filemap.c   			__inc_zone_page_state(page, NR_FILE_PAGES);
page              478 mm/filemap.c   			page->mapping = NULL;
page              479 mm/filemap.c   			mem_cgroup_uncharge_cache_page(page);
page              480 mm/filemap.c   			page_cache_release(page);
page              486 mm/filemap.c   		mem_cgroup_uncharge_cache_page(page);
page              495 mm/filemap.c   	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
page              497 mm/filemap.c   		lru_cache_add(page);
page              502 mm/filemap.c   struct page *__page_cache_alloc(gfp_t gfp)
page              531 mm/filemap.c   	const struct zone *zone = page_zone(page);
page              533 mm/filemap.c   	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
page              538 mm/filemap.c   	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
page              543 mm/filemap.c   	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
page              545 mm/filemap.c   	if (test_bit(bit_nr, &page->flags))
page              546 mm/filemap.c   		__wait_on_bit(page_waitqueue(page), &wait, sync_page,
page              568 mm/filemap.c   	if (!test_and_clear_bit(PG_locked, &page->flags))
page              571 mm/filemap.c   	wake_up_page(page, PG_locked);
page              581 mm/filemap.c   	if (TestClearPageReclaim(page))
page              582 mm/filemap.c   		rotate_reclaimable_page(page);
page              584 mm/filemap.c   	if (!test_clear_page_writeback(page))
page              588 mm/filemap.c   	wake_up_page(page, PG_writeback);
page              603 mm/filemap.c   	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
page              605 mm/filemap.c   	__wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
page              612 mm/filemap.c   	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
page              614 mm/filemap.c   	return __wait_on_bit_lock(page_waitqueue(page), &wait,
page              627 mm/filemap.c   	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
page              628 mm/filemap.c   	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
page              640 mm/filemap.c   struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
page              643 mm/filemap.c   	struct page *page;
page              647 mm/filemap.c   	page = NULL;
page              650 mm/filemap.c   		page = radix_tree_deref_slot(pagep);
page              651 mm/filemap.c   		if (unlikely(!page || page == RADIX_TREE_RETRY))
page              654 mm/filemap.c   		if (!page_cache_get_speculative(page))
page              662 mm/filemap.c   		if (unlikely(page != *pagep)) {
page              663 mm/filemap.c   			page_cache_release(page);
page              669 mm/filemap.c   	return page;
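find_get_page() (mm/filemap.c lines 640-669 above) is the lockless page-cache lookup: the radix-tree slot is read under RCU, a speculative reference is taken, and the slot is then re-checked in case the page was recycled in between. Condensed from the quoted lines into one self-contained shape:

static struct page *lockless_find(struct address_space *mapping,
				  pgoff_t offset)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page || page == RADIX_TREE_RETRY))
			goto repeat;
		if (!page_cache_get_speculative(page))
			goto repeat;		/* page was being freed */
		if (unlikely(page != *pagep)) {	/* slot changed under us */
			page_cache_release(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}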
page              683 mm/filemap.c   struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
page              685 mm/filemap.c   	struct page *page;
page              688 mm/filemap.c   	page = find_get_page(mapping, offset);
page              689 mm/filemap.c   	if (page) {
page              690 mm/filemap.c   		lock_page(page);
page              692 mm/filemap.c   		if (unlikely(page->mapping != mapping)) {
page              693 mm/filemap.c   			unlock_page(page);
page              694 mm/filemap.c   			page_cache_release(page);
page              697 mm/filemap.c   		VM_BUG_ON(page->index != offset);
page              699 mm/filemap.c   	return page;
page              720 mm/filemap.c   struct page *find_or_create_page(struct address_space *mapping,
page              723 mm/filemap.c   	struct page *page;
page              726 mm/filemap.c   	page = find_lock_page(mapping, index);
page              727 mm/filemap.c   	if (!page) {
page              728 mm/filemap.c   		page = __page_cache_alloc(gfp_mask);
page              729 mm/filemap.c   		if (!page)
page              731 mm/filemap.c   		err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
page              733 mm/filemap.c   			page_cache_release(page);
page              734 mm/filemap.c   			page = NULL;
page              739 mm/filemap.c   	return page;
page              772 mm/filemap.c   		struct page *page;
page              774 mm/filemap.c   		page = radix_tree_deref_slot((void **)pages[i]);
page              775 mm/filemap.c   		if (unlikely(!page))
page              781 mm/filemap.c   		if (unlikely(page == RADIX_TREE_RETRY))
page              784 mm/filemap.c   		if (!page_cache_get_speculative(page))
page              788 mm/filemap.c   		if (unlikely(page != *((void **)pages[i]))) {
page              789 mm/filemap.c   			page_cache_release(page);
page              793 mm/filemap.c   		pages[ret] = page;
page              825 mm/filemap.c   		struct page *page;
page              827 mm/filemap.c   		page = radix_tree_deref_slot((void **)pages[i]);
page              828 mm/filemap.c   		if (unlikely(!page))
page              834 mm/filemap.c   		if (unlikely(page == RADIX_TREE_RETRY))
page              837 mm/filemap.c   		if (page->mapping == NULL || page->index != index)
page              840 mm/filemap.c   		if (!page_cache_get_speculative(page))
page              844 mm/filemap.c   		if (unlikely(page != *((void **)pages[i]))) {
page              845 mm/filemap.c   			page_cache_release(page);
page              849 mm/filemap.c   		pages[ret] = page;
page              882 mm/filemap.c   		struct page *page;
page              884 mm/filemap.c   		page = radix_tree_deref_slot((void **)pages[i]);
page              885 mm/filemap.c   		if (unlikely(!page))
page              891 mm/filemap.c   		if (unlikely(page == RADIX_TREE_RETRY))
page              894 mm/filemap.c   		if (!page_cache_get_speculative(page))
page              898 mm/filemap.c   		if (unlikely(page != *((void **)pages[i]))) {
page              899 mm/filemap.c   			page_cache_release(page);
page              903 mm/filemap.c   		pages[ret] = page;
page              928 mm/filemap.c   struct page *
page              931 mm/filemap.c   	struct page *page = find_get_page(mapping, index);
page              933 mm/filemap.c   	if (page) {
page              934 mm/filemap.c   		if (trylock_page(page))
page              935 mm/filemap.c   			return page;
page              936 mm/filemap.c   		page_cache_release(page);
page              939 mm/filemap.c   	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
page              940 mm/filemap.c   	if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
page              941 mm/filemap.c   		page_cache_release(page);
page              942 mm/filemap.c   		page = NULL;
page              944 mm/filemap.c   	return page;
page             1005 mm/filemap.c   		struct page *page;
page             1012 mm/filemap.c   		page = find_get_page(mapping, index);
page             1013 mm/filemap.c   		if (!page) {
page             1017 mm/filemap.c   			page = find_get_page(mapping, index);
page             1018 mm/filemap.c   			if (unlikely(page == NULL))
page             1021 mm/filemap.c   		if (PageReadahead(page)) {
page             1023 mm/filemap.c   					ra, filp, page,
page             1026 mm/filemap.c   		if (!PageUptodate(page)) {
page             1030 mm/filemap.c   			if (!trylock_page(page))
page             1032 mm/filemap.c   			if (!mapping->a_ops->is_partially_uptodate(page,
page             1035 mm/filemap.c   			unlock_page(page);
page             1050 mm/filemap.c   			page_cache_release(page);
page             1059 mm/filemap.c   				page_cache_release(page);
page             1070 mm/filemap.c   			flush_dcache_page(page);
page             1077 mm/filemap.c   			mark_page_accessed(page);
page             1090 mm/filemap.c   		ret = actor(desc, page, offset, nr);
page             1096 mm/filemap.c   		page_cache_release(page);
page             1103 mm/filemap.c   		if (lock_page_killable(page))
page             1108 mm/filemap.c   		if (!page->mapping) {
page             1109 mm/filemap.c   			unlock_page(page);
page             1110 mm/filemap.c   			page_cache_release(page);
page             1115 mm/filemap.c   		if (PageUptodate(page)) {
page             1116 mm/filemap.c   			unlock_page(page);
page             1122 mm/filemap.c   		error = mapping->a_ops->readpage(filp, page);
page             1126 mm/filemap.c   				page_cache_release(page);
page             1132 mm/filemap.c   		if (!PageUptodate(page)) {
page             1133 mm/filemap.c   			if (lock_page_killable(page))
page             1135 mm/filemap.c   			if (!PageUptodate(page)) {
page             1136 mm/filemap.c   				if (page->mapping == NULL) {
page             1140 mm/filemap.c   					unlock_page(page);
page             1141 mm/filemap.c   					page_cache_release(page);
page             1144 mm/filemap.c   				unlock_page(page);
page             1148 mm/filemap.c   			unlock_page(page);
page             1158 mm/filemap.c   		page_cache_release(page);
page             1166 mm/filemap.c   		page = page_cache_alloc_cold(mapping);
page             1167 mm/filemap.c   		if (!page) {
page             1171 mm/filemap.c   		error = add_to_page_cache_lru(page, mapping,
page             1174 mm/filemap.c   			page_cache_release(page);
page             1207 mm/filemap.c   		kaddr = kmap_atomic(page, KM_USER0);
page             1216 mm/filemap.c   	kaddr = kmap(page);
page             1218 mm/filemap.c   	kunmap(page);
page             1388 mm/filemap.c   	struct page *page; 
page             1392 mm/filemap.c   		page = page_cache_alloc_cold(mapping);
page             1393 mm/filemap.c   		if (!page)
page             1396 mm/filemap.c   		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
page             1398 mm/filemap.c   			ret = mapping->a_ops->readpage(file, page);
page             1402 mm/filemap.c   		page_cache_release(page);
page             1430 mm/filemap.c   	struct page *page;
page             1447 mm/filemap.c   	page = find_lock_page(mapping, vmf->pgoff);
page             1452 mm/filemap.c   		if (!page) {
page             1455 mm/filemap.c   			page = find_lock_page(mapping, vmf->pgoff);
page             1456 mm/filemap.c   			if (!page)
page             1459 mm/filemap.c   		if (PageReadahead(page)) {
page             1460 mm/filemap.c   			page_cache_async_readahead(mapping, ra, file, page,
page             1465 mm/filemap.c   	if (!page) {
page             1494 mm/filemap.c   		page = find_lock_page(mapping, vmf->pgoff);
page             1495 mm/filemap.c   		if (!page)
page             1506 mm/filemap.c   	if (unlikely(!PageUptodate(page)))
page             1512 mm/filemap.c   		unlock_page(page);
page             1513 mm/filemap.c   		page_cache_release(page);
page             1520 mm/filemap.c   	mark_page_accessed(page);
page             1521 mm/filemap.c   	ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
page             1522 mm/filemap.c   	vmf->page = page;
page             1562 mm/filemap.c   	ClearPageError(page);
page             1563 mm/filemap.c   	error = mapping->a_ops->readpage(file, page);
page             1565 mm/filemap.c   		wait_on_page_locked(page);
page             1566 mm/filemap.c   		if (!PageUptodate(page))
page             1569 mm/filemap.c   	page_cache_release(page);
page             1621 mm/filemap.c   static struct page *__read_cache_page(struct address_space *mapping,
page             1626 mm/filemap.c   	struct page *page;
page             1629 mm/filemap.c   	page = find_get_page(mapping, index);
page             1630 mm/filemap.c   	if (!page) {
page             1631 mm/filemap.c   		page = page_cache_alloc_cold(mapping);
page             1632 mm/filemap.c   		if (!page)
page             1634 mm/filemap.c   		err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
page             1636 mm/filemap.c   			page_cache_release(page);
page             1642 mm/filemap.c   		err = filler(data, page);
page             1644 mm/filemap.c   			page_cache_release(page);
page             1645 mm/filemap.c   			page = ERR_PTR(err);
page             1648 mm/filemap.c   	return page;
page             1666 mm/filemap.c   struct page *read_cache_page_async(struct address_space *mapping,
page             1671 mm/filemap.c   	struct page *page;
page             1675 mm/filemap.c   	page = __read_cache_page(mapping, index, filler, data);
page             1676 mm/filemap.c   	if (IS_ERR(page))
page             1677 mm/filemap.c   		return page;
page             1678 mm/filemap.c   	if (PageUptodate(page))
page             1681 mm/filemap.c   	lock_page(page);
page             1682 mm/filemap.c   	if (!page->mapping) {
page             1683 mm/filemap.c   		unlock_page(page);
page             1684 mm/filemap.c   		page_cache_release(page);
page             1687 mm/filemap.c   	if (PageUptodate(page)) {
page             1688 mm/filemap.c   		unlock_page(page);
page             1691 mm/filemap.c   	err = filler(data, page);
page             1693 mm/filemap.c   		page_cache_release(page);
page             1697 mm/filemap.c   	mark_page_accessed(page);
page             1698 mm/filemap.c   	return page;
page             1714 mm/filemap.c   struct page *read_cache_page(struct address_space *mapping,
page             1719 mm/filemap.c   	struct page *page;
page             1721 mm/filemap.c   	page = read_cache_page_async(mapping, index, filler, data);
page             1722 mm/filemap.c   	if (IS_ERR(page))
page             1724 mm/filemap.c   	wait_on_page_locked(page);
page             1725 mm/filemap.c   	if (!PageUptodate(page)) {
page             1726 mm/filemap.c   		page_cache_release(page);
page             1727 mm/filemap.c   		page = ERR_PTR(-EIO);
page             1730 mm/filemap.c   	return page;
page             1823 mm/filemap.c   	kaddr = kmap_atomic(page, KM_USER0);
page             1852 mm/filemap.c   	kaddr = kmap(page);
page             1862 mm/filemap.c   	kunmap(page);
page             2027 mm/filemap.c   		struct page *page;
page             2029 mm/filemap.c   		page = __grab_cache_page(mapping, index);
page             2030 mm/filemap.c   		*pagep = page;
page             2031 mm/filemap.c   		if (!page)
page             2034 mm/filemap.c   		if (flags & AOP_FLAG_UNINTERRUPTIBLE && !PageUptodate(page)) {
page             2042 mm/filemap.c   			ret = aops->readpage(file, page);
page             2043 mm/filemap.c   			page_cache_release(page);
page             2052 mm/filemap.c   		ret = aops->prepare_write(file, page, offset, offset+len);
page             2054 mm/filemap.c   			unlock_page(page);
page             2055 mm/filemap.c   			page_cache_release(page);
page             2072 mm/filemap.c   		mark_page_accessed(page);
page             2074 mm/filemap.c   							page, fsdata);
page             2079 mm/filemap.c   		flush_dcache_page(page);
page             2080 mm/filemap.c   		ret = aops->commit_write(file, page, offset, offset+len);
page             2081 mm/filemap.c   		unlock_page(page);
page             2082 mm/filemap.c   		mark_page_accessed(page);
page             2083 mm/filemap.c   		page_cache_release(page);
page             2193 mm/filemap.c   struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index)
page             2196 mm/filemap.c   	struct page *page;
page             2198 mm/filemap.c   	page = find_lock_page(mapping, index);
page             2199 mm/filemap.c   	if (likely(page))
page             2200 mm/filemap.c   		return page;
page             2202 mm/filemap.c   	page = page_cache_alloc(mapping);
page             2203 mm/filemap.c   	if (!page)
page             2205 mm/filemap.c   	status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
page             2207 mm/filemap.c   		page_cache_release(page);
page             2212 mm/filemap.c   	return page;
page             2226 mm/filemap.c   		struct page *src_page;
page             2227 mm/filemap.c   		struct page *page;
page             2259 mm/filemap.c   		page = __grab_cache_page(mapping, index);
page             2260 mm/filemap.c   		if (!page) {
page             2270 mm/filemap.c   		if (!PageUptodate(page) && !segment_eq(get_fs(), KERNEL_DS)) {
page             2271 mm/filemap.c   			unlock_page(page);
page             2275 mm/filemap.c   				page_cache_release(page);
page             2289 mm/filemap.c   				page_cache_release(page);
page             2295 mm/filemap.c   			lock_page(page);
page             2304 mm/filemap.c   			if (unlikely(!page->mapping || PageUptodate(page))) {
page             2305 mm/filemap.c   				unlock_page(page);
page             2306 mm/filemap.c   				page_cache_release(page);
page             2312 mm/filemap.c   		status = a_ops->prepare_write(file, page, offset, offset+bytes);
page             2331 mm/filemap.c   			copied = iov_iter_copy_from_user_atomic(page, i,
page             2337 mm/filemap.c   			dst = kmap_atomic(page, KM_USER1);
page             2343 mm/filemap.c   		flush_dcache_page(page);
page             2345 mm/filemap.c   		status = a_ops->commit_write(file, page, offset, offset+bytes);
page             2351 mm/filemap.c   		unlock_page(page);
page             2352 mm/filemap.c   		mark_page_accessed(page);
page             2353 mm/filemap.c   		page_cache_release(page);
page             2366 mm/filemap.c   		unlock_page(page);
page             2367 mm/filemap.c   		page_cache_release(page);
page             2400 mm/filemap.c   		struct page *page;
page             2430 mm/filemap.c   						&page, &fsdata);
page             2435 mm/filemap.c   		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
page             2437 mm/filemap.c   		flush_dcache_page(page);
page             2440 mm/filemap.c   						page, fsdata);
page             2682 mm/filemap.c   	struct address_space * const mapping = page->mapping;
page             2684 mm/filemap.c   	BUG_ON(!PageLocked(page));
page             2685 mm/filemap.c   	if (PageWriteback(page))
page             2689 mm/filemap.c   		return mapping->a_ops->releasepage(page, gfp_mask);
page             2690 mm/filemap.c   	return try_to_free_buffers(page);
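Taken together, the read_cache_page*() hits (mm/filemap.c lines 1621-1730 above) implement read-and-wait on a cached page through a caller-supplied filler, which typically just invokes ->readpage. A caller-side sketch with illustrative names (my_filler, read_one are mine):

static int my_filler(void *data, struct page *page)
{
	return page->mapping->a_ops->readpage((struct file *)data, page);
}

static int read_one(struct address_space *mapping, struct file *file,
		    pgoff_t index)
{
	struct page *page;

	page = read_cache_page(mapping, index, my_filler, file);
	if (IS_ERR(page))
		return PTR_ERR(page);	/* -EIO if the read failed */
	/* page is uptodate and referenced here; use it, then drop it */
	page_cache_release(page);
	return 0;
}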
page               29 mm/filemap_xip.c static struct page *__xip_sparse_page;
page               32 mm/filemap_xip.c static struct page *xip_sparse_page(void)
page               35 mm/filemap_xip.c 		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
page               37 mm/filemap_xip.c 		if (page)
page               38 mm/filemap_xip.c 			__xip_sparse_page = page;
page              174 mm/filemap_xip.c 	struct page *page;
page              180 mm/filemap_xip.c 	page = __xip_sparse_page;
page              181 mm/filemap_xip.c 	if (!page)
page              191 mm/filemap_xip.c 		pte = page_check_address(page, mm, address, &ptl, 1);
page              196 mm/filemap_xip.c 			page_remove_rmap(page, vma);
page              200 mm/filemap_xip.c 			page_cache_release(page);
page              228 mm/filemap_xip.c 	struct page *page;
page              282 mm/filemap_xip.c 		page = xip_sparse_page();
page              283 mm/filemap_xip.c 		if (!page)
page              286 mm/filemap_xip.c 							page);
page               30 mm/fremap.c    		struct page *page;
page               34 mm/fremap.c    		page = vm_normal_page(vma, addr, pte);
page               35 mm/fremap.c    		if (page) {
page               37 mm/fremap.c    				set_page_dirty(page);
page               38 mm/fremap.c    			page_remove_rmap(page, vma);
page               39 mm/fremap.c    			page_cache_release(page);
page               78 mm/highmem.c   		struct page *page;
page              100 mm/highmem.c   		page = pte_page(pkmap_page_table[i]);
page              101 mm/highmem.c   		pte_clear(&init_mm, (unsigned long)page_address(page),
page              104 mm/highmem.c   		set_page_address(page, NULL);
page              154 mm/highmem.c   			if (page_address(page))
page              155 mm/highmem.c   				return (unsigned long)page_address(page);
page              163 mm/highmem.c   		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
page              166 mm/highmem.c   	set_page_address(page, (void *)vaddr);
page              188 mm/highmem.c   	vaddr = (unsigned long)page_address(page);
page              190 mm/highmem.c   		vaddr = map_new_virtual(page);
page              210 mm/highmem.c   	vaddr = (unsigned long)page_address(page);
page              253 mm/highmem.c   	struct page *page;
page              274 mm/highmem.c   	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
page              289 mm/highmem.c   	if (!PageHighMem(page))
page              290 mm/highmem.c   		return lowmem_page_address(page);
page              292 mm/highmem.c   	pas = page_slot(page);
page              299 mm/highmem.c   			if (pam->page == page) {
page              323 mm/highmem.c   	BUG_ON(!PageHighMem(page));
page              325 mm/highmem.c   	pas = page_slot(page);
page              335 mm/highmem.c   		pam->page = page;
page              344 mm/highmem.c   			if (pam->page == page) {
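The mm/highmem.c lines are the pkmap machinery behind kmap(): a highmem page gets a persistent kernel virtual address recorded with set_page_address(), while page_address() on a lowmem page is a direct address computation. A usage sketch (kmap() may sleep, so atomic contexts use kmap_atomic() instead):

#include <linux/highmem.h>

static u32 read_first_word(struct page *page)
{
	u32 *p = kmap(page);	/* map_new_virtual() if not already mapped */
	u32 v = *p;

	kunmap(page);
	return v;
}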
page              364 mm/hugetlb.c   		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
page              383 mm/hugetlb.c   	int nid = page_to_nid(page);
page              384 mm/hugetlb.c   	list_add(&page->lru, &h->hugepage_freelists[nid]);
page              389 mm/hugetlb.c   static struct page *dequeue_huge_page(struct hstate *h)
page              392 mm/hugetlb.c   	struct page *page = NULL;
page              396 mm/hugetlb.c   			page = list_entry(h->hugepage_freelists[nid].next,
page              397 mm/hugetlb.c   					  struct page, lru);
page              398 mm/hugetlb.c   			list_del(&page->lru);
page              404 mm/hugetlb.c   	return page;
page              407 mm/hugetlb.c   static struct page *dequeue_huge_page_vma(struct hstate *h,
page              412 mm/hugetlb.c   	struct page *page = NULL;
page              438 mm/hugetlb.c   			page = list_entry(h->hugepage_freelists[nid].next,
page              439 mm/hugetlb.c   					  struct page, lru);
page              440 mm/hugetlb.c   			list_del(&page->lru);
page              451 mm/hugetlb.c   	return page;
page              459 mm/hugetlb.c   	h->nr_huge_pages_node[page_to_nid(page)]--;
page              461 mm/hugetlb.c   		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
page              465 mm/hugetlb.c   	set_compound_page_dtor(page, NULL);
page              466 mm/hugetlb.c   	set_page_refcounted(page);
page              467 mm/hugetlb.c   	arch_release_hugepage(page);
page              468 mm/hugetlb.c   	__free_pages(page, huge_page_order(h));
page              488 mm/hugetlb.c   	struct hstate *h = page_hstate(page);
page              489 mm/hugetlb.c   	int nid = page_to_nid(page);
page              492 mm/hugetlb.c   	mapping = (struct address_space *) page_private(page);
page              493 mm/hugetlb.c   	set_page_private(page, 0);
page              494 mm/hugetlb.c   	BUG_ON(page_count(page));
page              495 mm/hugetlb.c   	INIT_LIST_HEAD(&page->lru);
page              499 mm/hugetlb.c   		update_and_free_page(h, page);
page              503 mm/hugetlb.c   		enqueue_huge_page(h, page);
page              547 mm/hugetlb.c   	set_compound_page_dtor(page, free_huge_page);
page              552 mm/hugetlb.c   	put_page(page); /* free it into the hugepage allocator */
page              555 mm/hugetlb.c   static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
page              557 mm/hugetlb.c   	struct page *page;
page              562 mm/hugetlb.c   	page = alloc_pages_node(nid,
page              566 mm/hugetlb.c   	if (page) {
page              567 mm/hugetlb.c   		if (arch_prepare_hugepage(page)) {
page              568 mm/hugetlb.c   			__free_pages(page, huge_page_order(h));
page              571 mm/hugetlb.c   		prep_new_huge_page(h, page, nid);
page              574 mm/hugetlb.c   	return page;
page              600 mm/hugetlb.c   	struct page *page;
page              608 mm/hugetlb.c   		page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
page              609 mm/hugetlb.c   		if (page)
page              612 mm/hugetlb.c   	} while (!page && h->hugetlb_next_nid != start_nid);
page              622 mm/hugetlb.c   static struct page *alloc_buddy_huge_page(struct hstate *h,
page              625 mm/hugetlb.c   	struct page *page;
page              664 mm/hugetlb.c   	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
page              668 mm/hugetlb.c   	if (page && arch_prepare_hugepage(page)) {
page              669 mm/hugetlb.c   		__free_pages(page, huge_page_order(h));
page              674 mm/hugetlb.c   	if (page) {
page              679 mm/hugetlb.c   		put_page_testzero(page);
page              680 mm/hugetlb.c   		VM_BUG_ON(page_count(page));
page              681 mm/hugetlb.c   		nid = page_to_nid(page);
page              682 mm/hugetlb.c   		set_compound_page_dtor(page, free_huge_page);
page              696 mm/hugetlb.c   	return page;
page              706 mm/hugetlb.c   	struct page *page, *tmp;
page              723 mm/hugetlb.c   		page = alloc_buddy_huge_page(h, NULL, 0);
page              724 mm/hugetlb.c   		if (!page) {
page              735 mm/hugetlb.c   		list_add(&page->lru, &surplus_list);
page              762 mm/hugetlb.c   	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
page              765 mm/hugetlb.c   		list_del(&page->lru);
page              766 mm/hugetlb.c   		enqueue_huge_page(h, page);
page              772 mm/hugetlb.c   		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
page              773 mm/hugetlb.c   			list_del(&page->lru);
page              781 mm/hugetlb.c   			free_huge_page(page);
page              798 mm/hugetlb.c   	struct page *page;
page              827 mm/hugetlb.c   			page = list_entry(h->hugepage_freelists[nid].next,
page              828 mm/hugetlb.c   					  struct page, lru);
page              829 mm/hugetlb.c   			list_del(&page->lru);
page              830 mm/hugetlb.c   			update_and_free_page(h, page);
page              894 mm/hugetlb.c   static struct page *alloc_huge_page(struct vm_area_struct *vma,
page              898 mm/hugetlb.c   	struct page *page;
page              918 mm/hugetlb.c   	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
page              921 mm/hugetlb.c   	if (!page) {
page              922 mm/hugetlb.c   		page = alloc_buddy_huge_page(h, vma, addr);
page              923 mm/hugetlb.c   		if (!page) {
page              929 mm/hugetlb.c   	set_page_refcounted(page);
page              930 mm/hugetlb.c   	set_page_private(page, (unsigned long) mapping);
page              934 mm/hugetlb.c   	return page;
page              978 mm/hugetlb.c   		struct page *page = virt_to_page(m);
page              980 mm/hugetlb.c   		__ClearPageReserved(page);
page              981 mm/hugetlb.c   		WARN_ON(page_count(page) != 1);
page              982 mm/hugetlb.c   		prep_compound_page(page, h->order);
page              983 mm/hugetlb.c   		prep_new_huge_page(h, page, page_to_nid(page));
page             1045 mm/hugetlb.c   		struct page *page, *next;
page             1047 mm/hugetlb.c   		list_for_each_entry_safe(page, next, freel, lru) {
page             1050 mm/hugetlb.c   			if (PageHighMem(page))
page             1052 mm/hugetlb.c   			list_del(&page->lru);
page             1053 mm/hugetlb.c   			update_and_free_page(h, page);
page             1055 mm/hugetlb.c   			h->free_huge_pages_node[page_to_nid(page)]--;
page             1123 mm/hugetlb.c   		struct page *page = dequeue_huge_page(h);
page             1124 mm/hugetlb.c   		if (!page)
page             1126 mm/hugetlb.c   		update_and_free_page(h, page);
page             1599 mm/hugetlb.c   		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
page             1601 mm/hugetlb.c   		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
page             1625 mm/hugetlb.c   	struct page *ptepage;
page             1671 mm/hugetlb.c   	struct page *page;
page             1672 mm/hugetlb.c   	struct page *tmp;
page             1706 mm/hugetlb.c   			page = pte_page(pte);
page             1707 mm/hugetlb.c   			if (page != ref_page)
page             1722 mm/hugetlb.c   		page = pte_page(pte);
page             1724 mm/hugetlb.c   			set_page_dirty(page);
page             1725 mm/hugetlb.c   		list_add(&page->lru, &page_list);
page             1730 mm/hugetlb.c   	list_for_each_entry_safe(page, tmp, &page_list, lru) {
page             1731 mm/hugetlb.c   		list_del(&page->lru);
page             1732 mm/hugetlb.c   		put_page(page);
page             1767 mm/hugetlb.c   	mapping = (struct address_space *)page_private(page);
page             1784 mm/hugetlb.c   				page);
page             1795 mm/hugetlb.c   	struct page *old_page, *new_page;
page             1870 mm/hugetlb.c   static struct page *hugetlbfs_pagecache_page(struct hstate *h,
page             1889 mm/hugetlb.c   	struct page *page;
page             1913 mm/hugetlb.c   	page = find_lock_page(mapping, idx);
page             1914 mm/hugetlb.c   	if (!page) {
page             1918 mm/hugetlb.c   		page = alloc_huge_page(vma, address, 0);
page             1919 mm/hugetlb.c   		if (IS_ERR(page)) {
page             1920 mm/hugetlb.c   			ret = -PTR_ERR(page);
page             1923 mm/hugetlb.c   		clear_huge_page(page, address, huge_page_size(h));
page             1924 mm/hugetlb.c   		__SetPageUptodate(page);
page             1930 mm/hugetlb.c   			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
page             1932 mm/hugetlb.c   				put_page(page);
page             1942 mm/hugetlb.c   			lock_page(page);
page             1966 mm/hugetlb.c   	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
page             1972 mm/hugetlb.c   		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
page             1976 mm/hugetlb.c   	unlock_page(page);
page             1983 mm/hugetlb.c   	unlock_page(page);
page             1984 mm/hugetlb.c   	put_page(page);
page             1994 mm/hugetlb.c   	struct page *pagecache_page = NULL;
page             2055 mm/hugetlb.c   __attribute__((weak)) struct page *
page             2076 mm/hugetlb.c   		struct page *page;
page             2102 mm/hugetlb.c   		page = pte_page(huge_ptep_get(pte));
page             2105 mm/hugetlb.c   			get_page(page);
page             2106 mm/hugetlb.c   			pages[i] = page + pfn_offset;
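Many of the mm/hugetlb.c hits above repeat one idiom: free huge pages sit on per-node lists headed at h->hugepage_freelists[nid], linked through page->lru, with counters kept alongside. A sketch of the pop, assuming hugetlb_lock is already held as the real callers do:

static struct page *pop_huge_page(struct hstate *h, int nid)
{
	struct page *page;

	if (list_empty(&h->hugepage_freelists[nid]))
		return NULL;
	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
	list_del(&page->lru);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}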
page               23 mm/internal.h  	atomic_set(&page->_count, v);
page               32 mm/internal.h  	VM_BUG_ON(PageTail(page));
page               33 mm/internal.h  	VM_BUG_ON(atomic_read(&page->_count));
page               34 mm/internal.h  	set_page_count(page, 1);
page               39 mm/internal.h  	atomic_dec(&page->_count);
page               51 mm/internal.h  	VM_BUG_ON(!PageBuddy(page));
page               52 mm/internal.h  	return page_private(page);
page              167 mm/memcontrol.c 	struct page *page;
page              176 mm/memcontrol.c 	return page_to_nid(pc->page);
page              181 mm/memcontrol.c 	return page_zonenum(pc->page);
page              267 mm/memcontrol.c 	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
page              272 mm/memcontrol.c 	VM_BUG_ON(!page_cgroup_locked(page));
page              273 mm/memcontrol.c 	page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
page              278 mm/memcontrol.c 	return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
page              283 mm/memcontrol.c 	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
page              288 mm/memcontrol.c 	return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
page              293 mm/memcontrol.c 	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
page              375 mm/memcontrol.c 	if (!try_lock_page_cgroup(page))
page              378 mm/memcontrol.c 	pc = page_get_page_cgroup(page);
page              385 mm/memcontrol.c 	unlock_page_cgroup(page);
page              479 mm/memcontrol.c 	struct page *page;
page              501 mm/memcontrol.c 		page = pc->page;
page              503 mm/memcontrol.c 		if (unlikely(!PageLRU(page)))
page              506 mm/memcontrol.c 		if (PageActive(page) && !active) {
page              510 mm/memcontrol.c 		if (!PageActive(page) && active) {
page              518 mm/memcontrol.c 		if (__isolate_lru_page(page, mode) == 0) {
page              519 mm/memcontrol.c 			list_move(&page->lru, dst);
page              599 mm/memcontrol.c 	pc->page = page;
page              609 mm/memcontrol.c 	lock_page_cgroup(page);
page              610 mm/memcontrol.c 	if (unlikely(page_get_page_cgroup(page))) {
page              611 mm/memcontrol.c 		unlock_page_cgroup(page);
page              617 mm/memcontrol.c 	page_assign_page_cgroup(page, pc);
page              624 mm/memcontrol.c 	unlock_page_cgroup(page);
page              646 mm/memcontrol.c 	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
page              650 mm/memcontrol.c 	return mem_cgroup_charge_common(page, mm, gfp_mask,
page              672 mm/memcontrol.c 		lock_page_cgroup(page);
page              673 mm/memcontrol.c 		pc = page_get_page_cgroup(page);
page              675 mm/memcontrol.c 			VM_BUG_ON(pc->page != page);
page              677 mm/memcontrol.c 			unlock_page_cgroup(page);
page              680 mm/memcontrol.c 		unlock_page_cgroup(page);
page              686 mm/memcontrol.c 	return mem_cgroup_charge_common(page, mm, gfp_mask,
page              707 mm/memcontrol.c 	lock_page_cgroup(page);
page              708 mm/memcontrol.c 	pc = page_get_page_cgroup(page);
page              712 mm/memcontrol.c 	VM_BUG_ON(pc->page != page);
page              716 mm/memcontrol.c 		|| page_mapped(page)))
page              724 mm/memcontrol.c 	page_assign_page_cgroup(page, NULL);
page              725 mm/memcontrol.c 	unlock_page_cgroup(page);
page              734 mm/memcontrol.c 	unlock_page_cgroup(page);
page              739 mm/memcontrol.c 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
page              744 mm/memcontrol.c 	VM_BUG_ON(page_mapped(page));
page              745 mm/memcontrol.c 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
page              761 mm/memcontrol.c 	lock_page_cgroup(page);
page              762 mm/memcontrol.c 	pc = page_get_page_cgroup(page);
page              769 mm/memcontrol.c 	unlock_page_cgroup(page);
page              869 mm/memcontrol.c 	struct page *page;
page              882 mm/memcontrol.c 		page = pc->page;
page              883 mm/memcontrol.c 		get_page(page);
page              889 mm/memcontrol.c 		if (PageLRU(page)) {
page              890 mm/memcontrol.c 			__mem_cgroup_uncharge_common(page,
page              892 mm/memcontrol.c 			put_page(page);
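The mm/memcontrol.c lines pack the page_cgroup pointer and its lock into the single word page->page_cgroup, with bit 0 (PAGE_CGROUP_LOCK) doubling as a bit-spinlock. A generic illustration of that pointer-plus-lock-bit layout (names here are mine):

#include <linux/bit_spinlock.h>

#define PTR_LOCK_BIT	0

static void *locked_ptr_get(unsigned long *word)
{
	return (void *)(*word & ~(1UL << PTR_LOCK_BIT));
}

static void locked_ptr_set(unsigned long *word, void *ptr)
{
	/* caller holds the bit lock, so the lock bit must stay set */
	*word = (unsigned long)ptr | (1UL << PTR_LOCK_BIT);
}

static void locked_ptr_lock(unsigned long *word)
{
	bit_spin_lock(PTR_LOCK_BIT, word);
}

static void locked_ptr_unlock(unsigned long *word)
{
	bit_spin_unlock(PTR_LOCK_BIT, word);
}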
page               70 mm/memory.c    struct page *mem_map;
page              441 mm/memory.c    struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
page              498 mm/memory.c    	struct page *page;
page              545 mm/memory.c    	page = vm_normal_page(vma, addr, pte);
page              546 mm/memory.c    	if (page) {
page              547 mm/memory.c    		get_page(page);
page              548 mm/memory.c    		page_dup_rmap(page, vma, addr);
page              549 mm/memory.c    		rss[!!PageAnon(page)]++;
page              724 mm/memory.c    			struct page *page;
page              726 mm/memory.c    			page = vm_normal_page(vma, addr, ptent);
page              727 mm/memory.c    			if (unlikely(details) && page) {
page              734 mm/memory.c    				    details->check_mapping != page->mapping)
page              741 mm/memory.c    				    (page->index < details->first_index ||
page              742 mm/memory.c    				     page->index > details->last_index))
page              748 mm/memory.c    			if (unlikely(!page))
page              752 mm/memory.c    						addr) != page->index)
page              754 mm/memory.c    					   pgoff_to_pte(page->index));
page              755 mm/memory.c    			if (PageAnon(page))
page              759 mm/memory.c    					set_page_dirty(page);
page              761 mm/memory.c    					SetPageReferenced(page);
page              764 mm/memory.c    			page_remove_rmap(page, vma);
page              765 mm/memory.c    			tlb_remove_page(tlb, page);
page             1023 mm/memory.c    struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
page             1031 mm/memory.c    	struct page *page;
page             1034 mm/memory.c    	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
page             1035 mm/memory.c    	if (!IS_ERR(page)) {
page             1040 mm/memory.c    	page = NULL;
page             1050 mm/memory.c    		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
page             1061 mm/memory.c    		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
page             1074 mm/memory.c    	page = vm_normal_page(vma, address, pte);
page             1075 mm/memory.c    	if (unlikely(!page))
page             1079 mm/memory.c    		get_page(page);
page             1082 mm/memory.c    		    !pte_dirty(pte) && !PageDirty(page))
page             1083 mm/memory.c    			set_page_dirty(page);
page             1084 mm/memory.c    		mark_page_accessed(page);
page             1089 mm/memory.c    	return page;
page             1098 mm/memory.c    		return page;
page             1106 mm/memory.c    		page = ZERO_PAGE(0);
page             1108 mm/memory.c    			get_page(page);
page             1111 mm/memory.c    	return page;
page             1179 mm/memory.c    				struct page *page = vm_normal_page(gate_vma, start, *pte);
page             1180 mm/memory.c    				pages[i] = page;
page             1181 mm/memory.c    				if (page)
page             1182 mm/memory.c    					get_page(page);
page             1210 mm/memory.c    			struct page *page;
page             1224 mm/memory.c    			while (!(page = follow_page(vma, start, foll_flags))) {
page             1252 mm/memory.c    			if (IS_ERR(page))
page             1253 mm/memory.c    				return i ? i : PTR_ERR(page);
page             1255 mm/memory.c    				pages[i] = page;
page             1257 mm/memory.c    				flush_anon_page(vma, page, start);
page             1258 mm/memory.c    				flush_dcache_page(page);
page             1299 mm/memory.c    	retval = mem_cgroup_charge(page, mm, GFP_KERNEL);
page             1304 mm/memory.c    	if (PageAnon(page))
page             1307 mm/memory.c    	flush_dcache_page(page);
page             1316 mm/memory.c    	get_page(page);
page             1318 mm/memory.c    	page_add_file_rmap(page);
page             1319 mm/memory.c    	set_pte_at(mm, addr, pte, mk_pte(page, prot));
page             1327 mm/memory.c    	mem_cgroup_uncharge_page(page);
page             1359 mm/memory.c    	if (!page_count(page))
page             1362 mm/memory.c    	return insert_page(vma, addr, page, vma->vm_page_prot);
page             1447 mm/memory.c    		struct page *page;
page             1449 mm/memory.c    		page = pfn_to_page(pfn);
page             1450 mm/memory.c    		return insert_page(vma, addr, page, vma->vm_page_prot);
page             1766 mm/memory.c    	struct page *old_page, *new_page;
page             1770 mm/memory.c    	struct page *dirty_page = NULL;
page             2255 mm/memory.c    	struct page *page;
page             2269 mm/memory.c    	page = lookup_swap_cache(entry);
page             2270 mm/memory.c    	if (!page) {
page             2272 mm/memory.c    		page = swapin_readahead(entry,
page             2274 mm/memory.c    		if (!page) {
page             2291 mm/memory.c    	if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
page             2297 mm/memory.c    	mark_page_accessed(page);
page             2298 mm/memory.c    	lock_page(page);
page             2308 mm/memory.c    	if (unlikely(!PageUptodate(page))) {
page             2316 mm/memory.c    	pte = mk_pte(page, vma->vm_page_prot);
page             2317 mm/memory.c    	if (write_access && can_share_swap_page(page)) {
page             2322 mm/memory.c    	flush_icache_page(vma, page);
page             2324 mm/memory.c    	page_add_anon_rmap(page, vma, address);
page             2328 mm/memory.c    		remove_exclusive_swap_page(page);
page             2329 mm/memory.c    	unlock_page(page);
page             2345 mm/memory.c    	mem_cgroup_uncharge_page(page);
page             2347 mm/memory.c    	unlock_page(page);
page             2348 mm/memory.c    	page_cache_release(page);
page             2361 mm/memory.c    	struct page *page;
page             2370 mm/memory.c    	page = alloc_zeroed_user_highpage_movable(vma, address);
page             2371 mm/memory.c    	if (!page)
page             2373 mm/memory.c    	__SetPageUptodate(page);
page             2375 mm/memory.c    	if (mem_cgroup_charge(page, mm, GFP_KERNEL))
page             2378 mm/memory.c    	entry = mk_pte(page, vma->vm_page_prot);
page             2385 mm/memory.c    	lru_cache_add_active(page);
page             2386 mm/memory.c    	page_add_new_anon_rmap(page, vma, address);
page             2395 mm/memory.c    	mem_cgroup_uncharge_page(page);
page             2396 mm/memory.c    	page_cache_release(page);
page             2399 mm/memory.c    	page_cache_release(page);
page             2423 mm/memory.c    	struct page *page;
page             2426 mm/memory.c    	struct page *dirty_page = NULL;
page             2434 mm/memory.c    	vmf.page = NULL;
page             2445 mm/memory.c    		lock_page(vmf.page);
page             2447 mm/memory.c    		VM_BUG_ON(!PageLocked(vmf.page));
page             2452 mm/memory.c    	page = vmf.page;
page             2460 mm/memory.c    			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
page             2462 mm/memory.c    			if (!page) {
page             2466 mm/memory.c    			copy_user_highpage(page, vmf.page, address, vma);
page             2467 mm/memory.c    			__SetPageUptodate(page);
page             2475 mm/memory.c    				unlock_page(page);
page             2476 mm/memory.c    				if (vma->vm_ops->page_mkwrite(vma, page) < 0) {
page             2481 mm/memory.c    				lock_page(page);
page             2489 mm/memory.c    				if (!page->mapping) {
page             2500 mm/memory.c    	if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
page             2519 mm/memory.c    		flush_icache_page(vma, page);
page             2520 mm/memory.c    		entry = mk_pte(page, vma->vm_page_prot);
page             2526 mm/memory.c                            lru_cache_add_active(page);
page             2527 mm/memory.c                            page_add_new_anon_rmap(page, vma, address);
page             2530 mm/memory.c    			page_add_file_rmap(page);
page             2532 mm/memory.c    				dirty_page = page;
page             2540 mm/memory.c    		mem_cgroup_uncharge_page(page);
page             2542 mm/memory.c    			page_cache_release(page);
page             2550 mm/memory.c    	unlock_page(vmf.page);
page             2553 mm/memory.c    		page_cache_release(vmf.page);
page             2934 mm/memory.c    		struct page *page = NULL;
page             2937 mm/memory.c    				write, 1, &page, &vma);
page             2960 mm/memory.c    			maddr = kmap(page);
page             2962 mm/memory.c    				copy_to_user_page(vma, page, addr,
page             2964 mm/memory.c    				set_page_dirty_lock(page);
page             2966 mm/memory.c    				copy_from_user_page(vma, page, addr,
page             2969 mm/memory.c    			kunmap(page);
page             2970 mm/memory.c    			page_cache_release(page);
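The last mm/memory.c hits (lines 2934-2970 above) are the per-page loop of access_process_vm(): pin one page with get_user_pages(), map it, copy through the cache-coherent helpers, then dirty and release it. A self-contained sketch of one iteration (copy_one_page is an illustrative name):

static int copy_one_page(struct task_struct *tsk, struct mm_struct *mm,
			 unsigned long addr, void *buf, int bytes, int write)
{
	struct vm_area_struct *vma;
	struct page *page;
	void *maddr;
	int offset = addr & (PAGE_SIZE - 1);

	if (get_user_pages(tsk, mm, addr, 1, write, 1, &page, &vma) <= 0)
		return -EFAULT;
	maddr = kmap(page);
	if (write) {
		copy_to_user_page(vma, page, addr,
				  maddr + offset, buf, bytes);
		set_page_dirty_lock(page);	/* lock_page + set_page_dirty */
	} else {
		copy_from_user_page(vma, page, addr,
				    buf, maddr + offset, bytes);
	}
	kunmap(page);
	page_cache_release(page);
	return bytes;
}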
page               67 mm/memory_hotplug.c 	atomic_set(&page->_mapcount, type);
page               68 mm/memory_hotplug.c 	SetPagePrivate(page);
page               69 mm/memory_hotplug.c 	set_page_private(page, info);
page               70 mm/memory_hotplug.c 	atomic_inc(&page->_count);
page               77 mm/memory_hotplug.c 	type = atomic_read(&page->_mapcount);
page               80 mm/memory_hotplug.c 	if (atomic_dec_return(&page->_count) == 1) {
page               81 mm/memory_hotplug.c 		ClearPagePrivate(page);
page               82 mm/memory_hotplug.c 		set_page_private(page, 0);
page               83 mm/memory_hotplug.c 		reset_page_mapcount(page);
page               84 mm/memory_hotplug.c 		__free_pages_bootmem(page, 0);
page               93 mm/memory_hotplug.c 	struct page *page, *memmap;
page              108 mm/memory_hotplug.c 	page = virt_to_page(memmap);
page              109 mm/memory_hotplug.c 	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
page              113 mm/memory_hotplug.c 	for (i = 0; i < mapsize; i++, page++)
page              114 mm/memory_hotplug.c 		get_page_bootmem(section_nr, page, SECTION_INFO);
page              117 mm/memory_hotplug.c 	page = virt_to_page(usemap);
page              121 mm/memory_hotplug.c 	for (i = 0; i < mapsize; i++, page++)
page              122 mm/memory_hotplug.c 		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
page              130 mm/memory_hotplug.c 	struct page *page;
page              134 mm/memory_hotplug.c 	page = virt_to_page(pgdat);
page              136 mm/memory_hotplug.c 	for (i = 0; i < nr_pages; i++, page++)
page              137 mm/memory_hotplug.c 		get_page_bootmem(node, page, NODE_INFO);
page              145 mm/memory_hotplug.c 			page = virt_to_page(zone->wait_table);
page              147 mm/memory_hotplug.c 			for (i = 0; i < nr_pages; i++, page++)
page              148 mm/memory_hotplug.c 				get_page_bootmem(node, page, NODE_INFO);
page              345 mm/memory_hotplug.c 	if (PageHighMem(page))
page              350 mm/memory_hotplug.c 	max_mapnr = max(page_to_pfn(page), max_mapnr);
page              353 mm/memory_hotplug.c 	ClearPageReserved(page);
page              354 mm/memory_hotplug.c 	init_page_count(page);
page              355 mm/memory_hotplug.c 	__free_page(page);
page              363 mm/memory_hotplug.c 	struct page *page;
page              366 mm/memory_hotplug.c 			page = pfn_to_page(start_pfn + i);
page              367 mm/memory_hotplug.c 			online_page(page);
page              534 mm/memory_hotplug.c 	return PageBuddy(page) && page_order(page) >= pageblock_order;
page              538 mm/memory_hotplug.c static struct page *next_active_pageblock(struct page *page)
page              543 mm/memory_hotplug.c 	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
page              549 mm/memory_hotplug.c 	if (pageblock_free(page))
page              550 mm/memory_hotplug.c 		pageblocks_stride += page_order(page) - pageblock_order;
page              552 mm/memory_hotplug.c 	return page + (pageblocks_stride * pageblock_nr_pages);
page              559 mm/memory_hotplug.c 	struct page *page = pfn_to_page(start_pfn);
page              560 mm/memory_hotplug.c 	struct page *end_page = page + nr_pages;
page              563 mm/memory_hotplug.c 	for (; page < end_page; page = next_active_pageblock(page)) {
page              564 mm/memory_hotplug.c 		type = get_pageblock_migratetype(page);
page              570 mm/memory_hotplug.c 		if (type != MIGRATE_MOVABLE && !pageblock_free(page))
page              577 mm/memory_hotplug.c 		if (PageReserved(page))
page              592 mm/memory_hotplug.c 	struct page *page;
page              603 mm/memory_hotplug.c 		page = pfn_to_page(pfn + i);
page              604 mm/memory_hotplug.c 		if (zone && page_zone(page) != zone)
page              606 mm/memory_hotplug.c 		zone = page_zone(page);
page              618 mm/memory_hotplug.c 	struct page *page;
page              621 mm/memory_hotplug.c 			page = pfn_to_page(pfn);
page              622 mm/memory_hotplug.c 			if (PageLRU(page))
page              629 mm/memory_hotplug.c static struct page *
page              644 mm/memory_hotplug.c 	struct page *page;
page              653 mm/memory_hotplug.c 		page = pfn_to_page(pfn);
page              654 mm/memory_hotplug.c 		if (!page_count(page))
page              660 mm/memory_hotplug.c 		ret = isolate_lru_page(page, &source);
page              666 mm/memory_hotplug.c 			if (page_count(page))
page              671 mm/memory_hotplug.c 				pfn, page_count(page), page->flags);
page              382 mm/mempolicy.c 		struct page *page;
page              387 mm/mempolicy.c 		page = vm_normal_page(vma, addr, *pte);
page              388 mm/mempolicy.c 		if (!page)
page              401 mm/mempolicy.c 		if (PageReserved(page))
page              403 mm/mempolicy.c 		nid = page_to_nid(page);
page              408 mm/mempolicy.c 			gather_stats(page, private, pte_dirty(*pte));
page              410 mm/mempolicy.c 			migrate_page_add(page, private, flags);
page              661 mm/mempolicy.c 	struct page *p;
page              765 mm/mempolicy.c 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
page              766 mm/mempolicy.c 		isolate_lru_page(page, pagelist);
page              769 mm/mempolicy.c static struct page *new_node_page(struct page *page, unsigned long node, int **x)
page              890 mm/mempolicy.c static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
page              896 mm/mempolicy.c 		address = page_address_in_vma(page, vma);
page              920 mm/mempolicy.c static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
page             1495 mm/mempolicy.c static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
page             1499 mm/mempolicy.c 	struct page *page;
page             1502 mm/mempolicy.c 	page = __alloc_pages(gfp, order, zl);
page             1503 mm/mempolicy.c 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
page             1504 mm/mempolicy.c 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
page             1505 mm/mempolicy.c 	return page;
page             1530 mm/mempolicy.c struct page *
page             1550 mm/mempolicy.c 		struct page *page =  __alloc_pages_nodemask(gfp, 0,
page             1553 mm/mempolicy.c 		return page;
page             1580 mm/mempolicy.c struct page *alloc_pages_current(gfp_t gfp, unsigned order)
page             2191 mm/mempolicy.c 	int count = page_mapcount(page);
page             2194 mm/mempolicy.c 	if (pte_dirty || PageDirty(page))
page             2197 mm/mempolicy.c 	if (PageSwapCache(page))
page             2200 mm/mempolicy.c 	if (PageActive(page))
page             2203 mm/mempolicy.c 	if (PageWriteback(page))
page             2206 mm/mempolicy.c 	if (PageAnon(page))
page             2212 mm/mempolicy.c 	md->node[page_to_nid(page)]++;
page             2221 mm/mempolicy.c 	struct page *page;
page             2237 mm/mempolicy.c 		page = pte_page(pte);
page             2238 mm/mempolicy.c 		if (!page)
page             2241 mm/mempolicy.c 		gather_stats(page, md, pte_dirty(*ptep));
page               37 mm/migrate.c   #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
page               51 mm/migrate.c   	if (PageLRU(page)) {
page               52 mm/migrate.c   		struct zone *zone = page_zone(page);
page               55 mm/migrate.c   		if (PageLRU(page) && get_page_unless_zero(page)) {
page               57 mm/migrate.c   			ClearPageLRU(page);
page               58 mm/migrate.c   			if (PageActive(page))
page               59 mm/migrate.c   				del_page_from_active_list(zone, page);
page               61 mm/migrate.c   				del_page_from_inactive_list(zone, page);
page               62 mm/migrate.c   			list_add_tail(&page->lru, pagelist);
page               88 mm/migrate.c   	if (PageActive(page)) {
page               93 mm/migrate.c   		ClearPageActive(page);
page               94 mm/migrate.c   		lru_cache_add_active(page);
page               96 mm/migrate.c   		lru_cache_add(page);
page               98 mm/migrate.c   	put_page(page);
page              108 mm/migrate.c   	struct page *page;
page              109 mm/migrate.c   	struct page *page2;
page              112 mm/migrate.c   	list_for_each_entry_safe(page, page2, l, lru) {
page              113 mm/migrate.c   		list_del(&page->lru);
page              114 mm/migrate.c   		move_to_lru(page);
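lru_to_page() at migrate.c:37 is plain list_entry() arithmetic over page->lru, and putback_lru_pages() drains its private list with list_for_each_entry_safe() because move_to_lru() unlinks entries as it goes. A self-contained sketch of that intrusive-list pattern with toy stand-ins for the kernel's list.h:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };
    struct page { int pfn; struct list_head lru; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define lru_to_page(head) container_of((head)->prev, struct page, lru)

    static void list_init(struct list_head *h) { h->next = h->prev = h; }
    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }
    static void list_del(struct list_head *n)
    {
        n->prev->next = n->next; n->next->prev = n->prev;
    }

    int main(void)
    {
        struct page pages[3] = { { .pfn = 10 }, { .pfn = 11 }, { .pfn = 12 } };
        struct list_head pagelist;
        list_init(&pagelist);

        for (int i = 0; i < 3; i++)            /* "isolate" onto private list */
            list_add_tail(&pages[i].lru, &pagelist);

        while (pagelist.next != &pagelist) {   /* "putback": drain the list */
            struct page *page = lru_to_page(&pagelist);
            list_del(&page->lru);
            printf("put back pfn %d\n", page->pfn);
        }
        return 0;
    }
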
page              275 mm/migrate.c   	struct page *page;
page              286 mm/migrate.c   	page = migration_entry_to_page(entry);
page              295 mm/migrate.c   	if (!get_page_unless_zero(page))
page              298 mm/migrate.c   	wait_on_page_locked(page);
page              299 mm/migrate.c   	put_page(page);
page              321 mm/migrate.c   		if (page_count(page) != 1)
page              329 mm/migrate.c    					page_index(page));
page              331 mm/migrate.c   	expected_count = 2 + !!PagePrivate(page);
page              332 mm/migrate.c   	if (page_count(page) != expected_count ||
page              333 mm/migrate.c   			(struct page *)radix_tree_deref_slot(pslot) != page) {
page              338 mm/migrate.c   	if (!page_freeze_refs(page, expected_count)) {
page              348 mm/migrate.c   	if (PageSwapCache(page)) {
page              350 mm/migrate.c   		set_page_private(newpage, page_private(page));
page              356 mm/migrate.c   	page_unfreeze_refs(page, expected_count);
page              361 mm/migrate.c   	__put_page(page);
page              373 mm/migrate.c   	__dec_zone_page_state(page, NR_FILE_PAGES);
page              378 mm/migrate.c   		mem_cgroup_uncharge_cache_page(page);
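migrate_page_move_mapping() (migrate.c:321-378) only retargets the radix slot when it holds every reference it expects: two (the page cache's and the migration path's own) plus one more if PagePrivate() buffers pin the page, and page_freeze_refs() atomically swaps the count to zero only if it still matches. A sketch of that compare-and-swap gate with C11 atomics; freeze_refs() here is a stand-in, not the kernel helper:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Succeed only if nobody else holds an unexpected reference:
     * atomically replace the count with 0 iff it equals 'expected'. */
    static bool freeze_refs(atomic_int *count, int expected)
    {
        return atomic_compare_exchange_strong(count, &expected, 0);
    }

    int main(void)
    {
        atomic_int refs = 3;           /* page cache + us + private buffers */
        int has_private = 1;
        int expected = 2 + !!has_private;

        if (freeze_refs(&refs, expected))
            printf("frozen: safe to retarget the mapping\n");
        else
            printf("raced with another user: -EAGAIN\n");
        return 0;
    }
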
page              388 mm/migrate.c   	copy_highpage(newpage, page);
page              390 mm/migrate.c   	if (PageError(page))
page              392 mm/migrate.c   	if (PageReferenced(page))
page              394 mm/migrate.c   	if (PageUptodate(page))
page              396 mm/migrate.c   	if (PageActive(page))
page              398 mm/migrate.c   	if (PageChecked(page))
page              400 mm/migrate.c   	if (PageMappedToDisk(page))
page              403 mm/migrate.c   	if (PageDirty(page)) {
page              404 mm/migrate.c   		clear_page_dirty_for_io(page);
page              416 mm/migrate.c   	ClearPageSwapCache(page);
page              418 mm/migrate.c   	ClearPageActive(page);
page              419 mm/migrate.c   	ClearPagePrivate(page);
page              420 mm/migrate.c   	set_page_private(page, 0);
page              421 mm/migrate.c   	page->mapping = NULL;
page              454 mm/migrate.c   	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
page              456 mm/migrate.c   	rc = migrate_page_move_mapping(mapping, newpage, page);
page              461 mm/migrate.c   	migrate_page_copy(newpage, page);
page              478 mm/migrate.c   	if (!page_has_buffers(page))
page              479 mm/migrate.c   		return migrate_page(mapping, newpage, page);
page              481 mm/migrate.c   	head = page_buffers(page);
page              483 mm/migrate.c   	rc = migrate_page_move_mapping(mapping, newpage, page);
page              496 mm/migrate.c   	ClearPagePrivate(page);
page              497 mm/migrate.c   	set_page_private(newpage, page_private(page));
page              498 mm/migrate.c   	set_page_private(page, 0);
page              499 mm/migrate.c   	put_page(page);
page              511 mm/migrate.c   	migrate_page_copy(newpage, page);
page              545 mm/migrate.c   	if (!clear_page_dirty_for_io(page))
page              557 mm/migrate.c   	remove_migration_ptes(page, page);
page              559 mm/migrate.c   	rc = mapping->a_ops->writepage(page, &wbc);
page              566 mm/migrate.c   		lock_page(page);
page              577 mm/migrate.c   	if (PageDirty(page))
page              578 mm/migrate.c   		return writeout(mapping, page);
page              584 mm/migrate.c   	if (PagePrivate(page) &&
page              585 mm/migrate.c   	    !try_to_release_page(page, GFP_KERNEL))
page              588 mm/migrate.c   	return migrate_page(mapping, newpage, page);
page              612 mm/migrate.c   	newpage->index = page->index;
page              613 mm/migrate.c   	newpage->mapping = page->mapping;
page              615 mm/migrate.c   	mapping = page_mapping(page);
page              617 mm/migrate.c   		rc = migrate_page(mapping, newpage, page);
page              627 mm/migrate.c   						newpage, page);
page              629 mm/migrate.c   		rc = fallback_migrate_page(mapping, newpage, page);
page              632 mm/migrate.c   		remove_migration_ptes(page, newpage);
page              650 mm/migrate.c   	struct page *newpage = get_new_page(page, private, &result);
page              657 mm/migrate.c   	if (page_count(page) == 1)
page              661 mm/migrate.c   	charge = mem_cgroup_prepare_migration(page, newpage);
page              670 mm/migrate.c   	if (!trylock_page(page)) {
page              673 mm/migrate.c   		lock_page(page);
page              676 mm/migrate.c   	if (PageWriteback(page)) {
page              679 mm/migrate.c   		wait_on_page_writeback(page);
page              689 mm/migrate.c   	if (PageAnon(page)) {
page              706 mm/migrate.c   	if (!page->mapping) {
page              707 mm/migrate.c   		if (!PageAnon(page) && PagePrivate(page)) {
page              715 mm/migrate.c   			try_to_free_buffers(page);
page              721 mm/migrate.c   	try_to_unmap(page, 1);
page              723 mm/migrate.c   	if (!page_mapped(page))
page              724 mm/migrate.c   		rc = move_to_new_page(newpage, page);
page              727 mm/migrate.c   		remove_migration_ptes(page, page);
page              734 mm/migrate.c   	unlock_page(page);
page              743 mm/migrate.c    		list_del(&page->lru);
page              744 mm/migrate.c    		move_to_lru(page);
page              784 mm/migrate.c   	struct page *page;
page              785 mm/migrate.c   	struct page *page2;
page              795 mm/migrate.c   		list_for_each_entry_safe(page, page2, from, lru) {
page              799 mm/migrate.c   						page, pass > 2);
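migrate_pages() retries the whole list across several passes; the `pass > 2` argument above is what finally lets unmap_and_move() block on the page lock instead of trylocking. A toy model of that escalation (the 10-pass bound matches the kernel loop; the rest is illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy migration attempt: pretend the page lock only becomes
     * available on the 4th try, and that blocking (force) succeeds. */
    static bool try_move(int attempt, bool force)
    {
        return force || attempt >= 3;
    }

    int main(void)
    {
        int retry = 1;

        for (int pass = 0; pass < 10 && retry; pass++) {
            retry = 0;
            /* allow blocking behaviour only after a few polite passes */
            if (!try_move(pass, pass > 2))
                retry = 1;           /* leave it on the list for next pass */
            else
                printf("moved on pass %d\n", pass);
        }
        return 0;
    }
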
page              835 mm/migrate.c   	struct page *page;
page              840 mm/migrate.c   static struct page *new_page_node(struct page *p, unsigned long private,
page              845 mm/migrate.c   	while (pm->node != MAX_NUMNODES && pm->page != p)
page              877 mm/migrate.c   		struct page *page;
page              883 mm/migrate.c   		pp->page = ZERO_PAGE(0);
page              890 mm/migrate.c   		page = follow_page(vma, pp->addr, FOLL_GET);
page              892 mm/migrate.c   		err = PTR_ERR(page);
page              893 mm/migrate.c   		if (IS_ERR(page))
page              897 mm/migrate.c   		if (!page)
page              900 mm/migrate.c   		if (PageReserved(page))		/* Check for zero page */
page              903 mm/migrate.c   		pp->page = page;
page              904 mm/migrate.c   		err = page_to_nid(page);
page              913 mm/migrate.c   		if (page_mapcount(page) > 1 &&
page              917 mm/migrate.c   		err = isolate_lru_page(page, &pagelist);
page              924 mm/migrate.c   		put_page(page);
page              950 mm/migrate.c   		struct page *page;
page              958 mm/migrate.c   		page = follow_page(vma, pm->addr, 0);
page              960 mm/migrate.c   		err = PTR_ERR(page);
page              961 mm/migrate.c   		if (IS_ERR(page))
page              966 mm/migrate.c   		if (!page || PageReserved(page))
page              969 mm/migrate.c   		err = page_to_nid(page);
page               30 mm/mincore.c   	struct page *page;
page               42 mm/mincore.c   	page = find_get_page(mapping, pgoff);
page               43 mm/mincore.c   	if (page) {
page               44 mm/mincore.c   		present = PageUptodate(page);
page               45 mm/mincore.c   		page_cache_release(page);
page              127 mm/mm_init.c   	BUG_ON(page_to_nid(page) != nid);
page              128 mm/mm_init.c   	BUG_ON(page_zonenum(page) != zone);
page              129 mm/mm_init.c   	BUG_ON(page_to_pfn(page) != pfn);
page             2204 mm/mmap.c      	struct page **pages;
page             2218 mm/mmap.c      		struct page *page = *pages;
page             2219 mm/mmap.c      		get_page(page);
page             2220 mm/mmap.c      		vmf->page = page;
page               38 mm/nommu.c     struct page *mem_map;
page              106 mm/nommu.c     	struct page *page;
page              115 mm/nommu.c     	page = virt_to_head_page(objp);
page              121 mm/nommu.c     	if (PageSlab(page))
page              128 mm/nommu.c     	return PAGE_SIZE << compound_order(page);
page              218 mm/nommu.c     struct page *vmalloc_to_page(const void *addr)
page             1276 mm/nommu.c     struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
page              535 mm/page-writeback.c 	if (set_page_dirty(page) || page_mkwrite) {
page              536 mm/page-writeback.c 		struct address_space *mapping = page_mapping(page);
page              905 mm/page-writeback.c 			struct page *page = pvec.pages[i];
page              914 mm/page-writeback.c 			lock_page(page);
page              916 mm/page-writeback.c 			if (unlikely(page->mapping != mapping)) {
page              917 mm/page-writeback.c 				unlock_page(page);
page              921 mm/page-writeback.c 			if (!wbc->range_cyclic && page->index > end) {
page              923 mm/page-writeback.c 				unlock_page(page);
page              928 mm/page-writeback.c 				wait_on_page_writeback(page);
page              930 mm/page-writeback.c 			if (PageWriteback(page) ||
page              931 mm/page-writeback.c 			    !clear_page_dirty_for_io(page)) {
page              932 mm/page-writeback.c 				unlock_page(page);
page              936 mm/page-writeback.c 			ret = (*writepage)(page, wbc, data);
page              939 mm/page-writeback.c 				unlock_page(page);
page              978 mm/page-writeback.c 	int ret = mapping->a_ops->writepage(page, wbc);
page             1029 mm/page-writeback.c 	struct address_space *mapping = page->mapping;
page             1036 mm/page-writeback.c 	BUG_ON(!PageLocked(page));
page             1039 mm/page-writeback.c 		wait_on_page_writeback(page);
page             1041 mm/page-writeback.c 	if (clear_page_dirty_for_io(page)) {
page             1042 mm/page-writeback.c 		page_cache_get(page);
page             1043 mm/page-writeback.c 		ret = mapping->a_ops->writepage(page, &wbc);
page             1045 mm/page-writeback.c 			wait_on_page_writeback(page);
page             1046 mm/page-writeback.c 			if (PageError(page))
page             1049 mm/page-writeback.c 		page_cache_release(page);
page             1051 mm/page-writeback.c 		unlock_page(page);
page             1062 mm/page-writeback.c 	if (!PageDirty(page))
page             1063 mm/page-writeback.c 		SetPageDirty(page);
page             1084 mm/page-writeback.c 	if (!TestSetPageDirty(page)) {
page             1085 mm/page-writeback.c 		struct address_space *mapping = page_mapping(page);
page             1092 mm/page-writeback.c 		mapping2 = page_mapping(page);
page             1095 mm/page-writeback.c 			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
page             1097 mm/page-writeback.c 				__inc_zone_page_state(page, NR_FILE_DIRTY);
page             1103 mm/page-writeback.c 				page_index(page), PAGECACHE_TAG_DIRTY);
page             1124 mm/page-writeback.c 	return __set_page_dirty_nobuffers(page);
page             1134 mm/page-writeback.c 	struct address_space *mapping = page_mapping(page);
page             1137 mm/page-writeback.c 		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
page             1142 mm/page-writeback.c 		return (*spd)(page);
page             1144 mm/page-writeback.c 	if (!PageDirty(page)) {
page             1145 mm/page-writeback.c 		if (!TestSetPageDirty(page))
page             1153 mm/page-writeback.c 	int ret = __set_page_dirty(page);
page             1174 mm/page-writeback.c 	lock_page_nosync(page);
page             1175 mm/page-writeback.c 	ret = set_page_dirty(page);
page             1176 mm/page-writeback.c 	unlock_page(page);
page             1197 mm/page-writeback.c 	struct address_space *mapping = page_mapping(page);
page             1199 mm/page-writeback.c 	BUG_ON(!PageLocked(page));
page             1201 mm/page-writeback.c 	ClearPageReclaim(page);
page             1228 mm/page-writeback.c 		if (page_mkclean(page))
page             1229 mm/page-writeback.c 			set_page_dirty(page);
page             1240 mm/page-writeback.c 		if (TestClearPageDirty(page)) {
page             1241 mm/page-writeback.c 			dec_zone_page_state(page, NR_FILE_DIRTY);
page             1248 mm/page-writeback.c 	return TestClearPageDirty(page);
page             1254 mm/page-writeback.c 	struct address_space *mapping = page_mapping(page);
page             1262 mm/page-writeback.c 		ret = TestClearPageWriteback(page);
page             1265 mm/page-writeback.c 						page_index(page),
page             1274 mm/page-writeback.c 		ret = TestClearPageWriteback(page);
page             1277 mm/page-writeback.c 		dec_zone_page_state(page, NR_WRITEBACK);
page             1283 mm/page-writeback.c 	struct address_space *mapping = page_mapping(page);
page             1291 mm/page-writeback.c 		ret = TestSetPageWriteback(page);
page             1294 mm/page-writeback.c 						page_index(page),
page             1299 mm/page-writeback.c 		if (!PageDirty(page))
page             1301 mm/page-writeback.c 						page_index(page),
page             1305 mm/page-writeback.c 		ret = TestSetPageWriteback(page);
page             1308 mm/page-writeback.c 		inc_zone_page_state(page, NR_WRITEBACK);
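The mm/page-writeback.c entries trace the canonical writeback walk: lock each page the pagevec found, re-check that it still belongs to this mapping and range, wait on or skip pages already under writeback, and call ->writepage() only once clear_page_dirty_for_io() succeeds. A condensed userspace model of that lock-recheck-skip ladder (struct page and its predicates are stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    struct page {
        const void *mapping;
        bool locked, dirty, writeback;
    };

    static bool writeback_one(struct page *page, const void *mapping)
    {
        page->locked = true;                 /* lock_page() */

        if (page->mapping != mapping ||      /* truncated/migrated under us */
            page->writeback ||               /* already in flight */
            !page->dirty) {                  /* clear_page_dirty_for_io() fails */
            page->locked = false;
            return false;
        }

        page->dirty = false;                 /* clear_page_dirty_for_io() */
        page->writeback = true;              /* ->writepage() starts I/O... */
        page->locked = false;                /* ...and unlocks the page */
        return true;
    }

    int main(void)
    {
        const void *inode = "mapping";
        struct page p = { .mapping = inode, .dirty = true };

        printf("submitted: %d\n", writeback_one(&p, inode));
        printf("submitted again: %d\n", writeback_one(&p, inode));
        return 0;
    }
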
page              174 mm/page_alloc.c 	set_pageblock_flags_group(page, (unsigned long)migratetype,
page              183 mm/page_alloc.c 	unsigned long pfn = page_to_pfn(page);
page              198 mm/page_alloc.c 	if (!pfn_valid_within(page_to_pfn(page)))
page              200 mm/page_alloc.c 	if (zone != page_zone(page))
page              210 mm/page_alloc.c 	if (page_outside_zone_boundaries(zone, page))
page              212 mm/page_alloc.c 	if (!page_is_consistent(zone, page))
page              226 mm/page_alloc.c 	void *pc = page_get_page_cgroup(page);
page              230 mm/page_alloc.c 		current->comm, page, (int)(2*sizeof(unsigned long)),
page              231 mm/page_alloc.c 		(unsigned long)page->flags, page->mapping,
page              232 mm/page_alloc.c 		page_mapcount(page), page_count(page));
page              235 mm/page_alloc.c 		page_reset_bad_cgroup(page);
page              240 mm/page_alloc.c 	page->flags &= ~PAGE_FLAGS_CLEAR_WHEN_BAD;
page              241 mm/page_alloc.c 	set_page_count(page, 0);
page              242 mm/page_alloc.c 	reset_page_mapcount(page);
page              243 mm/page_alloc.c 	page->mapping = NULL;
page              264 mm/page_alloc.c 	__free_pages_ok(page, compound_order(page));
page              271 mm/page_alloc.c 	struct page *p = page + 1;
page              273 mm/page_alloc.c 	set_compound_page_dtor(page, free_compound_page);
page              274 mm/page_alloc.c 	set_compound_order(page, order);
page              275 mm/page_alloc.c 	__SetPageHead(page);
page              278 mm/page_alloc.c 			p = pfn_to_page(page_to_pfn(page) + i);
page              280 mm/page_alloc.c 		p->first_page = page;
page              288 mm/page_alloc.c 	struct page *p = page + 1;
page              290 mm/page_alloc.c 	if (unlikely(compound_order(page) != order))
page              291 mm/page_alloc.c 		bad_page(page);
page              293 mm/page_alloc.c 	if (unlikely(!PageHead(page)))
page              294 mm/page_alloc.c 			bad_page(page);
page              295 mm/page_alloc.c 	__ClearPageHead(page);
page              298 mm/page_alloc.c 			p = pfn_to_page(page_to_pfn(page) + i);
page              301 mm/page_alloc.c 				(p->first_page != page)))
page              302 mm/page_alloc.c 			bad_page(page);
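prep_compound_page() (page_alloc.c:271-302) tags the first page as head, records the order and destructor there, and points each tail page's first_page back at the head; destroy_compound_page() re-verifies all of it before freeing. A toy model of the head/tail bookkeeping:

    #include <stdio.h>

    struct page {
        int head;                 /* PG_head */
        int tail;                 /* PG_tail */
        unsigned order;           /* stored on the head page only */
        struct page *first_page;  /* tail -> head back-pointer */
    };

    static void prep_compound(struct page *base, unsigned order)
    {
        unsigned long nr = 1UL << order;

        base->head = 1;
        base->order = order;
        for (unsigned long i = 1; i < nr; i++) {
            base[i].tail = 1;
            base[i].first_page = base;   /* every tail knows its head */
        }
    }

    int main(void)
    {
        struct page pages[4] = { 0 };

        prep_compound(pages, 2);         /* one order-2 (4-page) unit */
        printf("tail 3 -> head order %u\n", pages[3].first_page->order);
        return 0;
    }
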
page              317 mm/page_alloc.c 		clear_highpage(page + i);
page              322 mm/page_alloc.c 	set_page_private(page, order);
page              323 mm/page_alloc.c 	__SetPageBuddy(page);
page              328 mm/page_alloc.c 	__ClearPageBuddy(page);
page              329 mm/page_alloc.c 	set_page_private(page, 0);
page              349 mm/page_alloc.c static inline struct page *
page              354 mm/page_alloc.c 	return page + (buddy_idx - page_idx);
page              382 mm/page_alloc.c 	if (page_zone_id(page) != page_zone_id(buddy))
page              421 mm/page_alloc.c 	int migratetype = get_pageblock_migratetype(page);
page              423 mm/page_alloc.c 	if (unlikely(PageCompound(page)))
page              424 mm/page_alloc.c 		destroy_compound_page(page, order);
page              426 mm/page_alloc.c 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
page              429 mm/page_alloc.c 	VM_BUG_ON(bad_range(zone, page));
page              434 mm/page_alloc.c 		struct page *buddy;
page              436 mm/page_alloc.c 		buddy = __page_find_buddy(page, page_idx, order);
page              437 mm/page_alloc.c 		if (!page_is_buddy(page, buddy, order))
page              445 mm/page_alloc.c 		page = page + (combined_idx - page_idx);
page              449 mm/page_alloc.c 	set_page_order(page, order);
page              450 mm/page_alloc.c 	list_add(&page->lru,
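__page_find_buddy() (page_alloc.c:349-354) is pure index arithmetic: at order k a block's buddy differs only in bit k of the page index, so buddy_idx = page_idx ^ (1 << k), and __free_one_page() keeps merging upward while the buddy is itself free at the same order. A compact sketch of the XOR walk; the free-block bitmap is an assumption for illustration:

    #include <stdio.h>

    #define MAX_ORDER 4

    /* free_blk[order][idx] != 0 means the block starting at idx is free
     * at that order.  Tiny toy zone: 16 pages. */
    static int free_blk[MAX_ORDER + 1][16];

    static void buddy_free(unsigned idx, unsigned order)
    {
        while (order < MAX_ORDER) {
            unsigned buddy = idx ^ (1U << order);  /* flip bit 'order' */

            if (!free_blk[order][buddy])
                break;                             /* buddy busy: stop */
            free_blk[order][buddy] = 0;            /* detach the buddy */
            idx &= buddy;                          /* merged block: lower index */
            order++;                               /* and one order bigger */
        }
        free_blk[order][idx] = 1;
        printf("freed order-%u block at index %u\n", order, idx);
    }

    int main(void)
    {
        buddy_free(2, 1);   /* free pages 2-3 */
        buddy_free(0, 1);   /* free pages 0-1: merges into order-2 at 0 */
        return 0;
    }
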
page              457 mm/page_alloc.c 	if (unlikely(page_mapcount(page) |
page              458 mm/page_alloc.c 		(page->mapping != NULL)  |
page              459 mm/page_alloc.c 		(page_get_page_cgroup(page) != NULL) |
page              460 mm/page_alloc.c 		(page_count(page) != 0)  |
page              461 mm/page_alloc.c 		(page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
page              462 mm/page_alloc.c 		bad_page(page);
page              463 mm/page_alloc.c 	if (PageDirty(page))
page              464 mm/page_alloc.c 		__ClearPageDirty(page);
page              470 mm/page_alloc.c 	return PageReserved(page);
page              491 mm/page_alloc.c 		struct page *page;
page              494 mm/page_alloc.c 		page = list_entry(list->prev, struct page, lru);
page              496 mm/page_alloc.c 		list_del(&page->lru);
page              497 mm/page_alloc.c 		__free_one_page(page, zone, order);
page              507 mm/page_alloc.c 	__free_one_page(page, zone, order);
page              518 mm/page_alloc.c 		reserved += free_pages_check(page + i);
page              522 mm/page_alloc.c 	if (!PageHighMem(page)) {
page              523 mm/page_alloc.c 		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
page              524 mm/page_alloc.c 		debug_check_no_obj_freed(page_address(page),
page              527 mm/page_alloc.c 	arch_free_page(page, order);
page              528 mm/page_alloc.c 	kernel_map_pages(page, 1 << order, 0);
page              532 mm/page_alloc.c 	free_one_page(page_zone(page), page, order);
page              542 mm/page_alloc.c 		__ClearPageReserved(page);
page              543 mm/page_alloc.c 		set_page_count(page, 0);
page              544 mm/page_alloc.c 		set_page_refcounted(page);
page              545 mm/page_alloc.c 		__free_page(page);
page              549 mm/page_alloc.c 		prefetchw(page);
page              551 mm/page_alloc.c 			struct page *p = &page[loop];
page              559 mm/page_alloc.c 		set_page_refcounted(page);
page              560 mm/page_alloc.c 		__free_pages(page, order);
page              589 mm/page_alloc.c 		VM_BUG_ON(bad_range(zone, &page[size]));
page              590 mm/page_alloc.c 		list_add(&page[size].lru, &area->free_list[migratetype]);
page              592 mm/page_alloc.c 		set_page_order(&page[size], high);
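expand() is the inverse: having pulled an order-`high` block to satisfy an order-`low` request, it halves repeatedly, parking the upper half of each split on the next-lower free list. Under the same toy assumptions as the buddy sketch above:

    #include <stdio.h>

    /* Split an order-'high' block at 'idx' down to order 'low',
     * reporting each upper half that would go back on a free list. */
    static void expand_split(unsigned idx, unsigned high, unsigned low)
    {
        unsigned long size = 1UL << high;

        while (high > low) {
            high--;
            size >>= 1;
            printf("return order-%u block at index %lu to free list\n",
                   high, idx + size);
        }
        printf("hand out order-%u block at index %u\n", low, idx);
    }

    int main(void)
    {
        expand_split(0, 3, 0);   /* carve one page out of an 8-page block */
        return 0;
    }
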
page              601 mm/page_alloc.c 	if (unlikely(page_mapcount(page) |
page              602 mm/page_alloc.c 		(page->mapping != NULL)  |
page              603 mm/page_alloc.c 		(page_get_page_cgroup(page) != NULL) |
page              604 mm/page_alloc.c 		(page_count(page) != 0)  |
page              605 mm/page_alloc.c 		(page->flags & PAGE_FLAGS_CHECK_AT_PREP)))
page              606 mm/page_alloc.c 		bad_page(page);
page              612 mm/page_alloc.c 	if (PageReserved(page))
page              615 mm/page_alloc.c 	page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_reclaim |
page              618 mm/page_alloc.c 	set_page_private(page, 0);
page              619 mm/page_alloc.c 	set_page_refcounted(page);
page              621 mm/page_alloc.c 	arch_alloc_page(page, order);
page              622 mm/page_alloc.c 	kernel_map_pages(page, 1 << order, 1);
page              625 mm/page_alloc.c 		prep_zero_page(page, order, gfp_flags);
page              628 mm/page_alloc.c 		prep_compound_page(page, order);
page              637 mm/page_alloc.c static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
page              642 mm/page_alloc.c 	struct page *page;
page              650 mm/page_alloc.c 		page = list_entry(area->free_list[migratetype].next,
page              651 mm/page_alloc.c 							struct page, lru);
page              652 mm/page_alloc.c 		list_del(&page->lru);
page              653 mm/page_alloc.c 		rmv_page_order(page);
page              656 mm/page_alloc.c 		expand(zone, page, order, current_order, area, migratetype);
page              657 mm/page_alloc.c 		return page;
page              684 mm/page_alloc.c 	struct page *page;
page              699 mm/page_alloc.c 	for (page = start_page; page <= end_page;) {
page              701 mm/page_alloc.c 		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
page              703 mm/page_alloc.c 		if (!pfn_valid_within(page_to_pfn(page))) {
page              704 mm/page_alloc.c 			page++;
page              708 mm/page_alloc.c 		if (!PageBuddy(page)) {
page              709 mm/page_alloc.c 			page++;
page              713 mm/page_alloc.c 		order = page_order(page);
page              714 mm/page_alloc.c 		list_del(&page->lru);
page              715 mm/page_alloc.c 		list_add(&page->lru,
page              717 mm/page_alloc.c 		page += 1 << order;
page              728 mm/page_alloc.c 	struct page *start_page, *end_page;
page              730 mm/page_alloc.c 	start_pfn = page_to_pfn(page);
page              738 mm/page_alloc.c 		start_page = page;
page              746 mm/page_alloc.c static struct page *__rmqueue_fallback(struct zone *zone, int order,
page              751 mm/page_alloc.c 	struct page *page;
page              768 mm/page_alloc.c 			page = list_entry(area->free_list[migratetype].next,
page              769 mm/page_alloc.c 					struct page, lru);
page              781 mm/page_alloc.c 				pages = move_freepages_block(zone, page,
page              786 mm/page_alloc.c 					set_pageblock_migratetype(page,
page              793 mm/page_alloc.c 			list_del(&page->lru);
page              794 mm/page_alloc.c 			rmv_page_order(page);
page              799 mm/page_alloc.c 				set_pageblock_migratetype(page,
page              802 mm/page_alloc.c 			expand(zone, page, order, current_order, area, migratetype);
page              803 mm/page_alloc.c 			return page;
page              815 mm/page_alloc.c static struct page *__rmqueue(struct zone *zone, unsigned int order,
page              818 mm/page_alloc.c 	struct page *page;
page              820 mm/page_alloc.c 	page = __rmqueue_smallest(zone, order, migratetype);
page              822 mm/page_alloc.c 	if (unlikely(!page))
page              823 mm/page_alloc.c 		page = __rmqueue_fallback(zone, order, migratetype);
page              825 mm/page_alloc.c 	return page;
page              841 mm/page_alloc.c 		struct page *page = __rmqueue(zone, order, migratetype);
page              842 mm/page_alloc.c 		if (unlikely(page == NULL))
page              854 mm/page_alloc.c 		list_add(&page->lru, list);
page              855 mm/page_alloc.c 		set_page_private(page, migratetype);
page              856 mm/page_alloc.c 		list = &page->lru;
page              949 mm/page_alloc.c 			struct page *page = pfn_to_page(pfn);
page              951 mm/page_alloc.c 			if (!swsusp_page_is_forbidden(page))
page              952 mm/page_alloc.c 				swsusp_unset_page_free(page);
page              959 mm/page_alloc.c 			pfn = page_to_pfn(list_entry(curr, struct page, lru));
page              973 mm/page_alloc.c 	struct zone *zone = page_zone(page);
page              977 mm/page_alloc.c 	if (PageAnon(page))
page              978 mm/page_alloc.c 		page->mapping = NULL;
page              979 mm/page_alloc.c 	if (free_pages_check(page))
page              982 mm/page_alloc.c 	if (!PageHighMem(page)) {
page              983 mm/page_alloc.c 		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
page              984 mm/page_alloc.c 		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
page              986 mm/page_alloc.c 	arch_free_page(page, 0);
page              987 mm/page_alloc.c 	kernel_map_pages(page, 1, 0);
page              993 mm/page_alloc.c 		list_add_tail(&page->lru, &pcp->list);
page              995 mm/page_alloc.c 		list_add(&page->lru, &pcp->list);
page              996 mm/page_alloc.c 	set_page_private(page, get_pageblock_migratetype(page));
page             1008 mm/page_alloc.c 	free_hot_cold_page(page, 0);
page             1013 mm/page_alloc.c 	free_hot_cold_page(page, 1);
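free_hot_cold_page() keeps one per-CPU list but encodes temperature by position: hot frees go to the head (list_add(), reused first, likely cache-warm) and cold frees to the tail (list_add_tail()); buffered_rmqueue() pops from the head. A toy ring-buffer model of head-vs-tail insertion:

    #include <stdio.h>

    #define PCP_MAX 8

    /* Toy per-CPU page list: pop from the front; hot frees push to the
     * front, cold frees push to the back. */
    static int pcp[PCP_MAX];
    static int head, count;

    static void pcp_free(int pfn, int cold)
    {
        if (cold)
            pcp[(head + count) % PCP_MAX] = pfn;       /* list_add_tail() */
        else {
            head = (head + PCP_MAX - 1) % PCP_MAX;     /* list_add() */
            pcp[head] = pfn;
        }
        count++;
    }

    static int pcp_alloc(void)
    {
        int pfn = pcp[head];
        head = (head + 1) % PCP_MAX;
        count--;
        return pfn;
    }

    int main(void)
    {
        pcp_free(100, 0);            /* hot */
        pcp_free(200, 1);            /* cold: queued behind the hot page */
        printf("first alloc gets pfn %d\n", pcp_alloc());
        return 0;
    }
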
page             1028 mm/page_alloc.c 	VM_BUG_ON(PageCompound(page));
page             1029 mm/page_alloc.c 	VM_BUG_ON(!page_count(page));
page             1031 mm/page_alloc.c 		set_page_refcounted(page + i);
page             1039 mm/page_alloc.c static struct page *buffered_rmqueue(struct zone *preferred_zone,
page             1043 mm/page_alloc.c 	struct page *page;
page             1064 mm/page_alloc.c 			list_for_each_entry_reverse(page, &pcp->list, lru)
page             1065 mm/page_alloc.c 				if (page_private(page) == migratetype)
page             1068 mm/page_alloc.c 			list_for_each_entry(page, &pcp->list, lru)
page             1069 mm/page_alloc.c 				if (page_private(page) == migratetype)
page             1074 mm/page_alloc.c 		if (unlikely(&page->lru == &pcp->list)) {
page             1077 mm/page_alloc.c 			page = list_entry(pcp->list.next, struct page, lru);
page             1080 mm/page_alloc.c 		list_del(&page->lru);
page             1084 mm/page_alloc.c 		page = __rmqueue(zone, order, migratetype);
page             1086 mm/page_alloc.c 		if (!page)
page             1095 mm/page_alloc.c 	VM_BUG_ON(bad_range(zone, page));
page             1096 mm/page_alloc.c 	if (prep_new_page(page, order, gfp_flags))
page             1098 mm/page_alloc.c 	return page;
page             1365 mm/page_alloc.c static struct page *
page             1370 mm/page_alloc.c 	struct page *page = NULL;
page             1414 mm/page_alloc.c 		page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
page             1415 mm/page_alloc.c 		if (page)
page             1429 mm/page_alloc.c 	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
page             1434 mm/page_alloc.c 	return page;
page             1440 mm/page_alloc.c struct page *
page             1448 mm/page_alloc.c 	struct page *page;
page             1472 mm/page_alloc.c 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
page             1474 mm/page_alloc.c 	if (page)
page             1517 mm/page_alloc.c 	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
page             1519 mm/page_alloc.c 	if (page)
page             1530 mm/page_alloc.c 			page = get_page_from_freelist(gfp_mask, nodemask, order,
page             1532 mm/page_alloc.c 			if (page)
page             1565 mm/page_alloc.c 		page = get_page_from_freelist(gfp_mask, nodemask, order,
page             1567 mm/page_alloc.c 		if (page)
page             1581 mm/page_alloc.c 		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
page             1584 mm/page_alloc.c 		if (page) {
page             1641 mm/page_alloc.c 	return page;
page             1650 mm/page_alloc.c 	struct page * page;
page             1651 mm/page_alloc.c 	page = alloc_pages(gfp_mask, order);
page             1652 mm/page_alloc.c 	if (!page)
page             1654 mm/page_alloc.c 	return (unsigned long) page_address(page);
page             1661 mm/page_alloc.c 	struct page * page;
page             1669 mm/page_alloc.c 	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
page             1670 mm/page_alloc.c 	if (page)
page             1671 mm/page_alloc.c 		return (unsigned long) page_address(page);
page             1687 mm/page_alloc.c 	if (put_page_testzero(page)) {
page             1689 mm/page_alloc.c 			free_hot_page(page);
page             1691 mm/page_alloc.c 			__free_pages_ok(page, order);
page             2511 mm/page_alloc.c 	struct page *page;
page             2523 mm/page_alloc.c 		page = pfn_to_page(pfn);
page             2526 mm/page_alloc.c 		if (page_to_nid(page) != zone_to_nid(zone))
page             2530 mm/page_alloc.c 		if (PageReserved(page))
page             2533 mm/page_alloc.c 		block_migratetype = get_pageblock_migratetype(page);
page             2543 mm/page_alloc.c 			set_pageblock_migratetype(page, MIGRATE_RESERVE);
page             2544 mm/page_alloc.c 			move_freepages_block(zone, page, MIGRATE_RESERVE);
page             2554 mm/page_alloc.c 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
page             2555 mm/page_alloc.c 			move_freepages_block(zone, page, MIGRATE_MOVABLE);
page             2568 mm/page_alloc.c 	struct page *page;
page             2586 mm/page_alloc.c 		page = pfn_to_page(pfn);
page             2587 mm/page_alloc.c 		set_page_links(page, zone, nid, pfn);
page             2588 mm/page_alloc.c 		mminit_verify_page_links(page, zone, nid, pfn);
page             2589 mm/page_alloc.c 		init_page_count(page);
page             2590 mm/page_alloc.c 		reset_page_mapcount(page);
page             2591 mm/page_alloc.c 		SetPageReserved(page);
page             2609 mm/page_alloc.c 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
page             2611 mm/page_alloc.c 		INIT_LIST_HEAD(&page->lru);
page             2615 mm/page_alloc.c 			set_page_address(page, __va(pfn << PAGE_SHIFT));
page             3428 mm/page_alloc.c 			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
page             3497 mm/page_alloc.c 		struct page *map;
page             3507 mm/page_alloc.c 		size =  (end - start) * sizeof(struct page);
page             4492 mm/page_alloc.c struct page *pfn_to_page(unsigned long pfn)
page             4498 mm/page_alloc.c 	return __page_to_pfn(page);
page             4542 mm/page_alloc.c 	zone = page_zone(page);
page             4543 mm/page_alloc.c 	pfn = page_to_pfn(page);
page             4569 mm/page_alloc.c 	zone = page_zone(page);
page             4570 mm/page_alloc.c 	pfn = page_to_pfn(page);
page             4595 mm/page_alloc.c 	zone = page_zone(page);
page             4600 mm/page_alloc.c 	if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
page             4602 mm/page_alloc.c 	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
page             4603 mm/page_alloc.c 	move_freepages_block(zone, page, MIGRATE_ISOLATE);
page             4616 mm/page_alloc.c 	zone = page_zone(page);
page             4618 mm/page_alloc.c 	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
page             4620 mm/page_alloc.c 	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
page             4621 mm/page_alloc.c 	move_freepages_block(zone, page, MIGRATE_MOVABLE);
page             4633 mm/page_alloc.c 	struct page *page;
page             4652 mm/page_alloc.c 		page = pfn_to_page(pfn);
page             4653 mm/page_alloc.c 		BUG_ON(page_count(page));
page             4654 mm/page_alloc.c 		BUG_ON(!PageBuddy(page));
page             4655 mm/page_alloc.c 		order = page_order(page);
page             4660 mm/page_alloc.c 		list_del(&page->lru);
page             4661 mm/page_alloc.c 		rmv_page_order(page);
page             4666 mm/page_alloc.c 			SetPageReserved((page+i));
page               36 mm/page_io.c   		bio->bi_io_vec[0].bv_page = page;
page               50 mm/page_io.c   	struct page *page = bio->bi_io_vec[0].bv_page;
page               53 mm/page_io.c   		SetPageError(page);
page               62 mm/page_io.c   		set_page_dirty(page);
page               67 mm/page_io.c   		ClearPageReclaim(page);
page               69 mm/page_io.c   	end_page_writeback(page);
page               76 mm/page_io.c   	struct page *page = bio->bi_io_vec[0].bv_page;
page               79 mm/page_io.c   		SetPageError(page);
page               80 mm/page_io.c   		ClearPageUptodate(page);
page               86 mm/page_io.c   		SetPageUptodate(page);
page               88 mm/page_io.c   	unlock_page(page);
page              101 mm/page_io.c   	if (remove_exclusive_swap_page(page)) {
page              102 mm/page_io.c   		unlock_page(page);
page              105 mm/page_io.c   	bio = get_swap_bio(GFP_NOIO, page_private(page), page,
page              108 mm/page_io.c   		set_page_dirty(page);
page              109 mm/page_io.c   		unlock_page(page);
page              116 mm/page_io.c   	set_page_writeback(page);
page              117 mm/page_io.c   	unlock_page(page);
page              128 mm/page_io.c   	BUG_ON(!PageLocked(page));
page              129 mm/page_io.c   	BUG_ON(PageUptodate(page));
page              130 mm/page_io.c   	bio = get_swap_bio(GFP_KERNEL, page_private(page), page,
page              133 mm/page_io.c   		unlock_page(page);
page               10 mm/page_isolation.c static inline struct page *
page               40 mm/page_isolation.c 	struct page *page;
page               48 mm/page_isolation.c 		page = __first_valid_page(pfn, pageblock_nr_pages);
page               49 mm/page_isolation.c 		if (page && set_migratetype_isolate(page)) {
page               71 mm/page_isolation.c 	struct page *page;
page               77 mm/page_isolation.c 		page = __first_valid_page(pfn, pageblock_nr_pages);
page               78 mm/page_isolation.c 		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
page               80 mm/page_isolation.c 		unset_migratetype_isolate(page);
page               94 mm/page_isolation.c 	struct page *page;
page              101 mm/page_isolation.c 		page = pfn_to_page(pfn);
page              102 mm/page_isolation.c 		if (PageBuddy(page))
page              103 mm/page_isolation.c 			pfn += 1 << page_order(page);
page              104 mm/page_isolation.c 		else if (page_count(page) == 0 &&
page              105 mm/page_isolation.c 				page_private(page) == MIGRATE_ISOLATE)
page              118 mm/page_isolation.c 	struct page *page;
page              129 mm/page_isolation.c 		page = __first_valid_page(pfn, pageblock_nr_pages);
page              130 mm/page_isolation.c 		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
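The scan at page_isolation.c:94-105 verifies a range is clean by hopping over free buddy blocks in jumps of 1 << page_order() and tolerating zero-count pages already marked MIGRATE_ISOLATE; anything else fails the check. A sketch of the skip arithmetic with stand-in page descriptors:

    #include <stdbool.h>
    #include <stdio.h>

    struct page { bool buddy; unsigned order; int count; bool isolated; };

    /* Return true if every page in [0, n) is free or already isolated. */
    static bool range_is_isolated(struct page *map, unsigned long n)
    {
        unsigned long pfn = 0;

        while (pfn < n) {
            struct page *page = &map[pfn];

            if (page->buddy)
                pfn += 1UL << page->order;     /* hop over the whole block */
            else if (page->count == 0 && page->isolated)
                pfn++;
            else
                return false;                  /* somebody still uses it */
        }
        return true;
    }

    int main(void)
    {
        struct page map[4] = {
            { .buddy = true, .order = 1 },     /* covers pfn 0-1 */
            { 0 },
            { .isolated = true },              /* count 0, marked isolated */
            { .count = 1 },                    /* still referenced */
        };
        printf("isolated: %d\n", range_is_isolated(map, 4));
        return 0;
    }
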
page               45 mm/readahead.c #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
page               60 mm/readahead.c 	struct page *page;
page               64 mm/readahead.c 		page = list_to_page(pages);
page               65 mm/readahead.c 		list_del(&page->lru);
page               66 mm/readahead.c 		if (add_to_page_cache_lru(page, mapping,
page               67 mm/readahead.c 					page->index, GFP_KERNEL)) {
page               68 mm/readahead.c 			page_cache_release(page);
page               71 mm/readahead.c 		page_cache_release(page);
page               73 mm/readahead.c 		ret = filler(data, page);
page               99 mm/readahead.c 		struct page *page = list_to_page(pages);
page              100 mm/readahead.c 		list_del(&page->lru);
page              101 mm/readahead.c 		if (!add_to_page_cache_lru(page, mapping,
page              102 mm/readahead.c 					page->index, GFP_KERNEL)) {
page              103 mm/readahead.c 			mapping->a_ops->readpage(filp, page);
page              105 mm/readahead.c 		page_cache_release(page);
page              129 mm/readahead.c 	struct page *page;
page              151 mm/readahead.c 		page = radix_tree_lookup(&mapping->page_tree, page_offset);
page              153 mm/readahead.c 		if (page)
page              156 mm/readahead.c 		page = page_cache_alloc_cold(mapping);
page              157 mm/readahead.c 		if (!page)
page              159 mm/readahead.c 		page->index = page_offset;
page              160 mm/readahead.c 		list_add(&page->lru, &page_pool);
page              162 mm/readahead.c 			SetPageReadahead(page);
page              469 mm/readahead.c 	if (PageWriteback(page))
page              472 mm/readahead.c 	ClearPageReadahead(page);
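SetPageReadahead() at readahead.c:162 plants a marker inside the current window; when a reader later hits it, readahead is extended before the reader stalls, with the window ramping up multiplicatively to a cap. A sketch in the flavor of get_next_ra_size(); the exact 4x threshold is an assumption:

    #include <stdio.h>

    /* Grow the readahead window on each marker hit: aggressive 4x while
     * small, 2x afterwards, never past 'max'. */
    static unsigned next_window(unsigned cur, unsigned max)
    {
        unsigned newsize = cur < max / 16 ? cur * 4 : cur * 2;
        return newsize < max ? newsize : max;
    }

    int main(void)
    {
        unsigned win = 4, max = 128;

        for (int hit = 0; hit < 6; hit++) {
            printf("window: %u pages\n", win);
            win = next_window(win, max);     /* reader hit PG_readahead */
        }
        return 0;
    }
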
page              166 mm/rmap.c      	anon_mapping = (unsigned long) page->mapping;
page              169 mm/rmap.c      	if (!page_mapped(page))
page              194 mm/rmap.c      	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
page              211 mm/rmap.c      	if (PageAnon(page)) {
page              213 mm/rmap.c      		    (void *)page->mapping - PAGE_MAPPING_ANON)
page              215 mm/rmap.c      	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
page              217 mm/rmap.c      		    vma->vm_file->f_mapping != page->mapping)
page              221 mm/rmap.c      	return vma_address(page, vma);
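page_address_in_vma() above reduces to one line of arithmetic: a page's file index plus the vma's vm_start/vm_pgoff recover the user virtual address, which vma_address() computes before page_check_address() probes the page tables (the real helper also range-checks against the vma). The calculation:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Recover the virtual address of file page 'pgoff' within a mapping
     * that starts at vm_start and covers the file from offset vm_pgoff. */
    static unsigned long vma_address(unsigned long pgoff,
                                     unsigned long vm_start,
                                     unsigned long vm_pgoff)
    {
        return vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT);
    }

    int main(void)
    {
        /* file pages 16.. mapped at 0x400000; where is file page 20? */
        printf("0x%lx\n", vma_address(20, 0x400000UL, 16));
        return 0;
    }
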
page              263 mm/rmap.c      	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
page              284 mm/rmap.c      	address = vma_address(page, vma);
page              288 mm/rmap.c      	pte = page_check_address(page, mm, address, &ptl, 0);
page              318 mm/rmap.c      	anon_vma = page_lock_anon_vma(page);
page              322 mm/rmap.c      	mapcount = page_mapcount(page);
page              331 mm/rmap.c      		referenced += page_referenced_one(page, vma, &mapcount);
page              356 mm/rmap.c      	struct address_space *mapping = page->mapping;
page              357 mm/rmap.c      	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
page              367 mm/rmap.c      	BUG_ON(PageAnon(page));
page              375 mm/rmap.c      	BUG_ON(!PageLocked(page));
page              383 mm/rmap.c      	mapcount = page_mapcount(page);
page              398 mm/rmap.c      		referenced += page_referenced_one(page, vma, &mapcount);
page              421 mm/rmap.c      	if (TestClearPageReferenced(page))
page              424 mm/rmap.c      	if (page_mapped(page) && page->mapping) {
page              425 mm/rmap.c      		if (PageAnon(page))
page              426 mm/rmap.c      			referenced += page_referenced_anon(page, mem_cont);
page              428 mm/rmap.c      			referenced += page_referenced_file(page, mem_cont);
page              429 mm/rmap.c      		else if (!trylock_page(page))
page              432 mm/rmap.c      			if (page->mapping)
page              434 mm/rmap.c      					page_referenced_file(page, mem_cont);
page              435 mm/rmap.c      			unlock_page(page);
page              439 mm/rmap.c      	if (page_test_and_clear_young(page))
page              453 mm/rmap.c      	address = vma_address(page, vma);
page              457 mm/rmap.c      	pte = page_check_address(page, mm, address, &ptl, 1);
page              479 mm/rmap.c      	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
page              484 mm/rmap.c      	BUG_ON(PageAnon(page));
page              489 mm/rmap.c      			ret += page_mkclean_one(page, vma);
page              499 mm/rmap.c      	BUG_ON(!PageLocked(page));
page              501 mm/rmap.c      	if (page_mapped(page)) {
page              502 mm/rmap.c      		struct address_space *mapping = page_mapping(page);
page              504 mm/rmap.c      			ret = page_mkclean_file(mapping, page);
page              505 mm/rmap.c      			if (page_test_dirty(page)) {
page              506 mm/rmap.c      				page_clear_dirty(page);
page              529 mm/rmap.c      	page->mapping = (struct address_space *) anon_vma;
page              531 mm/rmap.c      	page->index = linear_page_index(vma, address);
page              537 mm/rmap.c      	__inc_zone_page_state(page, NR_ANON_PAGES);
page              564 mm/rmap.c      	BUG_ON(page->mapping != (struct address_space *)anon_vma);
page              565 mm/rmap.c      	BUG_ON(page->index != linear_page_index(vma, address));
page              580 mm/rmap.c      	VM_BUG_ON(!PageLocked(page));
page              582 mm/rmap.c      	if (atomic_inc_and_test(&page->_mapcount))
page              583 mm/rmap.c      		__page_set_anon_rmap(page, vma, address);
page              585 mm/rmap.c      		__page_check_anon_rmap(page, vma, address);
page              602 mm/rmap.c      	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
page              603 mm/rmap.c      	__page_set_anon_rmap(page, vma, address);
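As the comment at rmap.c:602 notes, _mapcount starts at -1, so atomic_inc_and_test() hitting zero means "first mapping" (set up the anon rmap fields) and atomic_add_negative(-1, ...) going negative in page_remove_rmap() means "last mapping gone". A sketch of those two edge detections:

    #include <stdio.h>

    int main(void)
    {
        int mapcount = -1;                       /* unmapped page */

        if (++mapcount == 0)                     /* atomic_inc_and_test() */
            printf("first mapping: set up rmap fields\n");

        ++mapcount;                              /* second mapper arrives */

        if (--mapcount < 0)                      /* atomic_add_negative(-1) */
            printf("unexpected\n");
        if (--mapcount < 0)
            printf("last mapping gone: update NR_*_MAPPED counters\n");
        return 0;
    }
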
page              614 mm/rmap.c      	if (atomic_inc_and_test(&page->_mapcount))
page              615 mm/rmap.c      		__inc_zone_page_state(page, NR_FILE_MAPPED);
page              633 mm/rmap.c      	BUG_ON(page_mapcount(page) == 0);
page              634 mm/rmap.c      	if (PageAnon(page))
page              635 mm/rmap.c      		__page_check_anon_rmap(page, vma, address);
page              636 mm/rmap.c      	atomic_inc(&page->_mapcount);
page              649 mm/rmap.c      	if (atomic_add_negative(-1, &page->_mapcount)) {
page              650 mm/rmap.c      		if (unlikely(page_mapcount(page) < 0)) {
page              651 mm/rmap.c      			printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
page              652 mm/rmap.c      			printk (KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
page              653 mm/rmap.c      			printk (KERN_EMERG "  page->flags = %lx\n", page->flags);
page              654 mm/rmap.c      			printk (KERN_EMERG "  page->count = %x\n", page_count(page));
page              655 mm/rmap.c      			printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
page              672 mm/rmap.c      		if ((!PageAnon(page) || PageSwapCache(page)) &&
page              673 mm/rmap.c      		    page_test_dirty(page)) {
page              674 mm/rmap.c      			page_clear_dirty(page);
page              675 mm/rmap.c      			set_page_dirty(page);
page              678 mm/rmap.c      		mem_cgroup_uncharge_page(page);
page              679 mm/rmap.c      		__dec_zone_page_state(page,
page              680 mm/rmap.c      			PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
page              707 mm/rmap.c      	address = vma_address(page, vma);
page              711 mm/rmap.c      	pte = page_check_address(page, mm, address, &ptl, 0);
page              727 mm/rmap.c      	flush_cache_page(vma, address, page_to_pfn(page));
page              732 mm/rmap.c      		set_page_dirty(page);
page              737 mm/rmap.c      	if (PageAnon(page)) {
page              738 mm/rmap.c      		swp_entry_t entry = { .val = page_private(page) };
page              740 mm/rmap.c      		if (PageSwapCache(page)) {
page              761 mm/rmap.c      			entry = make_migration_entry(page, pte_write(pteval));
page              771 mm/rmap.c      		entry = make_migration_entry(page, pte_write(pteval));
page              778 mm/rmap.c      	page_remove_rmap(page, vma);
page              779 mm/rmap.c      	page_cache_release(page);
page              819 mm/rmap.c      	struct page *page;
page              850 mm/rmap.c      		page = vm_normal_page(vma, address, *pte);
page              851 mm/rmap.c      		BUG_ON(!page || PageAnon(page));
page              861 mm/rmap.c      		if (page->index != linear_page_index(vma, address))
page              862 mm/rmap.c      			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
page              866 mm/rmap.c      			set_page_dirty(page);
page              868 mm/rmap.c      		page_remove_rmap(page, vma);
page              869 mm/rmap.c      		page_cache_release(page);
page              882 mm/rmap.c      	anon_vma = page_lock_anon_vma(page);
page              887 mm/rmap.c      		ret = try_to_unmap_one(page, vma, migration);
page              888 mm/rmap.c      		if (ret == SWAP_FAIL || !page_mapped(page))
page              908 mm/rmap.c      	struct address_space *mapping = page->mapping;
page              909 mm/rmap.c      	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
page              920 mm/rmap.c      		ret = try_to_unmap_one(page, vma, migration);
page              921 mm/rmap.c      		if (ret == SWAP_FAIL || !page_mapped(page))
page              952 mm/rmap.c      	mapcount = page_mapcount(page);
page             1010 mm/rmap.c      	BUG_ON(!PageLocked(page));
page             1012 mm/rmap.c      	if (PageAnon(page))
page             1013 mm/rmap.c      		ret = try_to_unmap_anon(page, migration);
page             1015 mm/rmap.c      		ret = try_to_unmap_file(page, migration);
page             1017 mm/rmap.c      	if (!page_mapped(page))
page              101 mm/shmem.c     static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
page              116 mm/shmem.c     	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
page              119 mm/shmem.c     static struct page **shmem_dir_map(struct page *page)
page              121 mm/shmem.c     	return (struct page **)kmap_atomic(page, KM_USER0);
page              131 mm/shmem.c     	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
page              310 mm/shmem.c     	struct page **dir;
page              311 mm/shmem.c     	struct page *subdir;
page              318 mm/shmem.c     		if (page) {
page              319 mm/shmem.c     			info->i_indirect = *page;
page              320 mm/shmem.c     			*page = NULL;
page              336 mm/shmem.c     			if (page) {
page              337 mm/shmem.c     				*dir = *page;
page              338 mm/shmem.c     				*page = NULL;
page              350 mm/shmem.c     		if (!page || !(subdir = *page)) {
page              355 mm/shmem.c     		*page = NULL;
page              368 mm/shmem.c     		struct page *page = kmap_atomic_to_page(entry);
page              369 mm/shmem.c     		set_page_private(page, page_private(page) + incdec);
page              385 mm/shmem.c     	struct page *page = NULL;
page              392 mm/shmem.c     	while (!(entry = shmem_swp_entry(info, index, &page))) {
page              412 mm/shmem.c     		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
page              413 mm/shmem.c     		if (page)
page              414 mm/shmem.c     			set_page_private(page, 0);
page              417 mm/shmem.c     		if (!page) {
page              429 mm/shmem.c     	if (page) {
page              432 mm/shmem.c     		shmem_dir_free(page);
page              500 mm/shmem.c     	struct page *page;
page              504 mm/shmem.c     		page = container_of(next, struct page, lru);
page              506 mm/shmem.c     		shmem_dir_free(page);
page              523 mm/shmem.c     	struct page **dir;
page              524 mm/shmem.c     	struct page *topdir;
page              525 mm/shmem.c     	struct page *middir;
page              526 mm/shmem.c     	struct page *subdir;
page              750 mm/shmem.c     	struct page *page = NULL;
page              765 mm/shmem.c     						&page, SGP_READ, NULL);
page              766 mm/shmem.c     				if (page)
page              767 mm/shmem.c     					unlock_page(page);
page              792 mm/shmem.c     	if (page)
page              793 mm/shmem.c     		page_cache_release(page);
page              835 mm/shmem.c     	struct page **dir;
page              836 mm/shmem.c     	struct page *subdir;
page              924 mm/shmem.c     	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
page              929 mm/shmem.c     		mem_cgroup_uncharge_cache_page(page);
page              937 mm/shmem.c     		error = add_to_page_cache_locked(page, inode->i_mapping,
page              941 mm/shmem.c     		mem_cgroup_uncharge_cache_page(page);
page              944 mm/shmem.c     		struct page *filepage = find_get_page(inode->i_mapping, idx);
page              957 mm/shmem.c     		delete_from_swap_cache(page);
page              958 mm/shmem.c     		set_page_dirty(page);
page              969 mm/shmem.c     	unlock_page(page);
page              970 mm/shmem.c     	page_cache_release(page);
page              987 mm/shmem.c     		found = shmem_unuse_inode(info, entry, page);
page             1007 mm/shmem.c     	BUG_ON(!PageLocked(page));
page             1008 mm/shmem.c     	mapping = page->mapping;
page             1009 mm/shmem.c     	index = page->index;
page             1046 mm/shmem.c     	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
page             1047 mm/shmem.c     		remove_from_page_cache(page);
page             1056 mm/shmem.c     		BUG_ON(page_mapped(page));
page             1057 mm/shmem.c     		page_cache_release(page);	/* pagecache ref */
page             1058 mm/shmem.c     		set_page_dirty(page);
page             1059 mm/shmem.c     		unlock_page(page);
page             1075 mm/shmem.c     	set_page_dirty(page);
page             1078 mm/shmem.c     	unlock_page(page);
page             1109 mm/shmem.c     static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
page             1114 mm/shmem.c     	struct page *page;
page             1124 mm/shmem.c     	page = swapin_readahead(entry, gfp, &pvma, 0);
page             1125 mm/shmem.c     	return page;
page             1128 mm/shmem.c     static struct page *shmem_alloc_page(gfp_t gfp,
page             1151 mm/shmem.c     static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
page             1157 mm/shmem.c     static inline struct page *shmem_alloc_page(gfp_t gfp,
page             1184 mm/shmem.c     	struct page *filepage = *pagep;
page             1185 mm/shmem.c     	struct page *swappage;
page             1442 mm/shmem.c     	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
page             1446 mm/shmem.c     	mark_page_accessed(vmf->page);
page             1565 mm/shmem.c     	struct inode *inode = page->mapping->host;
page             1566 mm/shmem.c     	int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
page             1567 mm/shmem.c     	unlock_page(page);
page             1592 mm/shmem.c     	unlock_page(page);
page             1593 mm/shmem.c     	set_page_dirty(page);
page             1594 mm/shmem.c     	page_cache_release(page);
page             1618 mm/shmem.c     		struct page *page = NULL;
page             1631 mm/shmem.c     		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
page             1637 mm/shmem.c     		if (page)
page             1638 mm/shmem.c     			unlock_page(page);
page             1650 mm/shmem.c     				if (page)
page             1651 mm/shmem.c     					page_cache_release(page);
page             1657 mm/shmem.c     		if (page) {
page             1664 mm/shmem.c     				flush_dcache_page(page);
page             1669 mm/shmem.c     				mark_page_accessed(page);
page             1671 mm/shmem.c     			page = ZERO_PAGE(0);
page             1672 mm/shmem.c     			page_cache_get(page);
page             1685 mm/shmem.c     		ret = actor(desc, page, offset, nr);
page             1690 mm/shmem.c     		page_cache_release(page);
page             1895 mm/shmem.c     	struct page *page = NULL;
page             1924 mm/shmem.c     		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
page             1929 mm/shmem.c     		unlock_page(page);
page             1932 mm/shmem.c     		kaddr = kmap_atomic(page, KM_USER0);
page             1935 mm/shmem.c     		set_page_dirty(page);
page             1936 mm/shmem.c     		page_cache_release(page);
page             1955 mm/shmem.c     	struct page *page = NULL;
page             1956 mm/shmem.c     	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
page             1957 mm/shmem.c     	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
page             1958 mm/shmem.c     	if (page)
page             1959 mm/shmem.c     		unlock_page(page);
page             1960 mm/shmem.c     	return page;
page             1966 mm/shmem.c     		struct page *page = cookie;
page             1967 mm/shmem.c     		kunmap(page);
page             1968 mm/shmem.c     		mark_page_accessed(page);
page             1969 mm/shmem.c     		page_cache_release(page);
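shmem_swp_entry() and shmem_swp_alloc() (shmem.c:310-432) walk a small directory tree: info->i_indirect names a page of pointers whose targets hold swp_entry_t slots, with levels allocated on demand via shmem_dir_alloc(). A heavily stripped two-level model; the slot widths are illustrative, not shmem's actual SHMEM_NR_DIRECT/ENTRIES_PER_PAGE layout:

    #include <stdio.h>
    #include <stdlib.h>

    #define DIR_SLOTS   8          /* pointers per directory page (toy) */
    #define ENTRY_SLOTS 8          /* swap entries per leaf page (toy) */

    typedef unsigned long swp_entry_t;

    /* Two-level on-demand table: dir[i] -> leaf page of swap entries. */
    static swp_entry_t *swp_slot(swp_entry_t **dir, unsigned long index)
    {
        unsigned long hi = index / ENTRY_SLOTS;
        unsigned long lo = index % ENTRY_SLOTS;

        if (hi >= DIR_SLOTS)
            return NULL;                           /* beyond this toy file */
        if (!dir[hi]) {
            dir[hi] = calloc(ENTRY_SLOTS, sizeof(swp_entry_t));
            if (!dir[hi])
                return NULL;                       /* allocation failed */
        }
        return &dir[hi][lo];
    }

    int main(void)
    {
        swp_entry_t *dir[DIR_SLOTS] = { 0 };
        swp_entry_t *slot = swp_slot(dir, 13);

        if (slot) {
            *slot = 0xabcd;                        /* where page 13 went */
            printf("entry for page 13: %#lx\n", *slot);
        }
        return 0;
    }
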
page              584 mm/slab.c      	page->lru.next = (struct list_head *)cache;
page              589 mm/slab.c      	page = compound_head(page);
page              590 mm/slab.c      	BUG_ON(!PageSlab(page));
page              591 mm/slab.c      	return (struct kmem_cache *)page->lru.next;
page              596 mm/slab.c      	page->lru.prev = (struct list_head *)slab;
page              601 mm/slab.c      	BUG_ON(!PageSlab(page));
page              602 mm/slab.c      	return (struct slab *)page->lru.prev;
page              607 mm/slab.c      	struct page *page = virt_to_head_page(obj);
page              608 mm/slab.c      	return page_get_cache(page);
page              613 mm/slab.c      	struct page *page = virt_to_head_page(obj);
page              614 mm/slab.c      	return page_get_slab(page);
page             1666 mm/slab.c      	struct page *page;
page             1682 mm/slab.c      	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
page             1683 mm/slab.c      	if (!page)
page             1688 mm/slab.c      		add_zone_page_state(page_zone(page),
page             1691 mm/slab.c      		add_zone_page_state(page_zone(page),
page             1694 mm/slab.c      		__SetPageSlab(page + i);
page             1695 mm/slab.c      	return page_address(page);
page             1704 mm/slab.c      	struct page *page = virt_to_page(addr);
page             1708 mm/slab.c      		sub_zone_page_state(page_zone(page),
page             1711 mm/slab.c      		sub_zone_page_state(page_zone(page),
page             1714 mm/slab.c      		BUG_ON(!PageSlab(page));
page             1715 mm/slab.c      		__ClearPageSlab(page);
page             1716 mm/slab.c      		page++;
page             2734 mm/slab.c      	struct page *page;
page             2736 mm/slab.c      	page = virt_to_page(addr);
page             2739 mm/slab.c      	if (likely(!PageCompound(page)))
page             2743 mm/slab.c      		page_set_cache(page, cache);
page             2744 mm/slab.c      		page_set_slab(page, slab);
page             2745 mm/slab.c      		page++;
page             2872 mm/slab.c      	struct page *page;
page             2880 mm/slab.c      	page = virt_to_head_page(objp);
page             2882 mm/slab.c      	slabp = page_get_slab(page);
page             3638 mm/slab.c      	struct page *page;
page             3650 mm/slab.c      	page = virt_to_page(ptr);
page             3651 mm/slab.c      	if (unlikely(!PageSlab(page)))
page             3653 mm/slab.c      	if (unlikely(page_get_cache(page) != cachep))
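slab.c:584-614 parks metadata in the page's otherwise-idle lru pointers on slab pages: lru.next carries the kmem_cache and lru.prev the slab descriptor, so freeing any object can find its cache through virt_to_head_page(). A toy version of the pointer stash with stand-in types:

    #include <stdio.h>

    struct list_head { void *next, *prev; };
    struct page { struct list_head lru; };
    struct kmem_cache { const char *name; };
    struct slab { int inuse; };

    static void page_set_cache(struct page *p, struct kmem_cache *c)
    { p->lru.next = c; }                     /* borrow lru.next */
    static struct kmem_cache *page_get_cache(struct page *p)
    { return p->lru.next; }
    static void page_set_slab(struct page *p, struct slab *s)
    { p->lru.prev = s; }                     /* borrow lru.prev */
    static struct slab *page_get_slab(struct page *p)
    { return p->lru.prev; }

    int main(void)
    {
        struct kmem_cache cache = { "dentry" };
        struct slab slab = { 7 };
        struct page pg;

        page_set_cache(&pg, &cache);
        page_set_slab(&pg, &slab);
        printf("object belongs to %s (%d in use)\n",
               page_get_cache(&pg)->name, page_get_slab(&pg)->inuse);
        return 0;
    }
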
page              104 mm/slob.c      		struct page page;
page              108 mm/slob.c      { BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }
page              115 mm/slob.c      	reset_page_mapcount(&sp->page);
page              116 mm/slob.c      	sp->page.mapping = NULL;
page              133 mm/slob.c      	return PageSlobPage((struct page *)sp);
page              138 mm/slob.c      	__SetPageSlobPage((struct page *)sp);
page              143 mm/slob.c      	__ClearPageSlobPage((struct page *)sp);
page              151 mm/slob.c      	return PageSlobFree((struct page *)sp);
page              157 mm/slob.c      	__SetPageSlobFree((struct page *)sp);
page              163 mm/slob.c      	__ClearPageSlobFree((struct page *)sp);
page              235 mm/slob.c      	void *page;
page              239 mm/slob.c      		page = alloc_pages_node(node, gfp, order);
page              242 mm/slob.c      		page = alloc_pages(gfp, order);
page              244 mm/slob.c      	if (!page)
page              247 mm/slob.c      	return page_address(page);
page              327 mm/slob.c      		if (node != -1 && page_to_nid(&sp->page) != node)
page              481 mm/slob.c      			struct page *page;
page              482 mm/slob.c      			page = virt_to_page(ret);
page              483 mm/slob.c      			page->private = size;
page              503 mm/slob.c      		put_page(&sp->page);
page              522 mm/slob.c      		return sp->page.private;
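SLOB's large-object path (slob.c:481-522) skips slab bookkeeping entirely: past one page it takes raw pages and records the request size in page->private, making ksize() a single field read. A sketch of the size stash; the array-backed page lookup stands in for virt_to_page():

    #include <stdio.h>

    struct page { unsigned long private; };

    /* Toy: one descriptor per "page"; the real code maps an address to
     * its struct page with virt_to_page(). */
    static struct page pagemap[4];

    static void big_alloc_record(unsigned pfn, unsigned long size)
    {
        pagemap[pfn].private = size;       /* page->private = size */
    }

    static unsigned long toy_ksize(unsigned pfn)
    {
        return pagemap[pfn].private;       /* sp->page.private */
    }

    int main(void)
    {
        big_alloc_record(2, 12288);        /* 3-page allocation */
        printf("ksize = %lu\n", toy_ksize(2));
        return 0;
    }
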
page              247 mm/slub.c      	base = page_address(page);
page              248 mm/slub.c      	if (object < base || object >= base + page->objects * s->size ||
page              418 mm/slub.c      		page, page->objects, page->inuse, page->freelist, page->flags);
page              451 mm/slub.c      	u8 *addr = page_address(page);
page              455 mm/slub.c      	print_page_info(page);
page              488 mm/slub.c      	print_trailer(s, page, object);
page              500 mm/slub.c      	print_page_info(page);
page              555 mm/slub.c      	print_trailer(s, page, object);
page              614 mm/slub.c      	return check_bytes_and_report(s, page, p, "Object padding",
page              630 mm/slub.c      	start = page_address(page);
page              631 mm/slub.c      	length = (PAGE_SIZE << compound_order(page));
page              643 mm/slub.c      	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
page              660 mm/slub.c      		if (!check_bytes_and_report(s, page, object, "Redzone",
page              665 mm/slub.c      			check_bytes_and_report(s, page, p, "Alignment padding",
page              672 mm/slub.c      			(!check_bytes_and_report(s, page, p, "Poison", p,
page              674 mm/slub.c      			 !check_bytes_and_report(s, page, p, "Poison",
page              680 mm/slub.c      		check_pad_bytes(s, page, p);
page              691 mm/slub.c      	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
page              692 mm/slub.c      		object_err(s, page, p, "Freepointer corrupt");
page              710 mm/slub.c      	if (!PageSlab(page)) {
page              711 mm/slub.c      		slab_err(s, page, "Not a valid slab page");
page              715 mm/slub.c      	maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
page              716 mm/slub.c      	if (page->objects > maxobj) {
page              717 mm/slub.c      		slab_err(s, page, "objects %u > max %u",
page              718 mm/slub.c      			s->name, page->objects, maxobj);
page              721 mm/slub.c      	if (page->inuse > page->objects) {
page              722 mm/slub.c      		slab_err(s, page, "inuse %u > max %u",
page              723 mm/slub.c      			s->name, page->inuse, page->objects);
page              727 mm/slub.c      	slab_pad_check(s, page);
page              738 mm/slub.c      	void *fp = page->freelist;
page              742 mm/slub.c      	while (fp && nr <= page->objects) {
page              745 mm/slub.c      		if (!check_valid_pointer(s, page, fp)) {
page              747 mm/slub.c      				object_err(s, page, object,
page              752 mm/slub.c      				slab_err(s, page, "Freepointer corrupt");
page              753 mm/slub.c      				page->freelist = NULL;
page              754 mm/slub.c      				page->inuse = page->objects;
page              765 mm/slub.c      	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
page              769 mm/slub.c      	if (page->objects != max_objects) {
page              770 mm/slub.c      		slab_err(s, page, "Wrong number of objects. Found %d but "
page              771 mm/slub.c      			"should be %d", page->objects, max_objects);
page              772 mm/slub.c      		page->objects = max_objects;
page              775 mm/slub.c      	if (page->inuse != page->objects - nr) {
page              776 mm/slub.c      		slab_err(s, page, "Wrong object count. Counter is %d but "
page              777 mm/slub.c      			"counted were %d", page->inuse, page->objects - nr);
page              778 mm/slub.c      		page->inuse = page->objects - nr;
page              791 mm/slub.c      			object, page->inuse,
page              792 mm/slub.c      			page->freelist);
page              807 mm/slub.c      	list_add(&page->lru, &n->full);
page              818 mm/slub.c      	n = get_node(s, page_to_nid(page));
page              821 mm/slub.c      	list_del(&page->lru);
page              870 mm/slub.c      	if (!check_slab(s, page))
page              873 mm/slub.c      	if (!on_freelist(s, page, object)) {
page              874 mm/slub.c      		object_err(s, page, object, "Object already allocated");
page              878 mm/slub.c      	if (!check_valid_pointer(s, page, object)) {
page              879 mm/slub.c      		object_err(s, page, object, "Freelist Pointer check fails");
page              883 mm/slub.c      	if (!check_object(s, page, object, 0))
page              889 mm/slub.c      	trace(s, page, object, 1);
page              894 mm/slub.c      	if (PageSlab(page)) {
page              901 mm/slub.c      		page->inuse = page->objects;
page              902 mm/slub.c      		page->freelist = NULL;
page              910 mm/slub.c      	if (!check_slab(s, page))
page              913 mm/slub.c      	if (!check_valid_pointer(s, page, object)) {
page              914 mm/slub.c      		slab_err(s, page, "Invalid object pointer 0x%p", object);
page              918 mm/slub.c      	if (on_freelist(s, page, object)) {
page              919 mm/slub.c      		object_err(s, page, object, "Object already free");
page              923 mm/slub.c      	if (!check_object(s, page, object, 1))
page              926 mm/slub.c      	if (unlikely(s != page->slab)) {
page              927 mm/slub.c      		if (!PageSlab(page)) {
page              928 mm/slub.c      			slab_err(s, page, "Attempt to free object(0x%p) "
page              930 mm/slub.c      		} else if (!page->slab) {
page              936 mm/slub.c      			object_err(s, page, object,
page              942 mm/slub.c      	if (!PageSlubFrozen(page) && !page->freelist)
page              943 mm/slub.c      		remove_full(s, page);
page              946 mm/slub.c      	trace(s, page, object, 0);
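The SLUB debug entries above all lean on one bounds test: an object pointer is valid only if it lies inside the slab and on an object-size boundary. A sketch of that check, reconstructed from the page_address()/page->objects arithmetic in the excerpts (s->size is the per-object stride):

#include <linux/mm.h>

/* Valid iff base <= object < base + objects*size, aligned to size. */
static int check_valid_pointer_sketch(struct kmem_cache *s,
				      struct page *page, const void *object)
{
	void *base = page_address(page);

	if (object < base ||
	    object >= base + page->objects * s->size)
		return 0;			/* outside the slab       */
	if ((object - base) % s->size)
		return 0;			/* not an object boundary */
	return 1;
}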
page             1060 mm/slub.c      static inline struct page *alloc_slab_page(gfp_t flags, int node,
page             1071 mm/slub.c      static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
page             1073 mm/slub.c      	struct page *page;
page             1078 mm/slub.c      	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
page             1080 mm/slub.c      	if (unlikely(!page)) {
page             1086 mm/slub.c      		page = alloc_slab_page(flags, node, oo);
page             1087 mm/slub.c      		if (!page)
page             1092 mm/slub.c      	page->objects = oo_objects(oo);
page             1093 mm/slub.c      	mod_zone_page_state(page_zone(page),
page             1098 mm/slub.c      	return page;
page             1104 mm/slub.c      	setup_object_debug(s, page, object);
page             1109 mm/slub.c      static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
page             1111 mm/slub.c      	struct page *page;
page             1118 mm/slub.c      	page = allocate_slab(s,
page             1120 mm/slub.c      	if (!page)
page             1123 mm/slub.c      	inc_slabs_node(s, page_to_nid(page), page->objects);
page             1124 mm/slub.c      	page->slab = s;
page             1125 mm/slub.c      	page->flags |= 1 << PG_slab;
page             1128 mm/slub.c      		__SetPageSlubDebug(page);
page             1130 mm/slub.c      	start = page_address(page);
page             1133 mm/slub.c      		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
page             1136 mm/slub.c      	for_each_object(p, s, start, page->objects) {
page             1137 mm/slub.c      		setup_object(s, page, last);
page             1141 mm/slub.c      	setup_object(s, page, last);
page             1144 mm/slub.c      	page->freelist = start;
page             1145 mm/slub.c      	page->inuse = 0;
page             1147 mm/slub.c      	return page;
page             1152 mm/slub.c      	int order = compound_order(page);
page             1155 mm/slub.c      	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
page             1158 mm/slub.c      		slab_pad_check(s, page);
page             1159 mm/slub.c      		for_each_object(p, s, page_address(page),
page             1160 mm/slub.c      						page->objects)
page             1161 mm/slub.c      			check_object(s, page, p, 0);
page             1162 mm/slub.c      		__ClearPageSlubDebug(page);
page             1165 mm/slub.c      	mod_zone_page_state(page_zone(page),
page             1170 mm/slub.c      	__ClearPageSlab(page);
page             1171 mm/slub.c      	reset_page_mapcount(page);
page             1172 mm/slub.c      	__free_pages(page, order);
page             1177 mm/slub.c      	struct page *page;
page             1179 mm/slub.c      	page = container_of((struct list_head *)h, struct page, lru);
page             1180 mm/slub.c      	__free_slab(page->slab, page);
page             1189 mm/slub.c      		struct rcu_head *head = (void *)&page->lru;
page             1193 mm/slub.c      		__free_slab(s, page);
page             1198 mm/slub.c      	dec_slabs_node(s, page_to_nid(page), page->objects);
page             1199 mm/slub.c      	free_slab(s, page);
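The rcu_free_slab/free_slab pair above reuses page->lru as an rcu_head: by the time a slab page is freed it is off every list, so the space is safe to overlay, and the RCU callback gets the page back with container_of(). A hedged reconstruction of that pair:

#include <linux/mm.h>
#include <linux/rcupdate.h>

static void rcu_free_slab_sketch(struct rcu_head *h)
{
	/* the rcu_head was written over page->lru; undo the aliasing */
	struct page *page = container_of((struct list_head *)h,
					 struct page, lru);
	__free_slab(page->slab, page);	/* cache pointer kept in the page */
}

static void free_slab_sketch(struct kmem_cache *s, struct page *page)
{
	if (s->flags & SLAB_DESTROY_BY_RCU) {
		struct rcu_head *head = (void *)&page->lru;
		call_rcu(head, rcu_free_slab_sketch);	/* defer past readers */
	} else {
		__free_slab(s, page);			/* immediate free     */
	}
}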
page             1207 mm/slub.c      	bit_spin_lock(PG_locked, &page->flags);
page             1212 mm/slub.c      	__bit_spin_unlock(PG_locked, &page->flags);
page             1219 mm/slub.c      	rc = bit_spin_trylock(PG_locked, &page->flags);
page             1232 mm/slub.c      		list_add_tail(&page->lru, &n->partial);
page             1234 mm/slub.c      		list_add(&page->lru, &n->partial);
page             1240 mm/slub.c      	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
page             1243 mm/slub.c      	list_del(&page->lru);
page             1256 mm/slub.c      	if (slab_trylock(page)) {
page             1257 mm/slub.c      		list_del(&page->lru);
page             1259 mm/slub.c      		__SetPageSlubFrozen(page);
page             1268 mm/slub.c      static struct page *get_partial_node(struct kmem_cache_node *n)
page             1270 mm/slub.c      	struct page *page;
page             1282 mm/slub.c      	list_for_each_entry(page, &n->partial, lru)
page             1283 mm/slub.c      		if (lock_and_freeze_slab(n, page))
page             1285 mm/slub.c      	page = NULL;
page             1288 mm/slub.c      	return page;
page             1294 mm/slub.c      static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
page             1301 mm/slub.c      	struct page *page;
page             1333 mm/slub.c      			page = get_partial_node(n);
page             1334 mm/slub.c      			if (page)
page             1335 mm/slub.c      				return page;
page             1345 mm/slub.c      static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
page             1347 mm/slub.c      	struct page *page;
page             1350 mm/slub.c      	page = get_partial_node(get_node(s, searchnode));
page             1351 mm/slub.c      	if (page || (flags & __GFP_THISNODE))
page             1352 mm/slub.c      		return page;
page             1366 mm/slub.c      	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
page             1369 mm/slub.c      	__ClearPageSlubFrozen(page);
page             1370 mm/slub.c      	if (page->inuse) {
page             1372 mm/slub.c      		if (page->freelist) {
page             1373 mm/slub.c      			add_partial(n, page, tail);
page             1377 mm/slub.c      			if (SLABDEBUG && PageSlubDebug(page) &&
page             1379 mm/slub.c      				add_full(n, page);
page             1381 mm/slub.c      		slab_unlock(page);
page             1395 mm/slub.c      			add_partial(n, page, 1);
page             1396 mm/slub.c      			slab_unlock(page);
page             1398 mm/slub.c      			slab_unlock(page);
page             1400 mm/slub.c      			discard_slab(s, page);
page             1410 mm/slub.c      	struct page *page = c->page;
page             1413 mm/slub.c      	if (page->freelist)
page             1430 mm/slub.c      		object[c->offset] = page->freelist;
page             1431 mm/slub.c      		page->freelist = object;
page             1432 mm/slub.c      		page->inuse--;
page             1434 mm/slub.c      	c->page = NULL;
page             1435 mm/slub.c      	unfreeze_slab(s, page, tail);
page             1441 mm/slub.c      	slab_lock(c->page);
page             1454 mm/slub.c      	if (likely(c && c->page))
page             1505 mm/slub.c      	struct page *new;
page             1510 mm/slub.c      	if (!c->page)
page             1513 mm/slub.c      	slab_lock(c->page);
page             1520 mm/slub.c      	object = c->page->freelist;
page             1523 mm/slub.c      	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
page             1527 mm/slub.c      	c->page->inuse = c->page->objects;
page             1528 mm/slub.c      	c->page->freelist = NULL;
page             1529 mm/slub.c      	c->node = page_to_nid(c->page);
page             1531 mm/slub.c      	slab_unlock(c->page);
page             1541 mm/slub.c      		c->page = new;
page             1557 mm/slub.c      		if (c->page)
page             1561 mm/slub.c      		c->page = new;
page             1566 mm/slub.c      	if (!alloc_debug_processing(s, c->page, object, addr))
page             1569 mm/slub.c      	c->page->inuse++;
page             1570 mm/slub.c      	c->page->freelist = object[c->offset];
page             1644 mm/slub.c      	slab_lock(page);
page             1646 mm/slub.c      	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
page             1650 mm/slub.c      	prior = object[offset] = page->freelist;
page             1651 mm/slub.c      	page->freelist = object;
page             1652 mm/slub.c      	page->inuse--;
page             1654 mm/slub.c      	if (unlikely(PageSlubFrozen(page))) {
page             1659 mm/slub.c      	if (unlikely(!page->inuse))
page             1667 mm/slub.c      		add_partial(get_node(s, page_to_nid(page)), page, 1);
page             1672 mm/slub.c      	slab_unlock(page);
page             1680 mm/slub.c      		remove_partial(s, page);
page             1683 mm/slub.c      	slab_unlock(page);
page             1685 mm/slub.c      	discard_slab(s, page);
page             1689 mm/slub.c      	if (!free_debug_processing(s, page, x, addr))
page             1717 mm/slub.c      	if (likely(page == c->page && c->node >= 0)) {
page             1722 mm/slub.c      		__slab_free(s, page, x, addr, c->offset);
page             1729 mm/slub.c      	struct page *page;
page             1731 mm/slub.c      	page = virt_to_head_page(x);
page             1733 mm/slub.c      	slab_free(s, page, x, __builtin_return_address(0));
page             1738 mm/slub.c      static struct page *get_object_page(const void *x)
page             1740 mm/slub.c      	struct page *page = virt_to_head_page(x);
page             1742 mm/slub.c      	if (!PageSlab(page))
page             1745 mm/slub.c      	return page;
page             1906 mm/slub.c      	c->page = NULL;
page             2078 mm/slub.c      	struct page *page;
page             2084 mm/slub.c      	page = new_slab(kmalloc_caches, gfpflags, node);
page             2086 mm/slub.c      	BUG_ON(!page);
page             2087 mm/slub.c      	if (page_to_nid(page) != node) {
page             2094 mm/slub.c      	n = page->freelist;
page             2096 mm/slub.c      	page->freelist = get_freepointer(kmalloc_caches, n);
page             2097 mm/slub.c      	page->inuse++;
page             2104 mm/slub.c      	inc_slabs_node(kmalloc_caches, node, page->objects);
page             2112 mm/slub.c      	add_partial(n, page, 0);
page             2338 mm/slub.c      	struct page *page;
page             2340 mm/slub.c      	page = get_object_page(object);
page             2342 mm/slub.c      	if (!page || s != page->slab)
page             2346 mm/slub.c      	if (!check_valid_pointer(s, page, object))
page             2378 mm/slub.c      	void *addr = page_address(page);
page             2380 mm/slub.c      	DECLARE_BITMAP(map, page->objects);
page             2382 mm/slub.c      	bitmap_zero(map, page->objects);
page             2383 mm/slub.c      	slab_err(s, page, "%s", text);
page             2384 mm/slub.c      	slab_lock(page);
page             2385 mm/slub.c      	for_each_free_object(p, s, page->freelist)
page             2388 mm/slub.c      	for_each_object(p, s, addr, page->objects) {
page             2396 mm/slub.c      	slab_unlock(page);
page             2406 mm/slub.c      	struct page *page, *h;
page             2409 mm/slub.c      	list_for_each_entry_safe(page, h, &n->partial, lru) {
page             2410 mm/slub.c      		if (!page->inuse) {
page             2411 mm/slub.c      			list_del(&page->lru);
page             2412 mm/slub.c      			discard_slab(s, page);
page             2415 mm/slub.c      			list_slab_objects(s, page,
page             2667 mm/slub.c      	struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
page             2670 mm/slub.c      	if (page)
page             2671 mm/slub.c      		return page_address(page);
page             2696 mm/slub.c      	struct page *page;
page             2702 mm/slub.c      	page = virt_to_head_page(object);
page             2704 mm/slub.c      	if (unlikely(!PageSlab(page))) {
page             2705 mm/slub.c      		WARN_ON(!PageCompound(page));
page             2706 mm/slub.c      		return PAGE_SIZE << compound_order(page);
page             2708 mm/slub.c      	s = page->slab;
page             2734 mm/slub.c      	struct page *page;
page             2740 mm/slub.c      	page = virt_to_head_page(x);
page             2741 mm/slub.c      	if (unlikely(!PageSlab(page))) {
page             2742 mm/slub.c      		BUG_ON(!PageCompound(page));
page             2743 mm/slub.c      		put_page(page);
page             2746 mm/slub.c      	slab_free(page->slab, page, object, __builtin_return_address(0));
page             2765 mm/slub.c      	struct page *page;
page             2766 mm/slub.c      	struct page *t;
page             2793 mm/slub.c      		list_for_each_entry_safe(page, t, &n->partial, lru) {
page             2794 mm/slub.c      			if (!page->inuse && slab_trylock(page)) {
page             2800 mm/slub.c      				list_del(&page->lru);
page             2802 mm/slub.c      				slab_unlock(page);
page             2803 mm/slub.c      				discard_slab(s, page);
page             2805 mm/slub.c      				list_move(&page->lru,
page             2806 mm/slub.c      				slabs_by_inuse + page->inuse);
page             3239 mm/slub.c      	struct page *page;
page             3242 mm/slub.c      	list_for_each_entry(page, &n->partial, lru)
page             3243 mm/slub.c      		x += get_count(page);
page             3250 mm/slub.c      	return page->inuse;
page             3255 mm/slub.c      	return page->objects;
page             3260 mm/slub.c      	return page->objects - page->inuse;
page             3267 mm/slub.c      	void *addr = page_address(page);
page             3269 mm/slub.c      	if (!check_slab(s, page) ||
page             3270 mm/slub.c      			!on_freelist(s, page, NULL))
page             3274 mm/slub.c      	bitmap_zero(map, page->objects);
page             3276 mm/slub.c      	for_each_free_object(p, s, page->freelist) {
page             3278 mm/slub.c      		if (!check_object(s, page, p, 0))
page             3282 mm/slub.c      	for_each_object(p, s, addr, page->objects)
page             3284 mm/slub.c      			if (!check_object(s, page, p, 1))
page             3292 mm/slub.c      	if (slab_trylock(page)) {
page             3293 mm/slub.c      		validate_slab(s, page, map);
page             3294 mm/slub.c      		slab_unlock(page);
page             3297 mm/slub.c      			s->name, page);
page             3300 mm/slub.c      		if (!PageSlubDebug(page))
page             3302 mm/slub.c      				"on slab 0x%p\n", s->name, page);
page             3304 mm/slub.c      		if (PageSlubDebug(page))
page             3306 mm/slub.c      				"slab 0x%p\n", s->name, page);
page             3314 mm/slub.c      	struct page *page;
page             3319 mm/slub.c      	list_for_each_entry(page, &n->partial, lru) {
page             3320 mm/slub.c      		validate_slab_slab(s, page, map);
page             3330 mm/slub.c      	list_for_each_entry(page, &n->full, lru) {
page             3331 mm/slub.c      		validate_slab_slab(s, page, map);
page             3550 mm/slub.c      	void *addr = page_address(page);
page             3551 mm/slub.c      	DECLARE_BITMAP(map, page->objects);
page             3554 mm/slub.c      	bitmap_zero(map, page->objects);
page             3555 mm/slub.c      	for_each_free_object(p, s, page->freelist)
page             3558 mm/slub.c      	for_each_object(p, s, addr, page->objects)
page             3581 mm/slub.c      		struct page *page;
page             3587 mm/slub.c      		list_for_each_entry(page, &n->partial, lru)
page             3588 mm/slub.c      			process_slab(&t, s, page, alloc);
page             3589 mm/slub.c      		list_for_each_entry(page, &n->full, lru)
page             3590 mm/slub.c      			process_slab(&t, s, page, alloc);
page             3682 mm/slub.c      			if (c->page) {
page             3684 mm/slub.c      						x = c->page->objects;
page             3686 mm/slub.c      					x = c->page->inuse;
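Taken together, the mm/slub.c entries trace the allocator's fast/slow split: slab_free() stays lockless when the object belongs to the slab cached on the current CPU, and only falls back to the locked __slab_free() otherwise. A sketch of that dispatch, with the interrupt disabling the real code performs omitted (get_cpu_slab() is the era's per-CPU accessor):

#include <linux/mm.h>

static void slab_free_sketch(struct kmem_cache *s, struct page *page,
			     void *x, void *addr)
{
	void **object = (void *)x;
	struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());

	if (likely(page == c->page && c->node >= 0)) {
		/* fast path: push onto this CPU's freelist, no lock */
		object[c->offset] = c->freelist;
		c->freelist = object;
	} else {
		/* slow path: foreign slab, or debugging forced it */
		__slab_free(s, page, x, addr, c->offset);
	}
}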
page               51 mm/sparse-vmemmap.c 		struct page *page = alloc_pages_node(node,
page               53 mm/sparse-vmemmap.c 		if (page)
page               54 mm/sparse-vmemmap.c 			return page_address(page);
page              151 mm/sparse-vmemmap.c struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
page              153 mm/sparse-vmemmap.c 	struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
page               44 mm/sparse.c    	return section_to_node_table[page_to_section(page)];
page              218 mm/sparse.c    	return nr_pages * sizeof(struct page);
page              234 mm/sparse.c    struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
page              238 mm/sparse.c    	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
page              365 mm/sparse.c    struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
page              367 mm/sparse.c    	struct page *map;
page              369 mm/sparse.c    	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
page              374 mm/sparse.c    		       PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
page              379 mm/sparse.c    static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
page              381 mm/sparse.c    	struct page *map;
page              405 mm/sparse.c    	struct page *map;
page              455 mm/sparse.c    static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
page              469 mm/sparse.c    static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
page              471 mm/sparse.c    	struct page *page, *ret;
page              472 mm/sparse.c    	unsigned long memmap_size = sizeof(struct page) * nr_pages;
page              474 mm/sparse.c    	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
page              475 mm/sparse.c    	if (page)
page              484 mm/sparse.c    	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
page              491 mm/sparse.c    static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
page              503 mm/sparse.c    			   get_order(sizeof(struct page) * nr_pages));
page              511 mm/sparse.c    	for (i = 0; i < nr_pages; i++, page++) {
page              512 mm/sparse.c    		magic = atomic_read(&page->_mapcount);
page              516 mm/sparse.c    		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
page              517 mm/sparse.c    		removing_section_nr = page->private;
page              528 mm/sparse.c    			put_page_bootmem(page);
page              535 mm/sparse.c    	struct page *usemap_page;
page              558 mm/sparse.c    		struct page *memmap_page;
page              561 mm/sparse.c    		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
page              579 mm/sparse.c    	struct page *memmap;
page              623 mm/sparse.c    	struct page *memmap = NULL;
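The sparse_decode_mem_map() line above is half of a small arithmetic trick: each section's mem_map pointer is stored with the section's first pfn pre-subtracted, so decoding (and hence pfn_to_page()) reduces to plain pointer addition. A sketch of both halves, assuming the era's section_nr_to_pfn():

#include <linux/mmzone.h>

static unsigned long encode_mem_map_sketch(struct page *mem_map,
					   unsigned long pnum)
{
	/* bias the pointer down by the section's first pfn */
	return (unsigned long)(mem_map - section_nr_to_pfn(pnum));
}

static struct page *decode_mem_map_sketch(unsigned long coded,
					  unsigned long pnum)
{
	/* adding the first pfn back yields the real mem_map base */
	return ((struct page *)coded) + section_nr_to_pfn(pnum);
}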
page               47 mm/swap.c      	if (PageLRU(page)) {
page               49 mm/swap.c      		struct zone *zone = page_zone(page);
page               52 mm/swap.c      		VM_BUG_ON(!PageLRU(page));
page               53 mm/swap.c      		__ClearPageLRU(page);
page               54 mm/swap.c      		del_page_from_lru(zone, page);
page               57 mm/swap.c      	free_hot_page(page);
page               62 mm/swap.c      	page = compound_head(page);
page               63 mm/swap.c      	if (put_page_testzero(page)) {
page               66 mm/swap.c      		dtor = get_compound_page_dtor(page);
page               67 mm/swap.c      		(*dtor)(page);
page               73 mm/swap.c      	if (unlikely(PageCompound(page)))
page               74 mm/swap.c      		put_compound_page(page);
page               75 mm/swap.c      	else if (put_page_testzero(page))
page               76 mm/swap.c      		__page_cache_release(page);
page               90 mm/swap.c      		struct page *victim;
page               92 mm/swap.c      		victim = list_entry(pages->prev, struct page, lru);
page              110 mm/swap.c      		struct page *page = pvec->pages[i];
page              111 mm/swap.c      		struct zone *pagezone = page_zone(page);
page              119 mm/swap.c      		if (PageLRU(page) && !PageActive(page)) {
page              120 mm/swap.c      			list_move_tail(&page->lru, &zone->inactive_list);
page              138 mm/swap.c      	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
page              139 mm/swap.c      	    PageLRU(page)) {
page              143 mm/swap.c      		page_cache_get(page);
page              146 mm/swap.c      		if (!pagevec_add(pvec, page))
page              157 mm/swap.c      	struct zone *zone = page_zone(page);
page              160 mm/swap.c      	if (PageLRU(page) && !PageActive(page)) {
page              161 mm/swap.c      		del_page_from_inactive_list(zone, page);
page              162 mm/swap.c      		SetPageActive(page);
page              163 mm/swap.c      		add_page_to_active_list(zone, page);
page              165 mm/swap.c      		mem_cgroup_move_lists(page, true);
page              179 mm/swap.c      	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
page              180 mm/swap.c      		activate_page(page);
page              181 mm/swap.c      		ClearPageReferenced(page);
page              182 mm/swap.c      	} else if (!PageReferenced(page)) {
page              183 mm/swap.c      		SetPageReferenced(page);
page              197 mm/swap.c      	page_cache_get(page);
page              198 mm/swap.c      	if (!pagevec_add(pvec, page))
page              207 mm/swap.c      	page_cache_get(page);
page              208 mm/swap.c      	if (!pagevec_add(pvec, page))
page              295 mm/swap.c      		struct page *page = pages[i];
page              297 mm/swap.c      		if (unlikely(PageCompound(page))) {
page              302 mm/swap.c      			put_compound_page(page);
page              306 mm/swap.c      		if (!put_page_testzero(page))
page              309 mm/swap.c      		if (PageLRU(page)) {
page              310 mm/swap.c      			struct zone *pagezone = page_zone(page);
page              318 mm/swap.c      			VM_BUG_ON(!PageLRU(page));
page              319 mm/swap.c      			__ClearPageLRU(page);
page              320 mm/swap.c      			del_page_from_lru(zone, page);
page              323 mm/swap.c      		if (!pagevec_add(&pages_to_free, page)) {
page              369 mm/swap.c      		struct page *page = pvec->pages[i];
page              371 mm/swap.c      		VM_BUG_ON(PageLRU(page));
page              372 mm/swap.c      		if (put_page_testzero(page))
page              373 mm/swap.c      			pagevec_add(&pages_to_free, page);
page              389 mm/swap.c      		struct page *page = pvec->pages[i];
page              390 mm/swap.c      		struct zone *pagezone = page_zone(page);
page              398 mm/swap.c      		VM_BUG_ON(PageLRU(page));
page              399 mm/swap.c      		SetPageLRU(page);
page              400 mm/swap.c      		add_page_to_inactive_list(zone, page);
page              416 mm/swap.c      		struct page *page = pvec->pages[i];
page              417 mm/swap.c      		struct zone *pagezone = page_zone(page);
page              425 mm/swap.c      		VM_BUG_ON(PageLRU(page));
page              426 mm/swap.c      		SetPageLRU(page);
page              427 mm/swap.c      		VM_BUG_ON(PageActive(page));
page              428 mm/swap.c      		SetPageActive(page);
page              429 mm/swap.c      		add_page_to_active_list(zone, page);
page              445 mm/swap.c      		struct page *page = pvec->pages[i];
page              447 mm/swap.c      		if (PagePrivate(page) && trylock_page(page)) {
page              448 mm/swap.c      			if (PagePrivate(page))
page              449 mm/swap.c      				try_to_release_page(page, 0);
page              450 mm/swap.c      			unlock_page(page);
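Nearly every mm/swap.c entry above follows the same batching discipline: take a page reference, stash the page in a per-CPU pagevec, and touch the zone's LRU lock only when the vector fills. A hedged sketch of that add path, using the era's lru_add_pvecs per-CPU vector (static to swap.c in the real tree):

#include <linux/pagevec.h>
#include <linux/pagemap.h>

static void lru_cache_add_sketch(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);

	page_cache_get(page);		/* the pagevec holds a reference */
	if (!pagevec_add(pvec, page))	/* returns slots left; 0 == full */
		__pagevec_lru_add(pvec);	/* one lock, many pages  */
	put_cpu_var(lru_add_pvecs);
}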
page               75 mm/swap_state.c 	BUG_ON(!PageLocked(page));
page               76 mm/swap_state.c 	BUG_ON(PageSwapCache(page));
page               77 mm/swap_state.c 	BUG_ON(PagePrivate(page));
page               80 mm/swap_state.c 		page_cache_get(page);
page               81 mm/swap_state.c 		SetPageSwapCache(page);
page               82 mm/swap_state.c 		set_page_private(page, entry.val);
page               86 mm/swap_state.c 						entry.val, page);
page               89 mm/swap_state.c 			__inc_zone_page_state(page, NR_FILE_PAGES);
page               96 mm/swap_state.c 			set_page_private(page, 0UL);
page               97 mm/swap_state.c 			ClearPageSwapCache(page);
page               98 mm/swap_state.c 			page_cache_release(page);
page              110 mm/swap_state.c 	BUG_ON(!PageLocked(page));
page              111 mm/swap_state.c 	BUG_ON(!PageSwapCache(page));
page              112 mm/swap_state.c 	BUG_ON(PageWriteback(page));
page              113 mm/swap_state.c 	BUG_ON(PagePrivate(page));
page              115 mm/swap_state.c 	radix_tree_delete(&swapper_space.page_tree, page_private(page));
page              116 mm/swap_state.c 	set_page_private(page, 0);
page              117 mm/swap_state.c 	ClearPageSwapCache(page);
page              119 mm/swap_state.c 	__dec_zone_page_state(page, NR_FILE_PAGES);
page              136 mm/swap_state.c 	BUG_ON(!PageLocked(page));
page              137 mm/swap_state.c 	BUG_ON(!PageUptodate(page));
page              155 mm/swap_state.c 		err = add_to_swap_cache(page, entry,
page              160 mm/swap_state.c 			SetPageDirty(page);
page              184 mm/swap_state.c 	entry.val = page_private(page);
page              187 mm/swap_state.c 	__delete_from_swap_cache(page);
page              191 mm/swap_state.c 	page_cache_release(page);
page              204 mm/swap_state.c 	if (PageSwapCache(page) && trylock_page(page)) {
page              205 mm/swap_state.c 		remove_exclusive_swap_page(page);
page              206 mm/swap_state.c 		unlock_page(page);
page              216 mm/swap_state.c 	free_swap_cache(page);
page              217 mm/swap_state.c 	page_cache_release(page);
page              226 mm/swap_state.c 	struct page **pagep = pages;
page              247 mm/swap_state.c struct page * lookup_swap_cache(swp_entry_t entry)
page              249 mm/swap_state.c 	struct page *page;
page              251 mm/swap_state.c 	page = find_get_page(&swapper_space, entry.val);
page              253 mm/swap_state.c 	if (page)
page              257 mm/swap_state.c 	return page;
page              266 mm/swap_state.c struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
page              269 mm/swap_state.c 	struct page *found_page, *new_page = NULL;
page              343 mm/swap_state.c struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
page              347 mm/swap_state.c 	struct page *page;
page              361 mm/swap_state.c 		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
page              363 mm/swap_state.c 		if (!page)
page              365 mm/swap_state.c 		page_cache_release(page);
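The add_to_swap_cache() excerpts show where a swap-cache page keeps its identity: the swp_entry_t value rides in page->private with PG_swapcache set, so any swap-side code can cast it back out. A minimal sketch of the two directions, with the radix-tree insertion and error unwinding elided:

#include <linux/swap.h>
#include <linux/pagemap.h>

static void mark_swapcache_sketch(struct page *page, swp_entry_t entry)
{
	page_cache_get(page);			/* cache reference        */
	SetPageSwapCache(page);
	set_page_private(page, entry.val);	/* entry stored in page   */
}

static swp_entry_t page_swap_entry_sketch(struct page *page)
{
	swp_entry_t entry = { .val = page_private(page) };
	return entry;				/* readable from anywhere */
}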
page               65 mm/swapfile.c  	entry.val = page_private(page);
page               66 mm/swapfile.c  	if (PageSwapCache(page)) {
page               78 mm/swapfile.c  		WARN_ON(page_count(page) <= 1);
page               81 mm/swapfile.c  		blk_run_backing_dev(bdi, page);
page              318 mm/swapfile.c  	entry.val = page_private(page);
page              336 mm/swapfile.c  	BUG_ON(!PageLocked(page));
page              337 mm/swapfile.c  	count = page_mapcount(page);
page              338 mm/swapfile.c  	if (count <= 1 && PageSwapCache(page))
page              339 mm/swapfile.c  		count += page_swapcount(page);
page              353 mm/swapfile.c  	BUG_ON(PagePrivate(page));
page              354 mm/swapfile.c  	BUG_ON(!PageLocked(page));
page              356 mm/swapfile.c  	if (!PageSwapCache(page))
page              358 mm/swapfile.c  	if (PageWriteback(page))
page              360 mm/swapfile.c  	if (page_count(page) != 2) /* 2: us + cache */
page              363 mm/swapfile.c  	entry.val = page_private(page);
page              373 mm/swapfile.c  		if ((page_count(page) == 2) && !PageWriteback(page)) {
page              374 mm/swapfile.c  			__delete_from_swap_cache(page);
page              375 mm/swapfile.c  			SetPageDirty(page);
page              384 mm/swapfile.c  		page_cache_release(page);
page              397 mm/swapfile.c  	struct page *page = NULL;
page              405 mm/swapfile.c  			page = find_get_page(&swapper_space, entry.val);
page              406 mm/swapfile.c  			if (page && unlikely(!trylock_page(page))) {
page              407 mm/swapfile.c  				page_cache_release(page);
page              408 mm/swapfile.c  				page = NULL;
page              413 mm/swapfile.c  	if (page) {
page              416 mm/swapfile.c  		BUG_ON(PagePrivate(page));
page              417 mm/swapfile.c  		one_user = (page_count(page) == 2);
page              420 mm/swapfile.c  		if (PageSwapCache(page) && !PageWriteback(page) &&
page              422 mm/swapfile.c  			delete_from_swap_cache(page);
page              423 mm/swapfile.c  			SetPageDirty(page);
page              425 mm/swapfile.c  		unlock_page(page);
page              426 mm/swapfile.c  		page_cache_release(page);
page              518 mm/swapfile.c  	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
page              524 mm/swapfile.c  			mem_cgroup_uncharge_page(page);
page              530 mm/swapfile.c  	get_page(page);
page              532 mm/swapfile.c  		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
page              533 mm/swapfile.c  	page_add_anon_rmap(page, vma, addr);
page              539 mm/swapfile.c  	activate_page(page);
page              570 mm/swapfile.c  			ret = unuse_pte(vma, pmd, addr, entry, page);
page              594 mm/swapfile.c  		ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
page              614 mm/swapfile.c  		ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
page              628 mm/swapfile.c  	if (page->mapping) {
page              629 mm/swapfile.c  		addr = page_address_in_vma(page, vma);
page              644 mm/swapfile.c  		ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
page              662 mm/swapfile.c  		activate_page(page);
page              663 mm/swapfile.c  		unlock_page(page);
page              665 mm/swapfile.c  		lock_page(page);
page              668 mm/swapfile.c  		if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
page              724 mm/swapfile.c  	struct page *page;
page              767 mm/swapfile.c  		page = read_swap_cache_async(entry,
page              769 mm/swapfile.c  		if (!page) {
page              799 mm/swapfile.c  		wait_on_page_locked(page);
page              800 mm/swapfile.c  		wait_on_page_writeback(page);
page              801 mm/swapfile.c  		lock_page(page);
page              802 mm/swapfile.c  		wait_on_page_writeback(page);
page              813 mm/swapfile.c  				shmem = shmem_unuse(entry, page);
page              815 mm/swapfile.c  				retval = unuse_mm(start_mm, entry, page);
page              843 mm/swapfile.c  					shmem = shmem_unuse(entry, page);
page              845 mm/swapfile.c  					retval = unuse_mm(mm, entry, page);
page              867 mm/swapfile.c  			unlock_page(page);
page              868 mm/swapfile.c  			page_cache_release(page);
page              905 mm/swapfile.c  		if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
page              910 mm/swapfile.c  			swap_writepage(page, &wbc);
page              911 mm/swapfile.c  			lock_page(page);
page              912 mm/swapfile.c  			wait_on_page_writeback(page);
page              914 mm/swapfile.c  		if (PageSwapCache(page))
page              915 mm/swapfile.c  			delete_from_swap_cache(page);
page              922 mm/swapfile.c  		SetPageDirty(page);
page              923 mm/swapfile.c  		unlock_page(page);
page              924 mm/swapfile.c  		page_cache_release(page);
page             1193 mm/swapfile.c  	BUG_ON(!PageLocked(page));	/* It pins the swap_info_struct */
page             1195 mm/swapfile.c  	if (PageSwapCache(page)) {
page             1196 mm/swapfile.c  		swp_entry_t entry = { .val = page_private(page) };
page             1202 mm/swapfile.c  		bdi = page->mapping->backing_dev_info;
page             1469 mm/swapfile.c  	struct page *page = NULL;
page             1554 mm/swapfile.c  	page = read_mapping_page(mapping, 0, swap_file);
page             1555 mm/swapfile.c  	if (IS_ERR(page)) {
page             1556 mm/swapfile.c  		error = PTR_ERR(page);
page             1559 mm/swapfile.c  	kmap(page);
page             1560 mm/swapfile.c  	swap_header = page_address(page);
page             1722 mm/swapfile.c  	if (page && !IS_ERR(page)) {
page             1723 mm/swapfile.c  		kunmap(page);
page             1724 mm/swapfile.c  		page_cache_release(page);
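The unuse_pte() lines above show the core of swapoff's re-mapping step: once the page is back from swap, a pte is manufactured with mk_pte() and the page joins the anonymous rmap. A hedged sketch of just that step (pte lookup, locking, and memcg charging elided):

#include <linux/mm.h>
#include <linux/rmap.h>

static void unuse_pte_sketch(struct vm_area_struct *vma, pte_t *pte,
			     unsigned long addr, struct page *page)
{
	get_page(page);			/* the pte now holds a reference */
	set_pte_at(vma->vm_mm, addr, pte,
		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
	page_add_anon_rmap(page, vma, addr);	/* wire into anon rmap   */
}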
page               39 mm/truncate.c  	void (*invalidatepage)(struct page *, unsigned long);
page               40 mm/truncate.c  	invalidatepage = page->mapping->a_ops->invalidatepage;
page               46 mm/truncate.c  		(*invalidatepage)(page, offset);
page               51 mm/truncate.c  	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
page               52 mm/truncate.c  	if (PagePrivate(page))
page               53 mm/truncate.c  		do_invalidatepage(page, partial);
page               72 mm/truncate.c  	if (TestClearPageDirty(page)) {
page               73 mm/truncate.c  		struct address_space *mapping = page->mapping;
page               75 mm/truncate.c  			dec_zone_page_state(page, NR_FILE_DIRTY);
page               98 mm/truncate.c  	if (page->mapping != mapping)
page              101 mm/truncate.c  	if (PagePrivate(page))
page              102 mm/truncate.c  		do_invalidatepage(page, 0);
page              104 mm/truncate.c  	cancel_dirty_page(page, PAGE_CACHE_SIZE);
page              106 mm/truncate.c  	remove_from_page_cache(page);
page              107 mm/truncate.c  	ClearPageMappedToDisk(page);
page              108 mm/truncate.c  	page_cache_release(page);	/* pagecache ref */
page              124 mm/truncate.c  	if (page->mapping != mapping)
page              127 mm/truncate.c  	if (PagePrivate(page) && !try_to_release_page(page, 0))
page              130 mm/truncate.c  	ret = remove_mapping(mapping, page);
page              179 mm/truncate.c  			struct page *page = pvec.pages[i];
page              180 mm/truncate.c  			pgoff_t page_index = page->index;
page              190 mm/truncate.c  			if (!trylock_page(page))
page              192 mm/truncate.c  			if (PageWriteback(page)) {
page              193 mm/truncate.c  				unlock_page(page);
page              196 mm/truncate.c  			if (page_mapped(page)) {
page              201 mm/truncate.c  			truncate_complete_page(mapping, page);
page              202 mm/truncate.c  			unlock_page(page);
page              209 mm/truncate.c  		struct page *page = find_lock_page(mapping, start - 1);
page              210 mm/truncate.c  		if (page) {
page              211 mm/truncate.c  			wait_on_page_writeback(page);
page              212 mm/truncate.c  			truncate_partial_page(page, partial);
page              213 mm/truncate.c  			unlock_page(page);
page              214 mm/truncate.c  			page_cache_release(page);
page              232 mm/truncate.c  			struct page *page = pvec.pages[i];
page              234 mm/truncate.c  			if (page->index > end)
page              236 mm/truncate.c  			lock_page(page);
page              237 mm/truncate.c  			wait_on_page_writeback(page);
page              238 mm/truncate.c  			if (page_mapped(page)) {
page              240 mm/truncate.c  				  (loff_t)page->index<<PAGE_CACHE_SHIFT,
page              243 mm/truncate.c  			if (page->index > next)
page              244 mm/truncate.c  				next = page->index;
page              246 mm/truncate.c  			truncate_complete_page(mapping, page);
page              247 mm/truncate.c  			unlock_page(page);
page              279 mm/truncate.c  			struct page *page = pvec.pages[i];
page              283 mm/truncate.c  			lock_failed = !trylock_page(page);
page              291 mm/truncate.c  			index = page->index;
page              298 mm/truncate.c  			if (PageDirty(page) || PageWriteback(page))
page              300 mm/truncate.c  			if (page_mapped(page))
page              302 mm/truncate.c  			ret += invalidate_complete_page(mapping, page);
page              304 mm/truncate.c  			unlock_page(page);
page              345 mm/truncate.c  	if (page->mapping != mapping)
page              348 mm/truncate.c  	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
page              352 mm/truncate.c  	if (PageDirty(page))
page              355 mm/truncate.c  	BUG_ON(PagePrivate(page));
page              356 mm/truncate.c  	__remove_from_page_cache(page);
page              358 mm/truncate.c  	page_cache_release(page);	/* pagecache ref */
page              367 mm/truncate.c  	if (!PageDirty(page))
page              369 mm/truncate.c  	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
page              371 mm/truncate.c  	return mapping->a_ops->launder_page(page);
page              402 mm/truncate.c  			struct page *page = pvec.pages[i];
page              405 mm/truncate.c  			lock_page(page);
page              406 mm/truncate.c  			if (page->mapping != mapping) {
page              407 mm/truncate.c  				unlock_page(page);
page              410 mm/truncate.c  			page_index = page->index;
page              415 mm/truncate.c  				unlock_page(page);
page              418 mm/truncate.c  			wait_on_page_writeback(page);
page              419 mm/truncate.c  			if (page_mapped(page)) {
page              439 mm/truncate.c  			BUG_ON(page_mapped(page));
page              440 mm/truncate.c  			ret2 = do_launder_page(mapping, page);
page              442 mm/truncate.c  				if (!invalidate_complete_page2(mapping, page))
page              447 mm/truncate.c  			unlock_page(page);
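Every loop in the mm/truncate.c entries repeats one per-page discipline: lock the page, wait out writeback, re-check that it still belongs to the mapping (truncation races with reclaim and migration while the page is unlocked), and only then invalidate. A condensed sketch, reusing the file's own truncate_complete_page():

#include <linux/mm.h>
#include <linux/pagemap.h>

static void truncate_one_page_sketch(struct address_space *mapping,
				     struct page *page)
{
	lock_page(page);
	wait_on_page_writeback(page);
	if (page->mapping == mapping)		/* still ours?           */
		truncate_complete_page(mapping, page);
	unlock_page(page);
	page_cache_release(page);		/* drop the lookup ref   */
}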
page              106 mm/vmalloc.c   		struct page *page = **pages;
page              108 mm/vmalloc.c   		if (!page)
page              110 mm/vmalloc.c   		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
page              174 mm/vmalloc.c   struct page *vmalloc_to_page(const void *vmalloc_addr)
page              177 mm/vmalloc.c   	struct page *page = NULL;
page              198 mm/vmalloc.c   					page = pte_page(pte);
page              203 mm/vmalloc.c   	return page;
page              409 mm/vmalloc.c   			struct page *page = area->pages[i];
page              411 mm/vmalloc.c   			BUG_ON(!page);
page              412 mm/vmalloc.c   			__free_page(page);
page              493 mm/vmalloc.c   	struct page **pages;
page              497 mm/vmalloc.c   	array_size = (nr_pages * sizeof(struct page *));
page              519 mm/vmalloc.c   		struct page *page;
page              522 mm/vmalloc.c   			page = alloc_page(gfp_mask);
page              524 mm/vmalloc.c   			page = alloc_pages_node(node, gfp_mask, 0);
page              526 mm/vmalloc.c   		if (unlikely(!page)) {
page              531 mm/vmalloc.c   		area->pages[i] = page;
page              822 mm/vmalloc.c   		struct page *page = vmalloc_to_page(addr);
page              823 mm/vmalloc.c   		ret = vm_insert_page(vma, uaddr, page);
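vmalloc memory is only virtually contiguous, which is why vmalloc_to_page() above must walk init_mm's page tables to find the backing struct page. A sketch of that four-level descent (the real code also handles highmem pte mapping):

#include <linux/mm.h>
#include <asm/pgtable.h>

static struct page *vmalloc_to_page_sketch(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long)vmalloc_addr;
	pgd_t *pgd = pgd_offset_k(addr);	/* kernel page tables */

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t pte = *pte_offset_kernel(pmd, addr);
				if (pte_present(pte))
					return pte_page(pte);
			}
		}
	}
	return NULL;	/* hole in the vmalloc mapping */
}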
page               84 mm/vmscan.c    #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
page               90 mm/vmscan.c    			struct page *prev;				\
page              104 mm/vmscan.c    			struct page *prev;				\
page              239 mm/vmscan.c    	if (page_mapped(page))
page              243 mm/vmscan.c    	if (PageSwapCache(page))
page              246 mm/vmscan.c    	mapping = page_mapping(page);
page              256 mm/vmscan.c    	return page_count(page) - !!PagePrivate(page) == 2;
page              285 mm/vmscan.c    	lock_page(page);
page              286 mm/vmscan.c    	if (page_mapping(page) == mapping)
page              288 mm/vmscan.c    	unlock_page(page);
page              333 mm/vmscan.c    	if (!is_page_cache_freeable(page))
page              340 mm/vmscan.c    		if (PagePrivate(page)) {
page              341 mm/vmscan.c    			if (try_to_free_buffers(page)) {
page              342 mm/vmscan.c    				ClearPageDirty(page);
page              354 mm/vmscan.c    	if (clear_page_dirty_for_io(page)) {
page              365 mm/vmscan.c    		SetPageReclaim(page);
page              366 mm/vmscan.c    		res = mapping->a_ops->writepage(page, &wbc);
page              368 mm/vmscan.c    			handle_write_error(mapping, page, res);
page              370 mm/vmscan.c    			ClearPageReclaim(page);
page              379 mm/vmscan.c    		if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
page              380 mm/vmscan.c    			wait_on_page_writeback(page);
page              382 mm/vmscan.c    		if (!PageWriteback(page)) {
page              384 mm/vmscan.c    			ClearPageReclaim(page);
page              386 mm/vmscan.c    		inc_zone_page_state(page, NR_VMSCAN_WRITE);
page              399 mm/vmscan.c    	BUG_ON(!PageLocked(page));
page              400 mm/vmscan.c    	BUG_ON(mapping != page_mapping(page));
page              428 mm/vmscan.c    	if (!page_freeze_refs(page, 2))
page              431 mm/vmscan.c    	if (unlikely(PageDirty(page))) {
page              432 mm/vmscan.c    		page_unfreeze_refs(page, 2);
page              436 mm/vmscan.c    	if (PageSwapCache(page)) {
page              437 mm/vmscan.c    		swp_entry_t swap = { .val = page_private(page) };
page              438 mm/vmscan.c    		__delete_from_swap_cache(page);
page              442 mm/vmscan.c    		__remove_from_page_cache(page);
page              461 mm/vmscan.c    	if (__remove_mapping(mapping, page)) {
page              467 mm/vmscan.c    		page_unfreeze_refs(page, 1);
page              490 mm/vmscan.c    		struct page *page;
page              496 mm/vmscan.c    		page = lru_to_page(page_list);
page              497 mm/vmscan.c    		list_del(&page->lru);
page              499 mm/vmscan.c    		if (!trylock_page(page))
page              502 mm/vmscan.c    		VM_BUG_ON(PageActive(page));
page              506 mm/vmscan.c    		if (!sc->may_swap && page_mapped(page))
page              510 mm/vmscan.c    		if (page_mapped(page) || PageSwapCache(page))
page              514 mm/vmscan.c    			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
page              516 mm/vmscan.c    		if (PageWriteback(page)) {
page              526 mm/vmscan.c    				wait_on_page_writeback(page);
page              531 mm/vmscan.c    		referenced = page_referenced(page, 1, sc->mem_cgroup);
page              534 mm/vmscan.c    					referenced && page_mapping_inuse(page))
page              542 mm/vmscan.c    		if (PageAnon(page) && !PageSwapCache(page))
page              543 mm/vmscan.c    			if (!add_to_swap(page, GFP_ATOMIC))
page              547 mm/vmscan.c    		mapping = page_mapping(page);
page              553 mm/vmscan.c    		if (page_mapped(page) && mapping) {
page              554 mm/vmscan.c    			switch (try_to_unmap(page, 0)) {
page              564 mm/vmscan.c    		if (PageDirty(page)) {
page              573 mm/vmscan.c    			switch (pageout(page, mapping, sync_writeback)) {
page              579 mm/vmscan.c    				if (PageWriteback(page) || PageDirty(page))
page              585 mm/vmscan.c    				if (!trylock_page(page))
page              587 mm/vmscan.c    				if (PageDirty(page) || PageWriteback(page))
page              589 mm/vmscan.c    				mapping = page_mapping(page);
page              616 mm/vmscan.c    		if (PagePrivate(page)) {
page              617 mm/vmscan.c    			if (!try_to_release_page(page, sc->gfp_mask))
page              619 mm/vmscan.c    			if (!mapping && page_count(page) == 1) {
page              620 mm/vmscan.c    				unlock_page(page);
page              621 mm/vmscan.c    				if (put_page_testzero(page))
page              637 mm/vmscan.c    		if (!mapping || !__remove_mapping(mapping, page))
page              640 mm/vmscan.c    		unlock_page(page);
page              643 mm/vmscan.c    		if (!pagevec_add(&freed_pvec, page)) {
page              650 mm/vmscan.c    		SetPageActive(page);
page              653 mm/vmscan.c    		unlock_page(page);
page              655 mm/vmscan.c    		list_add(&page->lru, &ret_pages);
page              656 mm/vmscan.c    		VM_BUG_ON(PageLRU(page));
page              685 mm/vmscan.c    	if (!PageLRU(page))
page              693 mm/vmscan.c    	if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
page              697 mm/vmscan.c    	if (likely(get_page_unless_zero(page))) {
page              703 mm/vmscan.c    		ClearPageLRU(page);
page              737 mm/vmscan.c    		struct page *page;
page              743 mm/vmscan.c    		page = lru_to_page(src);
page              744 mm/vmscan.c    		prefetchw_prev_lru_page(page, src, flags);
page              746 mm/vmscan.c    		VM_BUG_ON(!PageLRU(page));
page              748 mm/vmscan.c    		switch (__isolate_lru_page(page, mode)) {
page              750 mm/vmscan.c    			list_move(&page->lru, dst);
page              756 mm/vmscan.c    			list_move(&page->lru, src);
page              775 mm/vmscan.c    		zone_id = page_zone_id(page);
page              776 mm/vmscan.c    		page_pfn = page_to_pfn(page);
page              780 mm/vmscan.c    			struct page *cursor_page;
page              836 mm/vmscan.c    	struct page *page;
page              838 mm/vmscan.c    	list_for_each_entry(page, page_list, lru)
page              839 mm/vmscan.c    		if (PageActive(page)) {
page              840 mm/vmscan.c    			ClearPageActive(page);
page              864 mm/vmscan.c    		struct page *page;
page              927 mm/vmscan.c    			page = lru_to_page(&page_list);
page              928 mm/vmscan.c    			VM_BUG_ON(PageLRU(page));
page              929 mm/vmscan.c    			SetPageLRU(page);
page              930 mm/vmscan.c    			list_del(&page->lru);
page              931 mm/vmscan.c    			if (PageActive(page))
page              932 mm/vmscan.c    				add_page_to_active_list(zone, page);
page              934 mm/vmscan.c    				add_page_to_inactive_list(zone, page);
page              935 mm/vmscan.c    			if (!pagevec_add(&pvec, page)) {
page             1104 mm/vmscan.c    	struct page *page;
page             1128 mm/vmscan.c    		page = lru_to_page(&l_hold);
page             1129 mm/vmscan.c    		list_del(&page->lru);
page             1130 mm/vmscan.c    		if (page_mapped(page)) {
page             1132 mm/vmscan.c    			    (total_swap_pages == 0 && PageAnon(page)) ||
page             1133 mm/vmscan.c    			    page_referenced(page, 0, sc->mem_cgroup)) {
page             1134 mm/vmscan.c    				list_add(&page->lru, &l_active);
page             1138 mm/vmscan.c    		list_add(&page->lru, &l_inactive);
page             1145 mm/vmscan.c    		page = lru_to_page(&l_inactive);
page             1146 mm/vmscan.c    		prefetchw_prev_lru_page(page, &l_inactive, flags);
page             1147 mm/vmscan.c    		VM_BUG_ON(PageLRU(page));
page             1148 mm/vmscan.c    		SetPageLRU(page);
page             1149 mm/vmscan.c    		VM_BUG_ON(!PageActive(page));
page             1150 mm/vmscan.c    		ClearPageActive(page);
page             1152 mm/vmscan.c    		list_move(&page->lru, &zone->inactive_list);
page             1153 mm/vmscan.c    		mem_cgroup_move_lists(page, false);
page             1155 mm/vmscan.c    		if (!pagevec_add(&pvec, page)) {
page             1176 mm/vmscan.c    		page = lru_to_page(&l_active);
page             1177 mm/vmscan.c    		prefetchw_prev_lru_page(page, &l_active, flags);
page             1178 mm/vmscan.c    		VM_BUG_ON(PageLRU(page));
page             1179 mm/vmscan.c    		SetPageLRU(page);
page             1180 mm/vmscan.c    		VM_BUG_ON(!PageActive(page));
page             1182 mm/vmscan.c    		list_move(&page->lru, &zone->active_list);
page             1183 mm/vmscan.c    		mem_cgroup_move_lists(page, true);
page             1185 mm/vmscan.c    		if (!pagevec_add(&pvec, page)) {
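The lru_to_page() macro above anchors every reclaim loop: pages come off the tail of an LRU-ordered private list and are detached before any per-page work. A skeletal version of that loop shape, with the actual reclaim decisions reduced to a comment:

#include <linux/mm.h>
#include <linux/list.h>

#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))

static void drain_page_list_sketch(struct list_head *page_list)
{
	while (!list_empty(page_list)) {
		struct page *page = lru_to_page(page_list);

		list_del(&page->lru);	/* detach before touching it */
		/* trylock_page(), writeback/mapped checks, pageout()... */
	}
}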
page              224 mm/vmstat.c    	__inc_zone_state(page_zone(page), item);
page              245 mm/vmstat.c    	__dec_zone_state(page_zone(page), item);
page              263 mm/vmstat.c    	zone = page_zone(page);
page              275 mm/vmstat.c    	__dec_zone_page_state(page, item);
page              513 mm/vmstat.c    		struct page *page;
page              518 mm/vmstat.c    		page = pfn_to_page(pfn);
page              532 mm/vmstat.c    		if (page_zone(page) != zone)
page              535 mm/vmstat.c    		mtype = get_pageblock_migratetype(page);
page              195 net/atm/mpoa_proc.c 	char *page, *p;
page              204 net/atm/mpoa_proc.c 	page = (char *)__get_free_page(GFP_KERNEL);
page              205 net/atm/mpoa_proc.c 	if (!page)
page              208 net/atm/mpoa_proc.c 	for (p = page, len = 0; len < nbytes; p++, len++) {
page              210 net/atm/mpoa_proc.c 			free_page((unsigned long)page);
page              219 net/atm/mpoa_proc.c 	if (!parse_qos(page))
page              220 net/atm/mpoa_proc.c 		printk("mpoa: proc_mpc_write: could not parse '%s'\n", page);
page              222 net/atm/mpoa_proc.c 	free_page((unsigned long)page);
page              375 net/atm/proc.c 	unsigned long page;
page              379 net/atm/proc.c 	page = get_zeroed_page(GFP_KERNEL);
page              380 net/atm/proc.c 	if (!page) return -ENOMEM;
page              385 net/atm/proc.c 		length = dev->ops->proc_read(dev,pos,(char *) page);
page              389 net/atm/proc.c 		if (copy_to_user(buf,(char *) page,length)) length = -EFAULT;
page              392 net/atm/proc.c 	free_page(page);
page              531 net/bluetooth/cmtp/capi.c 	len += sprintf(page + len, "%s\n\n", cmtp_procinfo(ctrl));
page              532 net/bluetooth/cmtp/capi.c 	len += sprintf(page + len, "addr %s\n", session->name);
page              533 net/bluetooth/cmtp/capi.c 	len += sprintf(page + len, "ctrl %d\n", session->num);
page              537 net/bluetooth/cmtp/capi.c 		len += sprintf(page + len, "appl %d -> %d\n", app->appl, app->mapping);
page              546 net/bluetooth/cmtp/capi.c 	*start = page + off;
page             1158 net/bluetooth/hci_event.c 				cp.page = 0x01;
page             1597 net/bluetooth/hci_event.c 		if (!ev->status && ev->page == 0x01) {
page              154 net/can/bcm.c  	len += snprintf(page + len, PAGE_SIZE - len, ">>> socket %p",
page              156 net/can/bcm.c  	len += snprintf(page + len, PAGE_SIZE - len, " / sk %p", sk);
page              157 net/can/bcm.c  	len += snprintf(page + len, PAGE_SIZE - len, " / bo %p", bo);
page              158 net/can/bcm.c  	len += snprintf(page + len, PAGE_SIZE - len, " / dropped %lu",
page              160 net/can/bcm.c  	len += snprintf(page + len, PAGE_SIZE - len, " / bound %s",
page              162 net/can/bcm.c  	len += snprintf(page + len, PAGE_SIZE - len, " <<<\n");
page              172 net/can/bcm.c  		len += snprintf(page + len, PAGE_SIZE - len,
page              175 net/can/bcm.c  		len += snprintf(page + len, PAGE_SIZE - len, "[%d]%c ",
page              179 net/can/bcm.c  			len += snprintf(page + len, PAGE_SIZE - len,
page              185 net/can/bcm.c  			len += snprintf(page + len, PAGE_SIZE - len,
page              190 net/can/bcm.c  		len += snprintf(page + len, PAGE_SIZE - len,
page              196 net/can/bcm.c  		len += snprintf(page + len, PAGE_SIZE - len, "%s%ld%%\n",
page              201 net/can/bcm.c  			len += snprintf(page + len, PAGE_SIZE - len, "(..)\n");
page              208 net/can/bcm.c  		len += snprintf(page + len, PAGE_SIZE - len,
page              214 net/can/bcm.c  			len += snprintf(page + len, PAGE_SIZE - len, "t1=%lld ",
page              218 net/can/bcm.c  			len += snprintf(page + len, PAGE_SIZE - len, "t2=%lld ",
page              221 net/can/bcm.c  		len += snprintf(page + len, PAGE_SIZE - len, "# sent %ld\n",
page              226 net/can/bcm.c  			len += snprintf(page + len, PAGE_SIZE - len, "(..)\n");
page              231 net/can/bcm.c  	len += snprintf(page + len, PAGE_SIZE - len, "\n");
page              211 net/can/proc.c 		len += snprintf(page + len, PAGE_SIZE - len, fmt,
page              221 net/can/proc.c 			len += snprintf(page + len, PAGE_SIZE - len,
page              237 net/can/proc.c 	len += snprintf(page + len, PAGE_SIZE - len,
page              249 net/can/proc.c 	len += snprintf(page + len, PAGE_SIZE - len, "\n");
page              250 net/can/proc.c 	len += snprintf(page + len, PAGE_SIZE - len,
page              253 net/can/proc.c 	len += snprintf(page + len, PAGE_SIZE - len,
page              255 net/can/proc.c 	len += snprintf(page + len, PAGE_SIZE - len,
page              258 net/can/proc.c 	len += snprintf(page + len, PAGE_SIZE - len, "\n");
page              261 net/can/proc.c 		len += snprintf(page + len, PAGE_SIZE - len,
page              265 net/can/proc.c 		len += snprintf(page + len, PAGE_SIZE - len,
page              268 net/can/proc.c 		len += snprintf(page + len, PAGE_SIZE - len,
page              272 net/can/proc.c 		len += snprintf(page + len, PAGE_SIZE - len, "\n");
page              274 net/can/proc.c 		len += snprintf(page + len, PAGE_SIZE - len,
page              278 net/can/proc.c 		len += snprintf(page + len, PAGE_SIZE - len,
page              281 net/can/proc.c 		len += snprintf(page + len, PAGE_SIZE - len,
page              285 net/can/proc.c 		len += snprintf(page + len, PAGE_SIZE - len, "\n");
page              287 net/can/proc.c 		len += snprintf(page + len, PAGE_SIZE - len,
page              291 net/can/proc.c 		len += snprintf(page + len, PAGE_SIZE - len,
page              294 net/can/proc.c 		len += snprintf(page + len, PAGE_SIZE - len,
page              298 net/can/proc.c 		len += snprintf(page + len, PAGE_SIZE - len, "\n");
page              301 net/can/proc.c 	len += snprintf(page + len, PAGE_SIZE - len,
page              304 net/can/proc.c 	len += snprintf(page + len, PAGE_SIZE - len,
page              309 net/can/proc.c 		len += snprintf(page + len, PAGE_SIZE - len,
page              314 net/can/proc.c 		len += snprintf(page + len, PAGE_SIZE - len,
page              318 net/can/proc.c 	len += snprintf(page + len, PAGE_SIZE - len, "\n");
page              332 net/can/proc.c 		len += snprintf(page + len, PAGE_SIZE - len,
page              340 net/can/proc.c 		len += snprintf(page + len, PAGE_SIZE - len,
page              354 net/can/proc.c 	len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
page              369 net/can/proc.c 	len += snprintf(page + len, PAGE_SIZE - len,
page              376 net/can/proc.c 			len = can_print_recv_banner(page, len);
page              377 net/can/proc.c 			len = can_print_rcvlist(page, len, &d->rx[idx], d->dev);
page              379 net/can/proc.c 			len += snprintf(page + len, PAGE_SIZE - len,
page              388 net/can/proc.c 	len += snprintf(page + len, PAGE_SIZE - len, "\n");
page              402 net/can/proc.c 	len += snprintf(page + len, PAGE_SIZE - len,
page              416 net/can/proc.c 			len = can_print_recv_banner(page, len);
page              420 net/can/proc.c 					len = can_print_rcvlist(page, len,
page              425 net/can/proc.c 			len += snprintf(page + len, PAGE_SIZE - len,
page              434 net/can/proc.c 	len += snprintf(page + len, PAGE_SIZE - len, "\n");
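
Every net/can line above appends with len += snprintf(page + len, PAGE_SIZE - len, ...). One caveat worth keeping in mind: snprintf() returns the length the output would have had, so once truncation occurs len can exceed PAGE_SIZE and the next size argument wraps. scnprintf() returns what was actually stored and makes the accumulation self-limiting; a minimal sketch of the safer variant:

    /* scnprintf() never lets len run past the buffer, unlike snprintf(),
     * whose return value also counts bytes dropped on truncation. */
    static int fill_page(char *page)
    {
        int len = 0;

        len += scnprintf(page + len, PAGE_SIZE - len, "header\n");
        len += scnprintf(page + len, PAGE_SIZE - len, "row %d\n", 42);
        return len;                     /* always < PAGE_SIZE */
    }
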
page              295 net/core/datagram.c 			struct page *page = frag->page;
page              299 net/core/datagram.c 			vaddr = kmap(page);
page              302 net/core/datagram.c 			kunmap(page);
page              380 net/core/datagram.c 			struct page *page = frag->page;
page              384 net/core/datagram.c 			vaddr = kmap(page);
page              387 net/core/datagram.c 			kunmap(page);
page              464 net/core/datagram.c 			struct page *page = frag->page;
page              468 net/core/datagram.c 			vaddr = kmap(page);
page              473 net/core/datagram.c 			kunmap(page);
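
The three datagram.c loops above all touch skb page fragments the same way: kmap() the page (it may live in highmem), copy relative to frag->page_offset, kunmap(). A sketch using the 2.6-era skb_frag_t layout quoted here (newer kernels hide the page behind skb_frag_page() and a bio_vec):

    /* Copy one skb page fragment into a linear kernel buffer. */
    static void copy_frag(const skb_frag_t *frag, char *to)
    {
        struct page *page = frag->page;
        char *vaddr;

        vaddr = kmap(page);             /* may sleep; works for highmem pages */
        memcpy(to, vaddr + frag->page_offset, frag->size);
        kunmap(page);
    }
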
page             1594 net/core/dev.c 		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
page               10 net/core/kmap_skb.h 	return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
page             2636 net/core/pktgen.c 			struct page *page = alloc_pages(GFP_KERNEL, 0);
page             2637 net/core/pktgen.c 			skb_shinfo(skb)->frags[i].page = page;
page             2662 net/core/pktgen.c 			get_page(skb_shinfo(skb)->frags[i].page);
page             2663 net/core/pktgen.c 			skb_shinfo(skb)->frags[i].page =
page             2664 net/core/pktgen.c 			    skb_shinfo(skb)->frags[i - 1].page;
page             2984 net/core/pktgen.c 			struct page *page = alloc_pages(GFP_KERNEL, 0);
page             2985 net/core/pktgen.c 			skb_shinfo(skb)->frags[i].page = page;
page             3010 net/core/pktgen.c 			get_page(skb_shinfo(skb)->frags[i].page);
page             3011 net/core/pktgen.c 			skb_shinfo(skb)->frags[i].page =
page             3012 net/core/pktgen.c 			    skb_shinfo(skb)->frags[i - 1].page;
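
pktgen builds many fragments out of few allocations by refcounting: each extra frags[i].page that aliases an earlier page takes its own get_page(), so the per-frag put_page() at skb free time balances exactly. A sketch, assuming the caller guarantees n * frag_len <= PAGE_SIZE:

    /* Point n fragments at one page; every frag owns one reference, so
     * kfree_skb()'s per-frag put_page() releases them all cleanly. */
    static int share_one_page(struct sk_buff *skb, int n, int frag_len)
    {
        struct page *page = alloc_pages(GFP_KERNEL, 0);
        int i;

        if (!page)
            return -ENOMEM;
        for (i = 0; i < n; i++) {
            if (i)
                get_page(page);         /* alloc_pages() supplied the first ref */
            skb_fill_page_desc(skb, i, page, i * frag_len, frag_len);
        }
        return 0;
    }
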
page               27 net/core/skb_dma_map.c 		map = dma_map_page(dev, fp->page, fp->page_offset,
page              266 net/core/skbuff.c struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
page              269 net/core/skbuff.c 	struct page *page;
page              271 net/core/skbuff.c 	page = alloc_pages_node(node, gfp_mask, 0);
page              272 net/core/skbuff.c 	return page;
page              279 net/core/skbuff.c 	skb_fill_page_desc(skb, i, page, off, size);
page              342 net/core/skbuff.c 				put_page(skb_shinfo(skb)->frags[i].page);
page              718 net/core/skbuff.c 			get_page(skb_shinfo(n)->frags[i].page);
page              783 net/core/skbuff.c 		get_page(skb_shinfo(skb)->frags[i].page);
page             1052 net/core/skbuff.c 			put_page(skb_shinfo(skb)->frags[i].page);
page             1221 net/core/skbuff.c 			put_page(skb_shinfo(skb)->frags[i].page);
page             1339 net/core/skbuff.c 	spd->pages[spd->nr_pages] = page;
page             1351 net/core/skbuff.c 	*page += *poff / PAGE_SIZE;
page             1372 net/core/skbuff.c 		__segment_seek(&page, &poff, &plen, *off);
page             1382 net/core/skbuff.c 		if (spd_fill_page(spd, page, flen, poff, skb))
page             1385 net/core/skbuff.c 		__segment_seek(&page, &poff, &plen, flen);
page             1418 net/core/skbuff.c 		if (__splice_segment(f->page, f->page_offset, f->size,
page             1437 net/core/skbuff.c 	struct page *pages[PIPE_BUFFERS];
page             1978 net/core/skbuff.c 				get_page(skb_shinfo(skb)->frags[i].page);
page             2187 net/core/skbuff.c 	struct page *page = NULL;
page             2199 net/core/skbuff.c 		page = alloc_pages(sk->sk_allocation, 0);
page             2204 net/core/skbuff.c 		if (page == NULL)
page             2208 net/core/skbuff.c 		sk->sk_sndmsg_page = page;
page             2210 net/core/skbuff.c 		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
page             2222 net/core/skbuff.c 		ret = getfrag(from, (page_address(frag->page) +
page             2346 net/core/skbuff.c 			get_page(frag->page);
page             2438 net/core/skbuff.c 			sg_set_page(&sg[elt], frag->page, copy,
page             1297 net/core/sock.c 					struct page *page;
page             1300 net/core/sock.c 					page = alloc_pages(sk->sk_allocation, 0);
page             1301 net/core/sock.c 					if (!page) {
page             1309 net/core/sock.c 					frag->page = page;
page             1605 net/core/sock.c 	char *kaddr = kmap(page);
page             1609 net/core/sock.c 	kunmap(page);
page               80 net/core/user_dma.c 			struct page *page = frag->page;
page               85 net/core/user_dma.c 			cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list, page,
page              238 net/ieee80211/ieee80211_module.c 	return snprintf(page, count, "0x%08X\n", ieee80211_debug_level);
page              726 net/ipv4/af_inet.c 		return sk->sk_prot->sendpage(sk, page, offset, size, flags);
page              727 net/ipv4/af_inet.c 	return sock_no_sendpage(sock, page, offset, size, flags);
page              452 net/ipv4/inet_lro.c 		mac_hdr = page_address(frags->page) + frags->page_offset;
page              713 net/ipv4/ip_output.c 	kaddr = kmap(page);
page              715 net/ipv4/ip_output.c 	kunmap(page);
page              999 net/ipv4/ip_output.c 			struct page *page = sk->sk_sndmsg_page;
page             1003 net/ipv4/ip_output.c 			if (page && (left = PAGE_SIZE - off) > 0) {
page             1006 net/ipv4/ip_output.c 				if (page != frag->page) {
page             1011 net/ipv4/ip_output.c 					get_page(page);
page             1012 net/ipv4/ip_output.c 					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
page             1018 net/ipv4/ip_output.c 				page = alloc_pages(sk->sk_allocation, 0);
page             1019 net/ipv4/ip_output.c 				if (page == NULL)  {
page             1023 net/ipv4/ip_output.c 				sk->sk_sndmsg_page = page;
page             1026 net/ipv4/ip_output.c 				skb_fill_page_desc(skb, i, page, 0, 0);
page             1032 net/ipv4/ip_output.c 			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
page             1166 net/ipv4/ip_output.c 		if (skb_can_coalesce(skb, i, page, offset)) {
page             1169 net/ipv4/ip_output.c 			get_page(page);
page             1170 net/ipv4/ip_output.c 			skb_fill_page_desc(skb, i, page, offset, len);
page             1178 net/ipv4/ip_output.c 			csum = csum_page(page, offset, len);
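
ip_append_data (quoted above) keeps a per-socket scratch page in sk->sk_sndmsg_page, with sk_sndmsg_off marking the fill point, so consecutive small appends coalesce into one fragment instead of consuming a page each. The sketch below simplifies the reference handling relative to the quoted code (here the cache and every frag each hold their own reference, the cache ref being dropped at socket teardown), omits the actual data copy, and assumes the caller caps copy at PAGE_SIZE:

    static int append_via_scratch_page(struct sock *sk, struct sk_buff *skb,
                                       int copy)
    {
        int i = skb_shinfo(skb)->nr_frags;
        struct page *page = sk->sk_sndmsg_page;
        int off = sk->sk_sndmsg_off;

        if (!page || PAGE_SIZE - off < copy) {
            page = alloc_pages(sk->sk_allocation, 0);
            if (!page)
                return -ENOMEM;
            if (sk->sk_sndmsg_page)
                put_page(sk->sk_sndmsg_page);   /* retire the full page */
            sk->sk_sndmsg_page = page;
            off = 0;
        }
        if (i && skb_can_coalesce(skb, i, page, off)) {
            skb_shinfo(skb)->frags[i - 1].size += copy;  /* grow last frag */
        } else {
            get_page(page);             /* the new frag takes its own ref */
            skb_fill_page_desc(skb, i, page, off, copy);
        }
        sk->sk_sndmsg_off = off + copy;
        return 0;
    }
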
page              684 net/ipv4/tcp.c 		struct page *page = pages[poffset / PAGE_SIZE];
page              706 net/ipv4/tcp.c 		can_coalesce = skb_can_coalesce(skb, i, page, offset);
page              717 net/ipv4/tcp.c 			get_page(page);
page              718 net/ipv4/tcp.c 			skb_fill_page_desc(skb, i, page, offset, copy);
page              782 net/ipv4/tcp.c 		return sock_no_sendpage(sock, page, offset, size, flags);
page              786 net/ipv4/tcp.c 	res = do_tcp_sendpages(sk, &page, offset, size, flags);
page              903 net/ipv4/tcp.c 				struct page *page = TCP_PAGE(sk);
page              906 net/ipv4/tcp.c 				if (skb_can_coalesce(skb, i, page, off) &&
page              920 net/ipv4/tcp.c 				} else if (page) {
page              922 net/ipv4/tcp.c 						put_page(page);
page              923 net/ipv4/tcp.c 						TCP_PAGE(sk) = page = NULL;
page              935 net/ipv4/tcp.c 				if (!page) {
page              937 net/ipv4/tcp.c 					if (!(page = sk_stream_alloc_page(sk)))
page              943 net/ipv4/tcp.c 				err = skb_copy_to_page(sk, from, skb, page,
page              950 net/ipv4/tcp.c 						TCP_PAGE(sk) = page;
page              961 net/ipv4/tcp.c 					skb_fill_page_desc(skb, i, page, off, copy);
page              963 net/ipv4/tcp.c 						get_page(page);
page              965 net/ipv4/tcp.c 						get_page(page);
page              966 net/ipv4/tcp.c 						TCP_PAGE(sk) = page;
page             2630 net/ipv4/tcp.c 		sg_set_page(&sg, f->page, f->size, f->page_offset);
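
do_tcp_sendpages (quoted above) is the zero-copy transmit path: the caller's page is attached to the tail skb directly, either by growing the previous fragment when skb_can_coalesce() says the bytes are contiguous, or by adding a new fragment that takes its own page reference. When the route's device cannot do scatter/gather, tcp_sendpage falls back to sock_no_sendpage(), an ordinary copying send. A sketch of just the attach step, with the MAX_SKB_FRAGS check and socket memory accounting elided:

    /* Attach caller-owned page data to an skb without copying. */
    static void attach_page(struct sk_buff *skb, struct page *page,
                            int offset, int copy)
    {
        int i = skb_shinfo(skb)->nr_frags;

        if (skb_can_coalesce(skb, i, page, offset)) {
            skb_shinfo(skb)->frags[i - 1].size += copy;
        } else {
            get_page(page);     /* skb holds a ref; sender keeps its own */
            skb_fill_page_desc(skb, i, page, offset, copy);
        }
        skb->len += copy;
        skb->data_len += copy;
        skb->truesize += copy;
    }
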
page              863 net/ipv4/tcp_output.c 			put_page(skb_shinfo(skb)->frags[i].page);
page              751 net/ipv4/udp.c 	ret = ip_append_page(sk, page, offset, size, flags);
page              754 net/ipv4/udp.c 		return sock_no_sendpage(sk->sk_socket, page, offset,
page             1353 net/ipv6/ip6_output.c 			struct page *page = sk->sk_sndmsg_page;
page             1357 net/ipv6/ip6_output.c 			if (page && (left = PAGE_SIZE - off) > 0) {
page             1360 net/ipv6/ip6_output.c 				if (page != frag->page) {
page             1365 net/ipv6/ip6_output.c 					get_page(page);
page             1366 net/ipv6/ip6_output.c 					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
page             1372 net/ipv6/ip6_output.c 				page = alloc_pages(sk->sk_allocation, 0);
page             1373 net/ipv6/ip6_output.c 				if (page == NULL) {
page             1377 net/ipv6/ip6_output.c 				sk->sk_sndmsg_page = page;
page             1380 net/ipv6/ip6_output.c 				skb_fill_page_desc(skb, i, page, 0, 0);
page             1386 net/ipv6/ip6_output.c 			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
page              730 net/packet/af_packet.c 		struct page *p_start, *p_end;
page             1927 net/packet/af_packet.c 		struct page *page = virt_to_page(po->pg_vec[i]);
page             1930 net/packet/af_packet.c 		for (pg_num = 0; pg_num < po->pg_vec_pages; pg_num++, page++) {
page             1931 net/packet/af_packet.c 			err = vm_insert_page(vma, start, page);
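
The af_packet lines map the socket's ring buffer into userspace by walking its page vector and installing each page with vm_insert_page(), one PTE at a time. A condensed sketch of that loop:

    /* Map an array of kernel pages into a user VMA, page by page. */
    static int map_ring(struct vm_area_struct *vma, struct page **pages,
                        int npages)
    {
        unsigned long start = vma->vm_start;
        int i, err;

        for (i = 0; i < npages; i++) {
            err = vm_insert_page(vma, start, pages[i]);
            if (err)
                return err;
            start += PAGE_SIZE;
        }
        return 0;
    }
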
page              698 net/socket.c   	return sock->ops->sendpage(sock, page, offset, size, flags);
page             2430 net/socket.c   		return sock->ops->sendpage(sock, page, offset, size, flags);
page             2432 net/socket.c   	return sock_no_sendpage(sock, page, offset, size, flags);
page             1076 net/sunrpc/auth_gss/auth_gss.c 		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
page             1104 net/sunrpc/auth_gss/auth_gss.c 	struct page	**inpages;
page              165 net/sunrpc/auth_gss/gss_krb5_crypto.c 	struct page **pages;
page              177 net/sunrpc/auth_gss/gss_krb5_crypto.c 	struct page *in_page;
page              142 net/sunrpc/auth_gss/gss_krb5_wrap.c 	struct page		**tmp_pages;
page             1274 net/sunrpc/auth_gss/svcauth_gss.c 	struct page **inpages = NULL;
page               72 net/sunrpc/socklib.c 	struct page	**ppage = xdr->pages;
page              512 net/sunrpc/svc.c 		struct page *p = alloc_page(GFP_KERNEL);
page              592 net/sunrpc/svc_xprt.c 			struct page *p = alloc_page(GFP_KERNEL);
page              176 net/sunrpc/svcsock.c 	struct page	**ppage = xdr->pages;
page              183 net/sunrpc/xdr.c 	struct page **pgfrom, **pgto;
page              240 net/sunrpc/xdr.c 	struct page **pgto;
page              284 net/sunrpc/xdr.c 	struct page **pgfrom;
page              833 net/sunrpc/xdr.c 	struct page **ppages = NULL;
page               61 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	struct page *page;
page               66 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	page = ctxt->pages[0];
page               68 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	rqstp->rq_pages[0] = page;
page               71 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	rqstp->rq_arg.head[0].iov_base = page_address(page);
page               85 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		page = ctxt->pages[sge_no];
page               87 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		rqstp->rq_pages[sge_no] = page;
page              105 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		page = ctxt->pages[sge_no++];
page              106 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		put_page(page);
page              124 net/sunrpc/xprtrdma/svc_rdma_sendto.c 		struct page *page;
page              126 net/sunrpc/xprtrdma/svc_rdma_sendto.c 		page = xdr->pages[page_no++];
page              131 net/sunrpc/xprtrdma/svc_rdma_sendto.c 			ib_dma_map_page(xprt->sc_cm_id->device, page, 0,
page              521 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	ctxt->pages[0] = page;
page              532 net/sunrpc/xprtrdma/svc_rdma_sendto.c 				page, 0, PAGE_SIZE, DMA_TO_DEVICE);
page              639 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	struct page *res_page;
page              473 net/sunrpc/xprtrdma/svc_rdma_transport.c struct page *svc_rdma_get_page(void)
page              475 net/sunrpc/xprtrdma/svc_rdma_transport.c 	struct page *page;
page              477 net/sunrpc/xprtrdma/svc_rdma_transport.c 	while ((page = alloc_page(GFP_KERNEL)) == NULL) {
page              483 net/sunrpc/xprtrdma/svc_rdma_transport.c 	return page;
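
svc_rdma_get_page (quoted above) is written so callers never see allocation failure: it loops on alloc_page() until memory appears, waiting between attempts. A sketch of that shape; the msleep() back-off here is illustrative, the real function logs and waits on its own schedule:

    /* Block until a page can be allocated; callers never handle NULL. */
    static struct page *get_page_blocking(void)
    {
        struct page *page;

        while ((page = alloc_page(GFP_KERNEL)) == NULL)
            msleep(100);        /* illustrative back-off between retries */
        return page;
    }
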
page              490 net/sunrpc/xprtrdma/svc_rdma_transport.c 	struct page *page;
page              501 net/sunrpc/xprtrdma/svc_rdma_transport.c 		page = svc_rdma_get_page();
page              502 net/sunrpc/xprtrdma/svc_rdma_transport.c 		ctxt->pages[sge_no] = page;
page              504 net/sunrpc/xprtrdma/svc_rdma_transport.c 				     page, 0, PAGE_SIZE,
page             1304 net/sunrpc/xprtrdma/svc_rdma_transport.c 	struct page *p;
page              170 net/sunrpc/xprtrdma/xprt_rdma.h 	struct page	*mr_page;	/* owning page, if any */
page              428 net/sunrpc/xprtsock.c 	struct page **ppage;
page              731 net/xfrm/xfrm_algo.c 			sg_set_page(&sg, frag->page, copy,
page               80 net/xfrm/xfrm_ipcomp.c 		frag->page = alloc_page(GFP_ATOMIC);
page               83 net/xfrm/xfrm_ipcomp.c 		if (!frag->page)
page               90 net/xfrm/xfrm_ipcomp.c 		memcpy(page_address(frag->page), scratch, len);
page               33 scripts/kconfig/lxdialog/textbox.c static const char *page;
page               64 scripts/kconfig/lxdialog/textbox.c 	page = buf;	/* page is pointer to start of page to be displayed */
page              139 scripts/kconfig/lxdialog/textbox.c 				page = buf;
page              149 scripts/kconfig/lxdialog/textbox.c 			page = buf + strlen(buf);
page              278 scripts/kconfig/lxdialog/textbox.c 		if (*page == '\0') {
page              284 scripts/kconfig/lxdialog/textbox.c 		if (page == buf) {
page              288 scripts/kconfig/lxdialog/textbox.c 		page--;
page              290 scripts/kconfig/lxdialog/textbox.c 			if (page == buf) {
page              294 scripts/kconfig/lxdialog/textbox.c 			page--;
page              295 scripts/kconfig/lxdialog/textbox.c 		} while (*page != '\n');
page              296 scripts/kconfig/lxdialog/textbox.c 		page++;
page              356 scripts/kconfig/lxdialog/textbox.c 	while (*page != '\n') {
page              357 scripts/kconfig/lxdialog/textbox.c 		if (*page == '\0') {
page              363 scripts/kconfig/lxdialog/textbox.c 			line[i++] = *(page++);
page              368 scripts/kconfig/lxdialog/textbox.c 			page++;
page              374 scripts/kconfig/lxdialog/textbox.c 	page++;		/* move past '\n' */
page              388 scripts/kconfig/lxdialog/textbox.c 	percent = (page - buf) * 100 / strlen(buf);
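
The lxdialog textbox is ordinary userspace C: page is a cursor into the loaded file, scrolling back means scanning to the previous '\n' and stopping just past it, and the percentage shown is simply (page - buf) * 100 / strlen(buf). A standalone rendition of the back-scan:

    /* Move "page" back one line within buf: step over the '\n' that ends
     * the previous line, scan back to the one before it, then point just
     * past it.  Returns buf itself when already at the top. */
    static const char *back_one_line(const char *buf, const char *page)
    {
        if (page == buf)
            return page;
        page--;
        do {
            if (page == buf)
                return page;
            page--;
        } while (*page != '\n');
        return page + 1;
    }
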
page              269 security/selinux/avc.c 	return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
page              153 security/selinux/selinuxfs.c 	char *page;
page              163 security/selinux/selinuxfs.c 	page = (char *)get_zeroed_page(GFP_KERNEL);
page              164 security/selinux/selinuxfs.c 	if (!page)
page              167 security/selinux/selinuxfs.c 	if (copy_from_user(page, buf, count))
page              171 security/selinux/selinuxfs.c 	if (sscanf(page, "%d", &new_value) != 1)
page              190 security/selinux/selinuxfs.c 	free_page((unsigned long) page);
page              224 security/selinux/selinuxfs.c 	char *page;
page              235 security/selinux/selinuxfs.c 	page = (char *)get_zeroed_page(GFP_KERNEL);
page              236 security/selinux/selinuxfs.c 	if (!page)
page              239 security/selinux/selinuxfs.c 	if (copy_from_user(page, buf, count))
page              243 security/selinux/selinuxfs.c 	if (sscanf(page, "%d", &new_value) != 1)
page              258 security/selinux/selinuxfs.c 	free_page((unsigned long) page);
page              418 security/selinux/selinuxfs.c 	char *page;
page              432 security/selinux/selinuxfs.c 	page = (char *)get_zeroed_page(GFP_KERNEL);
page              433 security/selinux/selinuxfs.c 	if (!page)
page              436 security/selinux/selinuxfs.c 	if (copy_from_user(page, buf, count))
page              440 security/selinux/selinuxfs.c 	if (sscanf(page, "%u", &new_value) != 1)
page              446 security/selinux/selinuxfs.c 	free_page((unsigned long) page);
page              467 security/selinux/selinuxfs.c 	char *page;
page              481 security/selinux/selinuxfs.c 	page = (char *)get_zeroed_page(GFP_KERNEL);
page              482 security/selinux/selinuxfs.c 	if (!page)
page              485 security/selinux/selinuxfs.c 	if (copy_from_user(page, buf, count))
page              489 security/selinux/selinuxfs.c 	if (sscanf(page, "%d", &new_value) != 1)
page              495 security/selinux/selinuxfs.c 	free_page((unsigned long) page);
page              855 security/selinux/selinuxfs.c 	char *page = NULL;
page              874 security/selinux/selinuxfs.c 	page = (char *)get_zeroed_page(GFP_KERNEL);
page              875 security/selinux/selinuxfs.c 	if (!page) {
page              885 security/selinux/selinuxfs.c 	length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing,
page              887 security/selinux/selinuxfs.c 	ret = simple_read_from_buffer(buf, count, ppos, page, length);
page              890 security/selinux/selinuxfs.c 	if (page)
page              891 security/selinux/selinuxfs.c 		free_page((unsigned long)page);
page              898 security/selinux/selinuxfs.c 	char *page = NULL;
page              926 security/selinux/selinuxfs.c 	page = (char *)get_zeroed_page(GFP_KERNEL);
page              927 security/selinux/selinuxfs.c 	if (!page) {
page              933 security/selinux/selinuxfs.c 	if (copy_from_user(page, buf, count))
page              937 security/selinux/selinuxfs.c 	if (sscanf(page, "%d", &new_value) != 1)
page              948 security/selinux/selinuxfs.c 	if (page)
page              949 security/selinux/selinuxfs.c 		free_page((unsigned long) page);
page              962 security/selinux/selinuxfs.c 	char *page = NULL;
page              980 security/selinux/selinuxfs.c 	page = (char *)get_zeroed_page(GFP_KERNEL);
page              981 security/selinux/selinuxfs.c 	if (!page) {
page              987 security/selinux/selinuxfs.c 	if (copy_from_user(page, buf, count))
page              991 security/selinux/selinuxfs.c 	if (sscanf(page, "%d", &new_value) != 1)
page             1001 security/selinux/selinuxfs.c 	if (page)
page             1002 security/selinux/selinuxfs.c 		free_page((unsigned long) page);
page             1044 security/selinux/selinuxfs.c 	char **names = NULL, *page;
page             1057 security/selinux/selinuxfs.c 	page = (char *)get_zeroed_page(GFP_KERNEL);
page             1058 security/selinux/selinuxfs.c 	if (!page)
page             1077 security/selinux/selinuxfs.c 		len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
page             1086 security/selinux/selinuxfs.c 		ret = security_genfs_sid("selinuxfs", page, SECCLASS_FILE, &sid);
page             1099 security/selinux/selinuxfs.c 	free_page((unsigned long)page);
page             1132 security/selinux/selinuxfs.c 	char *page;
page             1147 security/selinux/selinuxfs.c 	page = (char *)get_zeroed_page(GFP_KERNEL);
page             1148 security/selinux/selinuxfs.c 	if (!page) {
page             1153 security/selinux/selinuxfs.c 	if (copy_from_user(page, buf, count)) {
page             1158 security/selinux/selinuxfs.c 	if (sscanf(page, "%u", &new_value) != 1) {
page             1171 security/selinux/selinuxfs.c 	free_page((unsigned long)page);
page             1179 security/selinux/selinuxfs.c 	char *page;
page             1182 security/selinux/selinuxfs.c 	page = (char *)__get_free_page(GFP_KERNEL);
page             1183 security/selinux/selinuxfs.c 	if (!page) {
page             1187 security/selinux/selinuxfs.c 	ret = avc_get_hash_stats(page);
page             1189 security/selinux/selinuxfs.c 		ret = simple_read_from_buffer(buf, count, ppos, page, ret);
page             1190 security/selinux/selinuxfs.c 	free_page((unsigned long)page);
page             1383 security/selinux/selinuxfs.c 	char *page;
page             1386 security/selinux/selinuxfs.c 	page = (char *)__get_free_page(GFP_KERNEL);
page             1387 security/selinux/selinuxfs.c 	if (!page) {
page             1392 security/selinux/selinuxfs.c 	len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_class(ino));
page             1393 security/selinux/selinuxfs.c 	rc = simple_read_from_buffer(buf, count, ppos, page, len);
page             1394 security/selinux/selinuxfs.c 	free_page((unsigned long)page);
page             1407 security/selinux/selinuxfs.c 	char *page;
page             1410 security/selinux/selinuxfs.c 	page = (char *)__get_free_page(GFP_KERNEL);
page             1411 security/selinux/selinuxfs.c 	if (!page) {
page             1416 security/selinux/selinuxfs.c 	len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_perm(ino));
page             1417 security/selinux/selinuxfs.c 	rc = simple_read_from_buffer(buf, count, ppos, page, len);
page             1418 security/selinux/selinuxfs.c 	free_page((unsigned long)page);
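
Every selinuxfs write handler above follows one template: reject writes that would not fit in a page, copy the user buffer into a zeroed page so the text arrives NUL-terminated, sscanf a value out of it, and free the page on all paths. A condensed sketch of the template with the policy update itself elided:

    static ssize_t example_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
    {
        char *page;
        ssize_t length;
        int new_value;

        if (count >= PAGE_SIZE)
            return -ENOMEM;             /* leave room for the NUL */
        if (*ppos != 0)
            return -EINVAL;             /* no partial writes */

        page = (char *)get_zeroed_page(GFP_KERNEL);  /* zeroed => terminated */
        if (!page)
            return -ENOMEM;

        length = -EFAULT;
        if (copy_from_user(page, buf, count))
            goto out;

        length = -EINVAL;
        if (sscanf(page, "%d", &new_value) != 1)
            goto out;

        /* ... apply new_value here ... */
        length = count;
    out:
        free_page((unsigned long)page);
        return length;
    }
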
page              315 sound/core/pcm_memory.c struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream, unsigned long offset)
page             3004 sound/core/pcm_native.c 	vmf->page = virt_to_page(runtime->status);
page             3005 sound/core/pcm_native.c 	get_page(vmf->page);
page             3043 sound/core/pcm_native.c 	vmf->page = virt_to_page(runtime->control);
page             3044 sound/core/pcm_native.c 	get_page(vmf->page);
page             3094 sound/core/pcm_native.c 	struct page * page;
page             3105 sound/core/pcm_native.c 	if (substream->ops->page) {
page             3106 sound/core/pcm_native.c 		page = substream->ops->page(substream, offset);
page             3107 sound/core/pcm_native.c 		if (!page)
page             3111 sound/core/pcm_native.c 		page = virt_to_page(vaddr);
page             3113 sound/core/pcm_native.c 	get_page(page);
page             3114 sound/core/pcm_native.c 	vmf->page = page;
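
The pcm_native lines show the 2.6-era .fault idiom for mmap'ing kernel buffers: translate the faulting offset to the backing struct page, take a reference, hand it back in vmf->page, return 0. Note virt_to_page() is only valid for directly mapped (kmalloc'd) memory; the vmalloc-backed drivers elsewhere in this section use vmalloc_to_page() instead. In the sketch, struct mydev and its buffer/buffer_size fields are hypothetical:

    static int example_vm_fault(struct vm_area_struct *vma,
                                struct vm_fault *vmf)
    {
        struct mydev *dev = vma->vm_private_data;     /* hypothetical device */
        size_t offset = vmf->pgoff << PAGE_SHIFT;
        struct page *page;

        if (offset >= dev->buffer_size)
            return VM_FAULT_SIGBUS;
        page = virt_to_page(dev->buffer + offset);    /* kmalloc'd buffer */
        get_page(page);                 /* core drops this ref on unmap */
        vmf->page = page;
        return 0;
    }
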
page               73 sound/core/sgbuf.c 	struct page **pgtable;
page               63 sound/drivers/vx/vx_pcm.c static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
page              956 sound/drivers/vx/vx_pcm.c 	.page =		snd_pcm_get_vmalloc_page,
page             1176 sound/drivers/vx/vx_pcm.c 	.page =		snd_pcm_get_vmalloc_page,
page               85 sound/isa/wavefront/wavefront_fx.c 	if (page < 0 || page > 7) {
page              100 sound/isa/wavefront/wavefront_fx.c 		outb (page, dev->fx_dsp_page);
page              106 sound/isa/wavefront/wavefront_fx.c 			page, addr, data[0]);
page              112 sound/isa/wavefront/wavefront_fx.c 		outb (page, dev->fx_dsp_page);
page              126 sound/isa/wavefront/wavefront_fx.c 				    page, addr, (unsigned long) data, cnt);
page              237 sound/mips/hal2.h 	u32 page;		/* DOC Page register */
page              697 sound/mips/sgio2audio.c static struct page *snd_sgio2audio_page(struct snd_pcm_substream *substream,
page              713 sound/mips/sgio2audio.c 	.page =        snd_sgio2audio_page,
page              725 sound/mips/sgio2audio.c 	.page =        snd_sgio2audio_page,
page              737 sound/mips/sgio2audio.c 	.page =        snd_sgio2audio_page,
page               62 sound/oss/dmabuf.c 	struct page *page;
page              118 sound/oss/dmabuf.c 	for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
page              119 sound/oss/dmabuf.c 		SetPageReserved(page);
page              126 sound/oss/dmabuf.c 	struct page *page;
page              138 sound/oss/dmabuf.c 	for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
page              139 sound/oss/dmabuf.c 		ClearPageReserved(page);
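
The OSS dmabuf code (and sscape, below) brackets its raw audio buffers with SetPageReserved()/ClearPageReserved(): historically, marking the pages reserved kept the VM from reclaiming them and allowed the buffer to be mapped into userspace. A sketch of the symmetric pair of loops:

    /* Mark every page backing [start_addr, end_addr] reserved, or undo it. */
    static void reserve_buffer_pages(void *start_addr, void *end_addr,
                                     int reserve)
    {
        struct page *page;

        for (page = virt_to_page(start_addr);
             page <= virt_to_page(end_addr); page++) {
            if (reserve)
                SetPageReserved(page);
            else
                ClearPageReserved(page);
        }
    }
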
page              889 sound/oss/msnd_pinnacle.c 	char *page = (char *)__get_free_page(GFP_KERNEL);
page              891 sound/oss/msnd_pinnacle.c 	if (!page)
page              904 sound/oss/msnd_pinnacle.c 		n = msnd_fifo_read(&dev.DARF, page, k);
page              906 sound/oss/msnd_pinnacle.c 		if (copy_to_user(buf, page, n)) {
page              907 sound/oss/msnd_pinnacle.c 			free_page((unsigned long)page);
page              923 sound/oss/msnd_pinnacle.c 			free_page((unsigned long)page);
page              935 sound/oss/msnd_pinnacle.c 				free_page((unsigned long)page);
page              940 sound/oss/msnd_pinnacle.c 	free_page((unsigned long)page);
page              947 sound/oss/msnd_pinnacle.c 	char *page = (char *)__get_free_page(GFP_KERNEL);
page              949 sound/oss/msnd_pinnacle.c 	if (!page)
page              960 sound/oss/msnd_pinnacle.c 		if (copy_from_user(page, buf, k)) {
page              961 sound/oss/msnd_pinnacle.c 			free_page((unsigned long)page);
page              967 sound/oss/msnd_pinnacle.c 		n = msnd_fifo_write(&dev.DAPF, page, k);
page              982 sound/oss/msnd_pinnacle.c 			free_page((unsigned long)page);
page              993 sound/oss/msnd_pinnacle.c 				free_page((unsigned long)page);
page              999 sound/oss/msnd_pinnacle.c 	free_page((unsigned long)page);
page              789 sound/oss/sscape.c 	struct page *page;
page              826 sound/oss/sscape.c 	for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
page              827 sound/oss/sscape.c 		SetPageReserved(page);
page              835 sound/oss/sscape.c 	struct page *page;
page              842 sound/oss/sscape.c 	for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
page              843 sound/oss/sscape.c 		ClearPageReserved(page);
page              498 sound/pci/ac97/ac97_codec.c 		unsigned short page = (kcontrol->private_value >> 26) & 0x0f;
page              501 sound/pci/ac97/ac97_codec.c 		snd_ac97_update_bits(ac97, AC97_INT_PAGING, AC97_PAGE_MASK, page);
page               65 sound/pci/ac97/ac97_patch.c 	snd_ac97_update_bits(ac97, AC97_INT_PAGING, AC97_PAGE_MASK, page);
page               29 sound/pci/ac97/ac97_patch.h 	(AC97_SINGLE_VALUE(reg,shift,mask,invert) | (1<<25) | ((page) << 26))
page               39 sound/pci/ac97/ac97_patch.h   .private_value =  AC97_PAGE_SINGLE_VALUE(reg, shift, mask, invert, page) }
page             1172 sound/pci/au88x0/au88x0_core.c 	int page, p, pp, delta, i;
page             1174 sound/pci/au88x0/au88x0_core.c 	page =
page             1178 sound/pci/au88x0/au88x0_core.c 		delta = (page - dma->period_real) & 3;
page             1180 sound/pci/au88x0/au88x0_core.c 		delta = (page - dma->period_real);
page             1209 sound/pci/au88x0/au88x0_core.c 	dma->period_real = page;
page             1433 sound/pci/au88x0/au88x0_core.c 	int page, p, pp, delta, i;
page             1435 sound/pci/au88x0/au88x0_core.c 	page =
page             1440 sound/pci/au88x0/au88x0_core.c 		delta = (page - dma->period_real) & 3;
page             1442 sound/pci/au88x0/au88x0_core.c 		delta = (page - dma->period_real);
page             1473 sound/pci/au88x0/au88x0_core.c 	dma->period_real = page;
page              402 sound/pci/au88x0/au88x0_pcm.c 	.page = snd_pcm_sgbuf_ops_page,
page              552 sound/pci/bt87x.c 	.page = snd_pcm_sgbuf_ops_page,
page              492 sound/pci/echoaudio/echoaudio.c 	int err, per, rest, page, edge, offs;
page              535 sound/pci/echoaudio/echoaudio.c 	for (offs = page = per = 0; offs < params_buffer_bytes(hw_params);
page              556 sound/pci/echoaudio/echoaudio.c 				page++;
page              800 sound/pci/echoaudio/echoaudio.c 	.page = snd_pcm_sgbuf_ops_page,
page              811 sound/pci/echoaudio/echoaudio.c 	.page = snd_pcm_sgbuf_ops_page,
page              824 sound/pci/echoaudio/echoaudio.c 	.page = snd_pcm_sgbuf_ops_page,
page              836 sound/pci/echoaudio/echoaudio.c 	.page = snd_pcm_sgbuf_ops_page,
page             1326 sound/pci/emu10k1/emupcm.c 	.page =			snd_pcm_sgbuf_ops_page,
page             1350 sound/pci/emu10k1/emupcm.c 	.page =			snd_pcm_sgbuf_ops_page,
page               35 sound/pci/emu10k1/memory.c 	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
page               42 sound/pci/emu10k1/memory.c #define aligned_page_offset(page)	((page) << PAGE_SHIFT)
page               47 sound/pci/emu10k1/memory.c #define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
page               49 sound/pci/emu10k1/memory.c #define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
page               55 sound/pci/emu10k1/memory.c 	page *= UNIT_PAGES;
page               56 sound/pci/emu10k1/memory.c 	for (i = 0; i < UNIT_PAGES; i++, page++) {
page               57 sound/pci/emu10k1/memory.c 		__set_ptb_entry(emu, page, addr);
page               64 sound/pci/emu10k1/memory.c 	page *= UNIT_PAGES;
page               65 sound/pci/emu10k1/memory.c 	for (i = 0; i < UNIT_PAGES; i++, page++)
page               67 sound/pci/emu10k1/memory.c 		__set_ptb_entry(emu, page, emu->silent_page.addr);
page              102 sound/pci/emu10k1/memory.c 	int page = 0, found_page = -ENOMEM;
page              112 sound/pci/emu10k1/memory.c 		size = blk->mapped_page - page;
page              115 sound/pci/emu10k1/memory.c 			return page;
page              121 sound/pci/emu10k1/memory.c 			found_page = page;
page              123 sound/pci/emu10k1/memory.c 		page = blk->mapped_page + blk->pages;
page              125 sound/pci/emu10k1/memory.c 	size = MAX_ALIGN_PAGES - page;
page              128 sound/pci/emu10k1/memory.c 		return page;
page              141 sound/pci/emu10k1/memory.c 	int page, pg;
page              144 sound/pci/emu10k1/memory.c 	page = search_empty_map_area(emu, blk->pages, &next);
page              145 sound/pci/emu10k1/memory.c 	if (page < 0) /* not found */
page              146 sound/pci/emu10k1/memory.c 		return page;
page              151 sound/pci/emu10k1/memory.c 	blk->mapped_page = page;
page              154 sound/pci/emu10k1/memory.c 		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
page              155 sound/pci/emu10k1/memory.c 		page++;
page              207 sound/pci/emu10k1/memory.c 	int page, psize;
page              210 sound/pci/emu10k1/memory.c 	page = 0;
page              213 sound/pci/emu10k1/memory.c 		if (page + psize <= blk->first_page)
page              215 sound/pci/emu10k1/memory.c 		page = blk->last_page + 1;
page              217 sound/pci/emu10k1/memory.c 	if (page + psize > emu->max_cache_pages)
page              225 sound/pci/emu10k1/memory.c 	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
page              301 sound/pci/emu10k1/memory.c 	int page, err, idx;
page              322 sound/pci/emu10k1/memory.c 	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
page              331 sound/pci/emu10k1/memory.c 		emu->page_addr_table[page] = addr;
page              332 sound/pci/emu10k1/memory.c 		emu->page_ptr_table[page] = NULL;
page              442 sound/pci/emu10k1/memory.c 	int page;
page              444 sound/pci/emu10k1/memory.c 	for (page = first_page; page <= last_page; page++) {
page              445 sound/pci/emu10k1/memory.c 		free_page((unsigned long)emu->page_ptr_table[page]);
page              446 sound/pci/emu10k1/memory.c 		emu->page_addr_table[page] = 0;
page              447 sound/pci/emu10k1/memory.c 		emu->page_ptr_table[page] = NULL;
page              456 sound/pci/emu10k1/memory.c 	int page, first_page, last_page;
page              461 sound/pci/emu10k1/memory.c 	for (page = first_page; page <= last_page; page++) {
page              463 sound/pci/emu10k1/memory.c 		struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 |
page              474 sound/pci/emu10k1/memory.c 			__synth_free_pages(emu, first_page, page - 1);
page              477 sound/pci/emu10k1/memory.c 		emu->page_addr_table[page] = page_to_phys(p);
page              478 sound/pci/emu10k1/memory.c 		emu->page_ptr_table[page] = page_address(p);
page              499 sound/pci/emu10k1/memory.c 	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
page              501 sound/pci/emu10k1/memory.c 	ptr = emu->page_ptr_table[page];
page              503 sound/pci/emu10k1/memory.c 		printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page);
page              516 sound/pci/emu10k1/memory.c 	int page, nextofs, end_offset, temp, temp1;
page              522 sound/pci/emu10k1/memory.c 	page = get_aligned_page(offset);
page              524 sound/pci/emu10k1/memory.c 		nextofs = aligned_page_offset(page + 1);
page              529 sound/pci/emu10k1/memory.c 		ptr = offset_ptr(emu, page + p->first_page, offset);
page              533 sound/pci/emu10k1/memory.c 		page++;
page              546 sound/pci/emu10k1/memory.c 	int page, nextofs, end_offset, temp, temp1;
page              552 sound/pci/emu10k1/memory.c 	page = get_aligned_page(offset);
page              554 sound/pci/emu10k1/memory.c 		nextofs = aligned_page_offset(page + 1);
page              559 sound/pci/emu10k1/memory.c 		ptr = offset_ptr(emu, page + p->first_page, offset);
page              564 sound/pci/emu10k1/memory.c 		page++;
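
The emu10k1 allocator above keeps its mapped blocks sorted by page index and scans the gaps between neighbours: an exact-size gap is taken on the spot, otherwise it remembers the largest gap seen (worst-fit, which tends to preserve usable large gaps); the plain first-fit variant appears in both the emu10k1 synth path and the trident allocator below. A standalone, condensed rendition over a sorted array rather than the kernel's linked list:

    /* Choose a start page for "npage" pages among sorted, non-overlapping
     * blocks: an exact-size gap wins outright, else the largest fitting gap. */
    struct block { int start, pages; };

    static int find_gap(const struct block *blk, int nblk,
                        int total_pages, int npage)
    {
        int page = 0, best = -1, best_size = 0;
        int i, size;

        for (i = 0; i <= nblk; i++) {
            /* gap before block i, or the tail gap when i == nblk */
            size = (i < nblk ? blk[i].start : total_pages) - page;
            if (size == npage)
                return page;
            if (size >= npage && size > best_size) {
                best_size = size;
                best = page;
            }
            if (i < nblk)
                page = blk[i].start + blk[i].pages;
        }
        return best;                    /* -1 if nothing fits */
    }
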
page             1716 sound/pci/hda/hda_intel.c 	.page = snd_pcm_sgbuf_ops_page,
page             1702 sound/pci/riptide/riptide.c 	.page = snd_pcm_sgbuf_ops_page,
page             1713 sound/pci/riptide/riptide.c 	.page = snd_pcm_sgbuf_ops_page,
page             4186 sound/pci/rme9652/hdspm.c 	.page = snd_pcm_sgbuf_ops_page,
page             4199 sound/pci/rme9652/hdspm.c 	.page = snd_pcm_sgbuf_ops_page,
page             2087 sound/pci/trident/trident_main.c 	.page =		snd_pcm_sgbuf_ops_page,
page             2132 sound/pci/trident/trident_main.c 	.page =		snd_pcm_sgbuf_ops_page,
page               38 sound/pci/trident/trident_memory.c 	do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \
page               39 sound/pci/trident/trident_memory.c 	     (trident)->tlb.shadow_entries[page] = (ptr); } while (0)
page               41 sound/pci/trident/trident_memory.c 	(void*)((trident)->tlb.shadow_entries[page])
page               43 sound/pci/trident/trident_memory.c 	(dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
page               50 sound/pci/trident/trident_memory.c #define set_tlb_bus(trident,page,ptr,addr) __set_tlb_bus(trident,page,ptr,addr)
page               52 sound/pci/trident/trident_memory.c #define set_silent_tlb(trident,page)	__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr)
page               56 sound/pci/trident/trident_memory.c #define aligned_page_offset(page)	((page) << 12)
page               58 sound/pci/trident/trident_memory.c #define page_to_ptr(trident,page)	__tlb_to_ptr(trident, page)
page               60 sound/pci/trident/trident_memory.c #define page_to_addr(trident,page)	__tlb_to_addr(trident, page)
page               67 sound/pci/trident/trident_memory.c #define aligned_page_offset(page)	((page) << 13)
page               68 sound/pci/trident/trident_memory.c #define page_to_ptr(trident,page)	__tlb_to_ptr(trident, (page) << 1)
page               69 sound/pci/trident/trident_memory.c #define page_to_addr(trident,page)	__tlb_to_addr(trident, (page) << 1)
page               75 sound/pci/trident/trident_memory.c 	page <<= 1;
page               76 sound/pci/trident/trident_memory.c 	__set_tlb_bus(trident, page, ptr, addr);
page               77 sound/pci/trident/trident_memory.c 	__set_tlb_bus(trident, page+1, ptr + SNDRV_TRIDENT_PAGE_SIZE, addr + SNDRV_TRIDENT_PAGE_SIZE);
page               81 sound/pci/trident/trident_memory.c 	page <<= 1;
page               82 sound/pci/trident/trident_memory.c 	__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
page               83 sound/pci/trident/trident_memory.c 	__set_tlb_bus(trident, page+1, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
page               97 sound/pci/trident/trident_memory.c #define aligned_page_offset(page)	((page) * ALIGN_PAGE_SIZE)
page               98 sound/pci/trident/trident_memory.c #define page_to_ptr(trident,page)	__tlb_to_ptr(trident, (page) * UNIT_PAGES)
page               99 sound/pci/trident/trident_memory.c #define page_to_addr(trident,page)	__tlb_to_addr(trident, (page) * UNIT_PAGES)
page              106 sound/pci/trident/trident_memory.c 	page *= UNIT_PAGES;
page              107 sound/pci/trident/trident_memory.c 	for (i = 0; i < UNIT_PAGES; i++, page++) {
page              108 sound/pci/trident/trident_memory.c 		__set_tlb_bus(trident, page, ptr, addr);
page              116 sound/pci/trident/trident_memory.c 	page *= UNIT_PAGES;
page              117 sound/pci/trident/trident_memory.c 	for (i = 0; i < UNIT_PAGES; i++, page++)
page              118 sound/pci/trident/trident_memory.c 		__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
page              143 sound/pci/trident/trident_memory.c 	int page, psize;
page              148 sound/pci/trident/trident_memory.c 	page = 0;
page              151 sound/pci/trident/trident_memory.c 		if (page + psize <= firstpg(blk))
page              153 sound/pci/trident/trident_memory.c 		page = lastpg(blk) + 1;
page              155 sound/pci/trident/trident_memory.c 	if (page + psize > MAX_ALIGN_PAGES)
page              163 sound/pci/trident/trident_memory.c 	blk->offset = aligned_page_offset(page); /* set aligned offset */
page              164 sound/pci/trident/trident_memory.c 	firstpg(blk) = page;
page              165 sound/pci/trident/trident_memory.c 	lastpg(blk) = page + psize - 1;
page              196 sound/pci/trident/trident_memory.c 	int idx, page;
page              217 sound/pci/trident/trident_memory.c 	for (page = firstpg(blk); page <= lastpg(blk); page++, idx++) {
page              227 sound/pci/trident/trident_memory.c 		set_tlb_bus(trident, page, ptr, addr);
page              242 sound/pci/trident/trident_memory.c 	int page;
page              265 sound/pci/trident/trident_memory.c 	for (page = firstpg(blk); page <= lastpg(blk); page++,
page              272 sound/pci/trident/trident_memory.c 		set_tlb_bus(trident, page, ptr, addr);
page              301 sound/pci/trident/trident_memory.c 	int page;
page              309 sound/pci/trident/trident_memory.c 	for (page = firstpg(blk); page <= lastpg(blk); page++)
page              310 sound/pci/trident/trident_memory.c 		set_silent_tlb(trident, page);
page             1309 sound/pci/via82xx.c 	.page =		snd_pcm_sgbuf_ops_page,
page             1322 sound/pci/via82xx.c 	.page =		snd_pcm_sgbuf_ops_page,
page             1335 sound/pci/via82xx.c 	.page =		snd_pcm_sgbuf_ops_page,
page             1348 sound/pci/via82xx.c 	.page =		snd_pcm_sgbuf_ops_page,
page             1361 sound/pci/via82xx.c 	.page =		snd_pcm_sgbuf_ops_page,
page              808 sound/pci/via82xx_modem.c 	.page =		snd_pcm_sgbuf_ops_page,
page              821 sound/pci/via82xx_modem.c 	.page =		snd_pcm_sgbuf_ops_page,
page               36 sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs, unsigned long offset)
page              321 sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c 	.page =		snd_pcm_get_vmalloc_page,
page               14 sound/soc/codecs/tlv320aic26.h #define AIC26_PAGE_ADDR(page, offset)	((page << 6) | offset)
page              942 sound/soc/soc-core.c 	soc_pcm_ops.page = socdev->platform->pcm_ops->page;
page              730 sound/usb/usbaudio.c static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
page             2010 sound/usb/usbaudio.c 	.page =		snd_pcm_get_vmalloc_page,
page             2022 sound/usb/usbaudio.c 	.page =		snd_pcm_get_vmalloc_page,
page              117 sound/usb/usx2y/us122l.c 	struct page *page;
page              138 sound/usb/usx2y/us122l.c 	page = virt_to_page(vaddr);
page              140 sound/usb/usx2y/us122l.c 	get_page(page);
page              143 sound/usb/usx2y/us122l.c 	vmf->page = page;
page               40 sound/usb/usx2y/usX2Yhwdep.c 	struct page * page;
page               49 sound/usb/usx2y/usX2Yhwdep.c 	page = virt_to_page(vaddr);
page               50 sound/usb/usx2y/usX2Yhwdep.c 	get_page(page);
page               51 sound/usb/usx2y/usX2Yhwdep.c 	vmf->page = page;
page               54 sound/usb/usx2y/usX2Yhwdep.c 		    vaddr, page);
page              694 sound/usb/usx2y/usx2yhwdeppcm.c 	vmf->page = virt_to_page(vaddr);
page              695 sound/usb/usx2y/usx2yhwdeppcm.c 	get_page(vmf->page);
page              160 virt/kvm/kvm_main.c 	struct page *page;
page              169 virt/kvm/kvm_main.c 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page              170 virt/kvm/kvm_main.c 	if (!page) {
page              174 virt/kvm/kvm_main.c 	vcpu->run = page_address(page);
page              316 virt/kvm/kvm_main.c 	struct page *page;
page              323 virt/kvm/kvm_main.c 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page              324 virt/kvm/kvm_main.c 	if (!page) {
page              329 virt/kvm/kvm_main.c 			(struct kvm_coalesced_mmio_ring *)page_address(page);
page              339 virt/kvm/kvm_main.c 			put_page(page);
page              505 virt/kvm/kvm_main.c 		new.rmap = vmalloc(npages * sizeof(struct page *));
page              642 virt/kvm/kvm_main.c 	return page == bad_page;
page              716 virt/kvm/kvm_main.c 	struct page *page[1];
page              729 virt/kvm/kvm_main.c 	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
page              745 virt/kvm/kvm_main.c 		pfn = page_to_pfn(page[0]);
page              752 virt/kvm/kvm_main.c struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
page              770 virt/kvm/kvm_main.c 	kvm_release_pfn_clean(page_to_pfn(page));
page              783 virt/kvm/kvm_main.c 	kvm_release_pfn_dirty(page_to_pfn(page));
page              796 virt/kvm/kvm_main.c 	kvm_set_pfn_dirty(page_to_pfn(page));
page              803 virt/kvm/kvm_main.c 		struct page *page = pfn_to_page(pfn);
page              804 virt/kvm/kvm_main.c 		if (!PageReserved(page))
page              805 virt/kvm/kvm_main.c 			SetPageDirty(page);
page             1003 virt/kvm/kvm_main.c 	struct page *page;
page             1006 virt/kvm/kvm_main.c 		page = virt_to_page(vcpu->run);
page             1009 virt/kvm/kvm_main.c 		page = virt_to_page(vcpu->arch.pio_data);
page             1013 virt/kvm/kvm_main.c 		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
page             1017 virt/kvm/kvm_main.c 	get_page(page);
page             1018 virt/kvm/kvm_main.c 	vmf->page = page;
page             1373 virt/kvm/kvm_main.c 	struct page *page;
page             1377 virt/kvm/kvm_main.c 	page = gfn_to_page(kvm, vmf->pgoff);
page             1378 virt/kvm/kvm_main.c 	if (is_error_page(page)) {
page             1379 virt/kvm/kvm_main.c 		kvm_release_page_clean(page);
page             1382 virt/kvm/kvm_main.c 	vmf->page = page;
page             1679 virt/kvm/kvm_main.c struct page *bad_page;
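
bad_page (declared just above) is kvm's error sentinel: gfn_to_page() never returns NULL, failed lookups return this shared, refcounted page instead, so callers test with is_error_page() but release the result the same way on every path, as the kvm_vcpu fault code above does. A sketch of the sentinel idea only; lookup_page() and its "ok" flag are illustrative, not the real kvm API:

    /* Failed lookups return a real, refcounted page, so release paths
     * need no NULL special case.  bad_page is allocated once at init. */
    static struct page *bad_page;

    static int is_error_page(struct page *page)
    {
        return page == bad_page;
    }

    static struct page *lookup_page(int ok)
    {
        if (!ok) {
            get_page(bad_page);         /* sentinel is refcounted like any hit */
            return bad_page;
        }
        return alloc_page(GFP_KERNEL);  /* illustrative success path */
    }
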