page_private      288 fs/afs/file.c  		wb = (struct afs_writeback *) page_private(page);
page_private      217 fs/afs/write.c 	wb = (struct afs_writeback *) page_private(page);
page_private      410 fs/afs/write.c 			    page_private(page) != (unsigned long) wb) {
page_private      492 fs/afs/write.c 	wb = (struct afs_writeback *) page_private(page);
page_private      560 fs/afs/write.c 		wb = (struct afs_writeback *) page_private(page);
page_private      685 fs/afs/write.c 			if (page_private(page) == (unsigned long) wb) {
page_private       91 fs/jfs/jfs_metapage.c #define mp_anchor(page) ((struct meta_anchor *)page_private(page))
page_private      160 fs/jfs/jfs_metapage.c 	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
page_private      114 fs/nfs/write.c 		req = (struct nfs_page *)page_private(page);
page_private      152 fs/xfs/linux-2.6/xfs_buf.c 		page_private(page) | page_region_mask(offset, length));
page_private      153 fs/xfs/linux-2.6/xfs_buf.c 	if (page_private(page) == ~0UL)
page_private      165 fs/xfs/linux-2.6/xfs_buf.c 	return (mask && (page_private(page) & mask) == mask);
page_private      570 include/asm-x86/kvm_host.h 	return (struct kvm_mmu_page *)page_private(page);
page_private      138 include/linux/buffer_head.h 		((struct buffer_head *)page_private(page));	\
page_private      653 include/linux/mm.h 		return page_private(page);
page_private      375 kernel/kexec.c 	order = page_private(page);
page_private     1196 kernel/relay.c 	rbuf = (struct rchan_buf *)page_private(buf->page);
page_private      492 mm/hugetlb.c   	mapping = (struct address_space *) page_private(page);
page_private     1767 mm/hugetlb.c   	mapping = (struct address_space *)page_private(page);
page_private       52 mm/internal.h  	return page_private(page);
page_private      350 mm/migrate.c   		set_page_private(newpage, page_private(page));
page_private      497 mm/migrate.c   	set_page_private(newpage, page_private(page));
page_private     1065 mm/page_alloc.c 				if (page_private(page) == migratetype)
page_private     1069 mm/page_alloc.c 				if (page_private(page) == migratetype)
page_private      105 mm/page_io.c   	bio = get_swap_bio(GFP_NOIO, page_private(page), page,
page_private      130 mm/page_io.c   	bio = get_swap_bio(GFP_KERNEL, page_private(page), page,
page_private      105 mm/page_isolation.c 				page_private(page) == MIGRATE_ISOLATE)
page_private      738 mm/rmap.c      		swp_entry_t entry = { .val = page_private(page) };
page_private      369 mm/shmem.c     		set_page_private(page, page_private(page) + incdec);
page_private      684 mm/shmem.c     		if (subdir && page_private(subdir) /* has swap entries */) {
page_private      696 mm/shmem.c     					page_private(subdir) - freed);
page_private      699 mm/shmem.c     				BUG_ON(page_private(subdir) != freed);
page_private      886 mm/shmem.c     		if (subdir && page_private(subdir)) {
page_private      115 mm/swap_state.c 	radix_tree_delete(&swapper_space.page_tree, page_private(page));
page_private      184 mm/swap_state.c 	entry.val = page_private(page);
page_private       65 mm/swapfile.c  	entry.val = page_private(page);
page_private      318 mm/swapfile.c  	entry.val = page_private(page);
page_private      363 mm/swapfile.c  	entry.val = page_private(page);
page_private     1196 mm/swapfile.c  		swp_entry_t entry = { .val = page_private(page) };
page_private      437 mm/vmscan.c    		swp_entry_t swap = { .val = page_private(page) };
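
All of the call sites above use the same accessor pair from include/linux/mm.h: page_private() simply reads page->private and set_page_private() writes it, so each subsystem is free to stash either a pointer (cast to unsigned long, as in fs/afs, fs/nfs and buffer_head.h) or a plain value such as a swap entry, a page order or a migratetype (as in mm/swapfile.c, kernel/kexec.c and mm/page_alloc.c) in that one word. Below is a minimal standalone sketch of that pattern, not kernel code: struct page is reduced to just its private field, and demo_writeback is a made-up stand-in for the per-subsystem records (afs_writeback, nfs_page, buffer_head, ...) seen in the listing; only the shape of the two accessor macros matches the real definitions in include/linux/mm.h.

#include <assert.h>
#include <stdio.h>

/* Reduced stand-in for struct page: only the one word the listing is about. */
struct page {
	unsigned long private;
};

/* Same shape as the kernel accessors in include/linux/mm.h. */
#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

/* Hypothetical per-subsystem record, standing in for afs_writeback et al. */
struct demo_writeback {
	int count;
};

int main(void)
{
	struct page pg = { 0 };
	struct demo_writeback wb = { .count = 3 };

	/* Pattern 1: store a pointer and read it back with a cast
	 * (cf. fs/afs/write.c, fs/nfs/write.c, include/linux/buffer_head.h). */
	set_page_private(&pg, (unsigned long)&wb);
	struct demo_writeback *got = (struct demo_writeback *)page_private(&pg);
	assert(got == &wb);

	/* Pattern 2: store a plain value such as a swap entry .val, a page
	 * order or a migratetype (cf. mm/swapfile.c, kernel/kexec.c,
	 * mm/page_alloc.c). */
	set_page_private(&pg, 42UL);
	printf("private value: %lu\n", page_private(&pg));
	return 0;
}

The single overloaded word exists because struct page is too size-critical to carry a separate field per subsystem; which interpretation applies is determined by context (for example, the PG_private flag marks pages whose private word holds buffer heads, while PageSwapCache pages keep a swap entry there).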