zone_page_state  175 include/linux/vmstat.h    zone_page_state(&zones[ZONE_DMA], item) +
zone_page_state  178 include/linux/vmstat.h    zone_page_state(&zones[ZONE_DMA32], item) +
zone_page_state  181 include/linux/vmstat.h    zone_page_state(&zones[ZONE_HIGHMEM], item) +
zone_page_state  183 include/linux/vmstat.h    zone_page_state(&zones[ZONE_NORMAL], item) +
zone_page_state  184 include/linux/vmstat.h    zone_page_state(&zones[ZONE_MOVABLE], item);
zone_page_state  799 kernel/power/snapshot.c   cnt += zone_page_state(zone, NR_FREE_PAGES);
zone_page_state 1084 kernel/power/snapshot.c   free += zone_page_state(zone, NR_FREE_PAGES);
zone_page_state  237 kernel/power/swsusp.c     zone_page_state(zone, NR_FREE_PAGES);
zone_page_state  239 kernel/power/swsusp.c     tmp -= zone_page_state(zone, NR_FREE_PAGES);
zone_page_state   51 mm/highmem.c              pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
zone_page_state   54 mm/highmem.c              pages += zone_page_state(
zone_page_state  332 mm/page-writeback.c       x += zone_page_state(z, NR_FREE_PAGES)
zone_page_state  333 mm/page-writeback.c       + zone_page_state(z, NR_INACTIVE)
zone_page_state  334 mm/page-writeback.c       + zone_page_state(z, NR_ACTIVE);
zone_page_state 1218 mm/page_alloc.c           long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
zone_page_state 1825 mm/page_alloc.c           val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
zone_page_state 1898 mm/page_alloc.c           K(zone_page_state(zone, NR_FREE_PAGES)),
zone_page_state 1902 mm/page_alloc.c           K(zone_page_state(zone, NR_ACTIVE)),
zone_page_state 1903 mm/page_alloc.c           K(zone_page_state(zone, NR_INACTIVE)),
zone_page_state   36 mm/quicklist.c            zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
zone_page_state   39 mm/quicklist.c            zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
zone_page_state   41 mm/quicklist.c            zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);
zone_page_state  965 mm/vmscan.c               return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE)
zone_page_state  966 mm/vmscan.c               + zone_page_state(zone, NR_INACTIVE))*3;
zone_page_state 1036 mm/vmscan.c               imbalance = zone_page_state(zone, NR_ACTIVE);
zone_page_state 1037 mm/vmscan.c               imbalance /= zone_page_state(zone, NR_INACTIVE) + 1;
zone_page_state 1219 mm/vmscan.c               (zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
zone_page_state 1222 mm/vmscan.c               (zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
zone_page_state 1367 mm/vmscan.c               lru_pages += zone_page_state(zone, NR_ACTIVE)
zone_page_state 1368 mm/vmscan.c               + zone_page_state(zone, NR_INACTIVE);
zone_page_state 1570 mm/vmscan.c               lru_pages += zone_page_state(zone, NR_ACTIVE)
zone_page_state 1571 mm/vmscan.c               + zone_page_state(zone, NR_INACTIVE);
zone_page_state 1615 mm/vmscan.c               (zone_page_state(zone, NR_ACTIVE)
zone_page_state 1616 mm/vmscan.c               + zone_page_state(zone, NR_INACTIVE)) * 6)
zone_page_state 1789 mm/vmscan.c               (zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
zone_page_state 1793 mm/vmscan.c               zone_page_state(zone, NR_ACTIVE));
zone_page_state 1799 mm/vmscan.c               (zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
zone_page_state 1803 mm/vmscan.c               zone_page_state(zone, NR_INACTIVE));
zone_page_state 2038 mm/vmscan.c               if (zone_page_state(zone, NR_FILE_PAGES) -
zone_page_state 2039 mm/vmscan.c               zone_page_state(zone, NR_FILE_MAPPED) >
zone_page_state 2053 mm/vmscan.c               slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
zone_page_state 2066 mm/vmscan.c               zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
zone_page_state 2075 mm/vmscan.c               zone_page_state(zone, NR_SLAB_RECLAIMABLE);
zone_page_state 2098 mm/vmscan.c               if (zone_page_state(zone, NR_FILE_PAGES) -
zone_page_state 2099 mm/vmscan.c               zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
zone_page_state 2100 mm/vmscan.c               && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
zone_page_state  694 mm/vmstat.c               zone_page_state(zone, NR_FREE_PAGES),
zone_page_state  705 mm/vmstat.c               zone_page_state(zone, i));
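Every hit above is a read through the same accessor: zone_page_state(zone, item) returns the current value of one per-zone statistics counter, indexed by an enum zone_stat_item such as NR_FREE_PAGES or NR_ACTIVE. Below is a minimal userspace C sketch of that pattern, with the negative-clamp on read modeled after the kernel's include/linux/vmstat.h accessor; the struct zone here is a simplified stand-in for illustration, not the kernel's real definition:

    /*
     * Sketch of the zone_page_state() accessor pattern: one atomic
     * counter per (zone, item) pair.  The read clamps negative values
     * to zero, since per-CPU deltas being folded into the global
     * counter can leave it transiently negative on SMP.
     */
    #include <stdatomic.h>
    #include <stdio.h>

    enum zone_stat_item {
            NR_FREE_PAGES,
            NR_ACTIVE,
            NR_INACTIVE,
            NR_FILE_PAGES,
            NR_FILE_MAPPED,
            NR_SLAB_RECLAIMABLE,
            NR_VM_ZONE_STAT_ITEMS
    };

    struct zone {                           /* simplified stand-in */
            atomic_long vm_stat[NR_VM_ZONE_STAT_ITEMS];
    };

    static unsigned long zone_page_state(struct zone *zone,
                                         enum zone_stat_item item)
    {
            long x = atomic_load(&zone->vm_stat[item]);

            if (x < 0)      /* transient negative under SMP folding */
                    x = 0;
            return x;
    }

    int main(void)
    {
            struct zone z = { .vm_stat = { 0 } };

            atomic_store(&z.vm_stat[NR_ACTIVE], 1200);
            atomic_store(&z.vm_stat[NR_INACTIVE], 300);

            /* The mm/vmscan.c computation from lines 1036-1037 above. */
            unsigned long imbalance = zone_page_state(&z, NR_ACTIVE);
            imbalance /= zone_page_state(&z, NR_INACTIVE) + 1;

            printf("imbalance = %lu\n", imbalance);
            return 0;
    }

The clamp is the notable design point: writers update cheap per-CPU deltas and only periodically fold them into the shared atomic, so a reader may briefly observe a value below zero; callers such as the watermark check in mm/page_alloc.c line 1218 above tolerate an approximate count but not a negative one.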