zone 1062 arch/x86/mm/init_32.c struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
zone 1066 arch/x86/mm/init_32.c return __add_pages(zone, start_pfn, nr_pages);
zone 829 arch/x86/mm/init_64.c struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
zone 838 arch/x86/mm/init_64.c ret = __add_pages(zone, start_pfn, nr_pages);
zone 394 arch/x86/mm/numa_32.c struct zone *zone;
zone 397 arch/x86/mm/numa_32.c for_each_zone(zone) {
zone 400 arch/x86/mm/numa_32.c if (!is_highmem(zone))
zone 403 arch/x86/mm/numa_32.c zone_start_pfn = zone->zone_start_pfn;
zone 404 arch/x86/mm/numa_32.c zone_end_pfn = zone_start_pfn + zone->spanned_pages;
zone 406 arch/x86/mm/numa_32.c nid = zone_to_nid(zone);
zone 408 arch/x86/mm/numa_32.c zone->name, nid, zone_start_pfn, zone_end_pfn);
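
A minimal sketch of the zone-walking idiom these arch/x86/mm/numa_32.c hits come from, assuming the 2.6-era for_each_zone(), is_highmem() and zone_to_nid() helpers seen above (the function name is mine, not the kernel's):

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/mmzone.h>

    /* Walk every zone and report the PFN span of the highmem ones,
     * mirroring the numa_32.c loop above. */
    static void print_highmem_spans(void)
    {
            struct zone *zone;

            for_each_zone(zone) {
                    unsigned long zone_start_pfn, zone_end_pfn;

                    if (!is_highmem(zone))
                            continue;

                    zone_start_pfn = zone->zone_start_pfn;
                    zone_end_pfn = zone_start_pfn + zone->spanned_pages;
                    printk(KERN_INFO "%s on node %d: %lu-%lu\n",
                           zone->name, zone_to_nid(zone),
                           zone_start_pfn, zone_end_pfn);
            }
    }
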
zone 205 fs/adfs/map.c dm = asb->s_map + zone;
zone 206 fs/adfs/map.c zone = asb->s_map_size;
zone 207 fs/adfs/map.c dm_end = asb->s_map + zone;
zone 218 fs/adfs/map.c } while (--zone > 0);
zone 241 fs/adfs/map.c unsigned int zone;
zone 244 fs/adfs/map.c zone = asb->s_map_size;
zone 248 fs/adfs/map.c } while (--zone > 0);
zone 258 fs/adfs/map.c unsigned int zone, mapoff;
zone 266 fs/adfs/map.c zone = asb->s_map_size >> 1;
zone 268 fs/adfs/map.c zone = frag_id / asb->s_ids_per_zone;
zone 270 fs/adfs/map.c if (zone >= asb->s_map_size)
zone 277 fs/adfs/map.c result = scan_map(asb, zone, frag_id, mapoff);
zone 294 fs/adfs/map.c frag_id, zone, asb->s_map_size);
zone 290 fs/adfs/super.c int i, zone;
zone 307 fs/adfs/super.c for (zone = 0; zone < nzones; zone++, map_addr++) {
zone 308 fs/adfs/super.c dm[zone].dm_startbit = 0;
zone 309 fs/adfs/super.c dm[zone].dm_endbit = zone_size;
zone 310 fs/adfs/super.c dm[zone].dm_startblk = zone * zone_size - ADFS_DR_SIZE_BITS;
zone 311 fs/adfs/super.c dm[zone].dm_bh = sb_bread(sb, map_addr);
zone 313 fs/adfs/super.c if (!dm[zone].dm_bh) {
zone 320 fs/adfs/super.c i = zone - 1;
zone 333 fs/adfs/super.c while (--zone >= 0)
zone 334 fs/adfs/super.c brelse(dm[zone].dm_bh);
zone 363 fs/buffer.c struct zone *zone;
zone 372 fs/buffer.c &zone);
zone 373 fs/buffer.c if (zone)
zone 58 fs/minix/bitmap.c unsigned long bit, zone;
zone 64 fs/minix/bitmap.c zone = block - sbi->s_firstdatazone + 1;
zone 65 fs/minix/bitmap.c bit = zone & ((1<<k) - 1);
zone 66 fs/minix/bitmap.c zone >>= k;
zone 67 fs/minix/bitmap.c if (zone >= sbi->s_zmap_blocks) {
zone 71 fs/minix/bitmap.c bh = sbi->s_zmap[zone];
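
The fs/minix/bitmap.c hits split a data-zone number into a bitmap block and a bit within it. A standalone rendition of that arithmetic, where k stands for log2(bits per map block) and all names are illustrative, not the kernel's:

    #include <stdio.h>

    /* Split a minix data-zone number into (bitmap block, bit), as in
     * the fs/minix/bitmap.c hits above. */
    static void zone_to_map_slot(unsigned long block,
                                 unsigned long firstdatazone,
                                 unsigned int k,
                                 unsigned long *map_block,
                                 unsigned long *bit)
    {
            unsigned long zone = block - firstdatazone + 1;

            *bit = zone & ((1UL << k) - 1); /* bit within one map block */
            *map_block = zone >> k;         /* which s_zmap[] buffer */
    }

    int main(void)
    {
            unsigned long map_block, bit;

            /* 1024-byte blocks => 8192 bits per map block => k = 13 */
            zone_to_map_slot(20000, 1000, 13, &map_block, &bit);
            printf("map block %lu, bit %lu\n", map_block, bit);
            return 0;
    }
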
zone 167 fs/ntfs/lcnalloc.c zone == MFT_ZONE ? "MFT" : "DATA");
zone 174 fs/ntfs/lcnalloc.c BUG_ON(zone < FIRST_ZONE);
zone 175 fs/ntfs/lcnalloc.c BUG_ON(zone > LAST_ZONE);
zone 202 fs/ntfs/lcnalloc.c if (zone == DATA_ZONE)
zone 213 fs/ntfs/lcnalloc.c } else if (zone == DATA_ZONE && zone_start >= vol->mft_zone_start &&
zone 221 fs/ntfs/lcnalloc.c } else if (zone == MFT_ZONE && (zone_start < vol->mft_zone_start ||
zone 232 fs/ntfs/lcnalloc.c if (zone == MFT_ZONE) {
zone 696 fs/ntfs/lcnalloc.c if (zone == MFT_ZONE || mft_zone_size <= 0) {
zone 187 fs/sysv/balloc.c sysv_zone_t zone;
zone 190 fs/sysv/balloc.c zone = 0;
zone 191 fs/sysv/balloc.c while (n && (zone = blocks[--n]) != 0)
zone 193 fs/sysv/balloc.c if (zone == 0)
zone 196 fs/sysv/balloc.c block = fs32_to_cpu(sbi, zone);
zone 126 fs/xfs/linux-2.6/kmem.c ptr = kmem_cache_alloc(zone, lflags);
zone 142 fs/xfs/linux-2.6/kmem.c ptr = kmem_zone_alloc(zone, flags);
zone 144 fs/xfs/linux-2.6/kmem.c memset((char *)ptr, 0, kmem_cache_size(zone));
zone 90 fs/xfs/linux-2.6/kmem.h kmem_cache_free(zone, ptr);
zone 96 fs/xfs/linux-2.6/kmem.h if (zone)
zone 97 fs/xfs/linux-2.6/kmem.h kmem_cache_destroy(zone);
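
The fs/xfs/linux-2.6/kmem.* hits show that an XFS kmem "zone" is a thin wrapper over a slab cache. A compressed sketch under that reading (the _sketch names and the typedef are mine; signatures simplified for illustration):

    #include <linux/slab.h>
    #include <linux/string.h>

    typedef struct kmem_cache kmem_zone_t;

    /* kmem_zone_alloc() maps straight onto the slab allocator... */
    static inline void *kmem_zone_alloc_sketch(kmem_zone_t *zone, gfp_t lflags)
    {
            return kmem_cache_alloc(zone, lflags);
    }

    /* ...and the zalloc variant just zeroes the whole object afterwards. */
    static inline void *kmem_zone_zalloc_sketch(kmem_zone_t *zone, gfp_t flags)
    {
            void *ptr = kmem_zone_alloc_sketch(zone, flags);

            if (ptr)
                    memset(ptr, 0, kmem_cache_size(zone));
            return ptr;
    }

    static inline void kmem_zone_free_sketch(kmem_zone_t *zone, void *ptr)
    {
            kmem_cache_free(zone, ptr);
    }
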
zone 59 fs/xfs/xfs_acl.h (zone) = kmem_zone_init(sizeof(xfs_acl_t), (name))
zone 60 fs/xfs/xfs_acl.h #define xfs_acl_zone_destroy(zone) kmem_zone_destroy(zone)
zone 26 fs/xfs/xfs_da_btree.h struct zone;
zone 9 include/linux/memory_hotplug.h struct zone;
zone 46 include/linux/memory_hotplug.h return read_seqbegin(&zone->span_seqlock);
zone 50 include/linux/memory_hotplug.h return read_seqretry(&zone->span_seqlock, iv);
zone 54 include/linux/memory_hotplug.h write_seqlock(&zone->span_seqlock);
zone 58 include/linux/memory_hotplug.h write_sequnlock(&zone->span_seqlock);
zone 62 include/linux/memory_hotplug.h seqlock_init(&zone->span_seqlock);
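
These include/linux/memory_hotplug.h accessors wrap zone->span_seqlock so that readers retry while memory hot-add resizes a zone; the mm/page_alloc.c hits further down (page_outside_zone_boundaries) use them in exactly this shape. A sketch (function name mine):

    #include <linux/mmzone.h>
    #include <linux/memory_hotplug.h>

    /* Sample a zone's span consistently even while grow_zone_span()
     * may be resizing it under write_seqlock(). */
    static int pfn_in_zone_span(struct zone *zone, unsigned long pfn)
    {
            unsigned seq;
            int ret;

            do {
                    seq = zone_span_seqbegin(zone);
                    ret = pfn >= zone->zone_start_pfn &&
                          pfn < zone->zone_start_pfn + zone->spanned_pages;
            } while (zone_span_seqretry(zone, seq));

            return ret;
    }
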
zone 511 include/linux/mm.h return zone->node;
zone 526 include/linux/mm.h static inline struct zone *page_zone(struct page *page)
zone 541 include/linux/mm.h page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
zone 559 include/linux/mm.h set_page_zone(page, zone);
zone 4 include/linux/mm_inline.h list_add(&page->lru, &zone->active_list);
zone 5 include/linux/mm_inline.h __inc_zone_state(zone, NR_ACTIVE);
zone 11 include/linux/mm_inline.h list_add(&page->lru, &zone->inactive_list);
zone 12 include/linux/mm_inline.h __inc_zone_state(zone, NR_INACTIVE);
zone 19 include/linux/mm_inline.h __dec_zone_state(zone, NR_ACTIVE);
zone 26 include/linux/mm_inline.h __dec_zone_state(zone, NR_INACTIVE);
zone 35 include/linux/mm_inline.h __dec_zone_state(zone, NR_ACTIVE);
zone 37 include/linux/mm_inline.h __dec_zone_state(zone, NR_INACTIVE);
zone 345 include/linux/mmzone.h set_bit(flag, &zone->flags);
zone 350 include/linux/mmzone.h return test_and_set_bit(flag, &zone->flags);
zone 355 include/linux/mmzone.h clear_bit(flag, &zone->flags);
zone 360 include/linux/mmzone.h return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
zone 365 include/linux/mmzone.h return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
zone 370 include/linux/mmzone.h return test_bit(ZONE_OOM_LOCKED, &zone->flags);
zone 469 include/linux/mmzone.h struct zone *zone; /* Pointer to actual zone */
zone 524 include/linux/mmzone.h struct zone node_zones[MAX_NR_ZONES];
zone 589 include/linux/mmzone.h #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
zone 593 include/linux/mmzone.h return (!!zone->present_pages);
zone 631 include/linux/mmzone.h int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
zone 632 include/linux/mmzone.h return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
zone 633 include/linux/mmzone.h (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
zone 642 include/linux/mmzone.h return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
zone 648 include/linux/mmzone.h return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
zone 657 include/linux/mmzone.h return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
zone 703 include/linux/mmzone.h extern struct zone *next_zone(struct zone *zone);
zone 721 include/linux/mmzone.h for (zone = (first_online_pgdat())->node_zones; \
zone 722 include/linux/mmzone.h zone; \
zone 723 include/linux/mmzone.h zone = next_zone(zone))
zone 725 include/linux/mmzone.h static inline struct zone *zonelist_zone(struct zoneref *zoneref)
zone 727 include/linux/mmzone.h return zoneref->zone;
zone 739 include/linux/mmzone.h return zoneref->zone->node;
zone 781 include/linux/mmzone.h zone);
zone 796 include/linux/mmzone.h for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
zone 797 include/linux/mmzone.h zone; \
zone 798 include/linux/mmzone.h z = next_zones_zonelist(++z, highidx, nodemask, &zone)) \
zone 810 include/linux/mmzone.h for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
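
The zonelist macros above drive most of the mm/ call sites listed below. A hedged usage sketch of the iteration idiom (function name mine):

    #include <linux/mmzone.h>
    #include <linux/gfp.h>

    /* Count populated zones visible from a zonelist up to high_zoneidx;
     * purely illustrative, the cursor walk is the point. */
    static int count_usable_zones(struct zonelist *zonelist,
                                  enum zone_type high_zoneidx)
    {
            struct zoneref *z;
            struct zone *zone;
            int n = 0;

            for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
                    if (populated_zone(zone))
                            n++;
            return n;
    }
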
zone 95 include/linux/swap.h struct zone;
zone 66 include/linux/tipc.h return (zone << 24) | (cluster << 12) | node;
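
The lone include/linux/tipc.h hit is an unrelated "zone": the top byte of a TIPC network address. A standalone sketch of the pack/unpack arithmetic it implies:

    #include <stdio.h>

    /* TIPC address layout per the hit above: zone in bits 31-24,
     * cluster in bits 23-12, node in bits 11-0. */
    static unsigned int tipc_addr(unsigned int zone,
                                  unsigned int cluster,
                                  unsigned int node)
    {
            return (zone << 24) | (cluster << 12) | node;
    }

    int main(void)
    {
            unsigned int a = tipc_addr(1, 1, 10);

            printf("<%u.%u.%u> = 0x%08x\n",
                   a >> 24, (a >> 12) & 0xfff, a & 0xfff, a);
            return 0;
    }
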
zone 127 include/linux/vmstat.h zone_idx(zone), delta)
zone 137 include/linux/vmstat.h atomic_long_add(x, &zone->vm_stat[item]);
zone 154 include/linux/vmstat.h long x = atomic_long_read(&zone->vm_stat[item]);
zone 171 include/linux/vmstat.h struct zone *zones = NODE_DATA(node)->node_zones;
zone 206 include/linux/vmstat.h memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
zone 235 include/linux/vmstat.h zone_page_state_add(delta, zone, item);
zone 240 include/linux/vmstat.h atomic_long_inc(&zone->vm_stat[item]);
zone 252 include/linux/vmstat.h atomic_long_dec(&zone->vm_stat[item]);
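
The include/linux/vmstat.h hits show per-zone counters kept as an atomic_long_t vm_stat[] array plus per-cpu deltas. A rough node-wide read in the style of the node_page_state() hit above, ignoring the per-cpu deltas and the negative clamping the real helpers apply (name mine):

    #include <linux/mm.h>
    #include <linux/mmzone.h>
    #include <linux/vmstat.h>

    /* Sum one vm_stat item over every zone of one node. */
    static unsigned long node_stat_sketch(int node, enum zone_stat_item item)
    {
            struct zone *zones = NODE_DATA(node)->node_zones;
            unsigned long sum = 0;
            int i;

            for (i = 0; i < MAX_NR_ZONES; i++)
                    sum += atomic_long_read(&zones[i].vm_stat[item]);
            return sum;
    }
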
zone 183 include/sound/emux_synth.h struct snd_sf_zone *zone; /* Zone assigned to this note */
zone 1387 kernel/kexec.c VMCOREINFO_STRUCT_SIZE(zone);
zone 1403 kernel/kexec.c VMCOREINFO_OFFSET(zone, free_area);
zone 1404 kernel/kexec.c VMCOREINFO_OFFSET(zone, vm_stat);
zone 1405 kernel/kexec.c VMCOREINFO_OFFSET(zone, spanned_pages);
zone 1409 kernel/kexec.c VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
zone 338 kernel/power/snapshot.c struct zone *zone;
zone 347 kernel/power/snapshot.c for_each_zone(zone)
zone 348 kernel/power/snapshot.c if (populated_zone(zone))
zone 360 kernel/power/snapshot.c for_each_zone(zone) {
zone 363 kernel/power/snapshot.c if (!populated_zone(zone))
zone 366 kernel/power/snapshot.c zone_bm->start_pfn = zone->zone_start_pfn;
zone 367 kernel/power/snapshot.c zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;
zone 369 kernel/power/snapshot.c nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
zone 376 kernel/power/snapshot.c nr = zone->spanned_pages;
zone 377 kernel/power/snapshot.c pfn = zone->zone_start_pfn;
zone 781 kernel/power/snapshot.c res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
zone 794 kernel/power/snapshot.c struct zone *zone;
zone 797 kernel/power/snapshot.c for_each_zone(zone)
zone 798 kernel/power/snapshot.c if (populated_zone(zone) && is_highmem(zone))
zone 799 kernel/power/snapshot.c cnt += zone_page_state(zone, NR_FREE_PAGES);
zone 837 kernel/power/snapshot.c struct zone *zone;
zone 840 kernel/power/snapshot.c for_each_zone(zone) {
zone 843 kernel/power/snapshot.c if (!is_highmem(zone))
zone 846 kernel/power/snapshot.c mark_free_pages(zone);
zone 847 kernel/power/snapshot.c max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
zone 848 kernel/power/snapshot.c for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
zone 895 kernel/power/snapshot.c struct zone *zone;
zone 899 kernel/power/snapshot.c for_each_zone(zone) {
zone 900 kernel/power/snapshot.c if (is_highmem(zone))
zone 903 kernel/power/snapshot.c mark_free_pages(zone);
zone 904 kernel/power/snapshot.c max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
zone 905 kernel/power/snapshot.c for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
zone 946 kernel/power/snapshot.c return is_highmem(zone) ?
zone 990 kernel/power/snapshot.c struct zone *zone;
zone 993 kernel/power/snapshot.c for_each_zone(zone) {
zone 996 kernel/power/snapshot.c mark_free_pages(zone);
zone 997 kernel/power/snapshot.c max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
zone 998 kernel/power/snapshot.c for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
zone 999 kernel/power/snapshot.c if (page_is_saveable(zone, pfn))
zone 1026 kernel/power/snapshot.c struct zone *zone;
zone 1029 kernel/power/snapshot.c for_each_zone(zone) {
zone 1030 kernel/power/snapshot.c max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
zone 1031 kernel/power/snapshot.c for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
zone 1078 kernel/power/snapshot.c struct zone *zone;
zone 1081 kernel/power/snapshot.c for_each_zone(zone) {
zone 1082 kernel/power/snapshot.c meta += snapshot_additional_pages(zone);
zone 1083 kernel/power/snapshot.c if (!is_highmem(zone))
zone 1084 kernel/power/snapshot.c free += zone_page_state(zone, NR_FREE_PAGES);
zone 1385 kernel/power/snapshot.c struct zone *zone;
zone 1389 kernel/power/snapshot.c for_each_zone(zone) {
zone 1390 kernel/power/snapshot.c max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
zone 1391 kernel/power/snapshot.c for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
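
Every kernel/power/snapshot.c hit above repeats one idiom: derive a zone's PFN range from zone_start_pfn and spanned_pages, then walk it page by page. Distilled into a sketch (the visit callback and the name are mine):

    #include <linux/mm.h>
    #include <linux/mmzone.h>

    /* Skeleton of the swsusp walk: visit every pfn a populated zone
     * spans; holes must still be filtered with pfn_valid(). */
    static void walk_zone_pfns(void (*visit)(unsigned long pfn))
    {
            struct zone *zone;
            unsigned long pfn, max_zone_pfn;

            for_each_zone(zone) {
                    if (!populated_zone(zone))
                            continue;
                    max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
                    for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                            if (pfn_valid(pfn))
                                    visit(pfn);
            }
    }
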
zone 217 kernel/power/swsusp.c struct zone *zone;
zone 232 kernel/power/swsusp.c for_each_zone (zone)
zone 233 kernel/power/swsusp.c if (populated_zone(zone)) {
zone 234 kernel/power/swsusp.c tmp += snapshot_additional_pages(zone);
zone 235 kernel/power/swsusp.c if (is_highmem(zone)) {
zone 237 kernel/power/swsusp.c zone_page_state(zone, NR_FREE_PAGES);
zone 239 kernel/power/swsusp.c tmp -= zone_page_state(zone, NR_FREE_PAGES);
zone 240 kernel/power/swsusp.c tmp += zone->lowmem_reserve[ZONE_NORMAL];
zone 248 kernel/wait.c const struct zone *zone = page_zone(virt_to_page(word));
zone 251 kernel/wait.c return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
zone 531 mm/filemap.c const struct zone *zone = page_zone(page);
zone 533 mm/filemap.c return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
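
kernel/wait.c and mm/filemap.c hash into the same per-zone wait_table, sized by wait_table_bits. The filemap variant, reassembled from the hits above (suffix on the name is mine):

    #include <linux/mm.h>
    #include <linux/hash.h>
    #include <linux/wait.h>

    /* Map a page to its hashed per-zone wait queue head. */
    static wait_queue_head_t *page_waitqueue_sketch(struct page *page)
    {
            const struct zone *zone = page_zone(page);

            return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
    }
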
zone 417 mm/hugetlb.c struct zone *zone;
zone 433 mm/hugetlb.c for_each_zone_zonelist_nodemask(zone, z, zonelist,
zone 435 mm/hugetlb.c nid = zone_to_nid(zone);
zone 436 mm/hugetlb.c if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
zone 451 mm/memcontrol.c int nid = zone->zone_pgdat->node_id;
zone 452 mm/memcontrol.c int zid = zone_idx(zone);
zone 463 mm/memcontrol.c int nid = zone->zone_pgdat->node_id;
zone 464 mm/memcontrol.c int zid = zone_idx(zone);
zone 1065 mm/memcontrol.c int zone, tmp = node;
zone 1083 mm/memcontrol.c for (zone = 0; zone < MAX_NR_ZONES; zone++) {
zone 1084 mm/memcontrol.c mz = &pn->zoneinfo[zone];
zone 131 mm/memory_hotplug.c struct zone *zone;
zone 139 mm/memory_hotplug.c zone = &pgdat->node_zones[0];
zone 140 mm/memory_hotplug.c for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
zone 141 mm/memory_hotplug.c if (zone->wait_table) {
zone 142 mm/memory_hotplug.c nr_pages = zone->wait_table_hash_nr_entries
zone 145 mm/memory_hotplug.c page = virt_to_page(zone->wait_table);
zone 167 mm/memory_hotplug.c zone_span_writelock(zone);
zone 169 mm/memory_hotplug.c old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
zone 170 mm/memory_hotplug.c if (start_pfn < zone->zone_start_pfn)
zone 171 mm/memory_hotplug.c zone->zone_start_pfn = start_pfn;
zone 173 mm/memory_hotplug.c zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
zone 174 mm/memory_hotplug.c zone->zone_start_pfn;
zone 176 mm/memory_hotplug.c zone_span_writeunlock(zone);
zone 194 mm/memory_hotplug.c struct pglist_data *pgdat = zone->zone_pgdat;
zone 200 mm/memory_hotplug.c zone_type = zone - pgdat->node_zones;
zone 201 mm/memory_hotplug.c if (!zone->wait_table) {
zone 204 mm/memory_hotplug.c ret = init_currently_empty_zone(zone, phys_start_pfn,
zone 209 mm/memory_hotplug.c pgdat_resize_lock(zone->zone_pgdat, &flags);
zone 210 mm/memory_hotplug.c grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
zone 211 mm/memory_hotplug.c grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
zone 213 mm/memory_hotplug.c pgdat_resize_unlock(zone->zone_pgdat, &flags);
zone 227 mm/memory_hotplug.c ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
zone 232 mm/memory_hotplug.c ret = __add_zone(zone, phys_start_pfn);
zone 253 mm/memory_hotplug.c struct pglist_data *pgdat = zone->zone_pgdat;
zone 264 mm/memory_hotplug.c sparse_remove_one_section(zone, ms);
zone 287 mm/memory_hotplug.c err = __add_section(zone, i << PFN_SECTION_SHIFT);
zone 331 mm/memory_hotplug.c ret = __remove_section(zone, __pfn_to_section(pfn));
zone 378 mm/memory_hotplug.c struct zone *zone;
zone 403 mm/memory_hotplug.c zone = page_zone(pfn_to_page(pfn));
zone 409 mm/memory_hotplug.c if (!populated_zone(zone))
zone 421 mm/memory_hotplug.c zone->present_pages += onlined_pages;
zone 422 mm/memory_hotplug.c zone->zone_pgdat->node_present_pages += onlined_pages;
zone 426 mm/memory_hotplug.c kswapd_run(zone_to_nid(zone));
zone 427 mm/memory_hotplug.c node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
zone 591 mm/memory_hotplug.c struct zone *zone = NULL;
zone 604 mm/memory_hotplug.c if (zone && page_zone(page) != zone)
zone 606 mm/memory_hotplug.c zone = page_zone(page);
zone 744 mm/memory_hotplug.c struct zone *zone;
zone 758 mm/memory_hotplug.c zone = page_zone(pfn_to_page(start_pfn));
zone 759 mm/memory_hotplug.c node = zone_to_nid(zone);
zone 832 mm/memory_hotplug.c zone->present_pages -= offlined_pages;
zone 833 mm/memory_hotplug.c zone->zone_pgdat->node_present_pages -= offlined_pages;
zone 131 mm/mempolicy.c struct zone *z;
zone 1402 mm/mempolicy.c struct zone *zone;
zone 1407 mm/mempolicy.c &zone);
zone 1408 mm/mempolicy.c return zone->node;
zone 52 mm/migrate.c struct zone *zone = page_zone(page);
zone 54 mm/migrate.c spin_lock_irq(&zone->lru_lock);
zone 59 mm/migrate.c del_page_from_active_list(zone, page);
zone 61 mm/migrate.c del_page_from_inactive_list(zone, page);
zone 64 mm/migrate.c spin_unlock_irq(&zone->lru_lock);
zone 31 mm/mm_init.c struct zone *zone;
zone 43 mm/mm_init.c zone = &pgdat->node_zones[zoneid];
zone 44 mm/mm_init.c if (!populated_zone(zone))
zone 50 mm/mm_init.c zone->name);
zone 53 mm/mm_init.c for_each_zone_zonelist(zone, z, zonelist, zoneid) {
zone 56 mm/mm_init.c zone->node, zone->name);
zone 58 mm/mm_init.c printk(KERN_CONT "0:%s ", zone->name);
zone 128 mm/mm_init.c BUG_ON(page_zonenum(page) != zone);
zone 29 mm/mmzone.c struct zone *next_zone(struct zone *zone)
zone 31 mm/mmzone.c pg_data_t *pgdat = zone->zone_pgdat;
zone 33 mm/mmzone.c if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
zone 34 mm/mmzone.c zone++;
zone 38 mm/mmzone.c zone = pgdat->node_zones;
zone 40 mm/mmzone.c zone = NULL;
zone 42 mm/mmzone.c return zone;
zone 69 mm/mmzone.c (z->zone && !zref_in_nodemask(z, nodes)))
zone 72 mm/mmzone.c *zone = zonelist_zone(z);
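
The next_zone() hits above are the cursor behind for_each_zone(): step through one node's node_zones[] array, then hop to the next online pgdat. The elided lines reconstruct roughly as follows (2.6-era, assuming next_online_pgdat(); suffix on the name is mine):

    #include <linux/mmzone.h>

    struct zone *next_zone_sketch(struct zone *zone)
    {
            pg_data_t *pgdat = zone->zone_pgdat;

            if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
                    zone++;
            else {
                    pgdat = next_online_pgdat(pgdat);
                    if (pgdat)
                            zone = pgdat->node_zones;
                    else
                            zone = NULL;
            }
            return zone;
    }
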
zone 179 mm/oom_kill.c struct zone *zone;
zone 184 mm/oom_kill.c for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
zone 185 mm/oom_kill.c if (cpuset_zone_allowed_softwall(zone, gfp_mask))
zone 186 mm/oom_kill.c node_clear(zone_to_nid(zone), nodes);
zone 469 mm/oom_kill.c struct zone *zone;
zone 473 mm/oom_kill.c for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
zone 474 mm/oom_kill.c if (zone_is_oom_locked(zone)) {
zone 480 mm/oom_kill.c for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
zone 486 mm/oom_kill.c zone_set_flag(zone, ZONE_OOM_LOCKED);
zone 502 mm/oom_kill.c struct zone *zone;
zone 505 mm/oom_kill.c for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
zone 506 mm/oom_kill.c zone_clear_flag(zone, ZONE_OOM_LOCKED);
zone 329 mm/page-writeback.c struct zone *z =
zone 186 mm/page_alloc.c seq = zone_span_seqbegin(zone);
zone 187 mm/page_alloc.c if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
zone 189 mm/page_alloc.c else if (pfn < zone->zone_start_pfn)
zone 191 mm/page_alloc.c } while (zone_span_seqretry(zone, seq));
zone 200 mm/page_alloc.c if (zone != page_zone(page))
zone 210 mm/page_alloc.c if (page_outside_zone_boundaries(zone, page))
zone 212 mm/page_alloc.c if (!page_is_consistent(zone, page))
zone 429 mm/page_alloc.c VM_BUG_ON(bad_range(zone, page));
zone 431 mm/page_alloc.c __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
zone 442 mm/page_alloc.c zone->free_area[order].nr_free--;
zone 451 mm/page_alloc.c &zone->free_area[order].free_list[migratetype]);
zone 452 mm/page_alloc.c zone->free_area[order].nr_free++;
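
The mm/page_alloc.c hits around __free_one_page() all manipulate zone->free_area[order]: one free list per migratetype plus an nr_free count that must move in step with the list. The bookkeeping, isolated (helper names mine):

    #include <linux/mm.h>
    #include <linux/mmzone.h>
    #include <linux/list.h>

    /* Insert a buddy of the given order, keeping nr_free in sync. */
    static void free_area_add(struct zone *zone, struct page *page,
                              unsigned int order, int migratetype)
    {
            list_add(&page->lru,
                     &zone->free_area[order].free_list[migratetype]);
            zone->free_area[order].nr_free++;
    }

    /* Remove it again, e.g. on the allocation path. */
    static void free_area_del(struct zone *zone, struct page *page,
                              unsigned int order)
    {
            list_del(&page->lru);
            zone->free_area[order].nr_free--;
    }
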
zone 487 mm/page_alloc.c spin_lock(&zone->lock);
zone 488 mm/page_alloc.c zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
zone 489 mm/page_alloc.c zone->pages_scanned = 0;
zone 497 mm/page_alloc.c __free_one_page(page, zone, order);
zone 499 mm/page_alloc.c spin_unlock(&zone->lock);
zone 504 mm/page_alloc.c spin_lock(&zone->lock);
zone 505 mm/page_alloc.c zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
zone 506 mm/page_alloc.c zone->pages_scanned = 0;
zone 507 mm/page_alloc.c __free_one_page(page, zone, order);
zone 508 mm/page_alloc.c spin_unlock(&zone->lock);
zone 589 mm/page_alloc.c VM_BUG_ON(bad_range(zone, &page[size]));
zone 646 mm/page_alloc.c area = &(zone->free_area[current_order]);
zone 655 mm/page_alloc.c __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
zone 656 mm/page_alloc.c expand(zone, page, order, current_order, area, migratetype);
zone 701 mm/page_alloc.c VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
zone 716 mm/page_alloc.c &zone->free_area[order].free_list[migratetype]);
zone 737 mm/page_alloc.c if (start_pfn < zone->zone_start_pfn)
zone 739 mm/page_alloc.c if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
zone 742 mm/page_alloc.c return move_freepages(zone, start_page, end_page, migratetype);
zone 764 mm/page_alloc.c area = &(zone->free_area[current_order]);
zone 781 mm/page_alloc.c pages = move_freepages_block(zone, page,
zone 795 mm/page_alloc.c __mod_zone_page_state(zone, NR_FREE_PAGES,
zone 802 mm/page_alloc.c expand(zone, page, order, current_order, area, migratetype);
zone 808 mm/page_alloc.c return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
zone 820 mm/page_alloc.c page = __rmqueue_smallest(zone, order, migratetype);
zone 823 mm/page_alloc.c page = __rmqueue_fallback(zone, order, migratetype);
zone 839 mm/page_alloc.c spin_lock(&zone->lock);
zone 841 mm/page_alloc.c struct page *page = __rmqueue(zone, order, migratetype);
zone 858 mm/page_alloc.c spin_unlock(&zone->lock);
zone 881 mm/page_alloc.c free_pages_bulk(zone, to_drain, &pcp->list, 0);
zone 897 mm/page_alloc.c struct zone *zone;
zone 899 mm/page_alloc.c for_each_zone(zone) {
zone 903 mm/page_alloc.c if (!populated_zone(zone))
zone 906 mm/page_alloc.c pset = zone_pcp(zone, cpu);
zone 910 mm/page_alloc.c free_pages_bulk(zone, pcp->count, &pcp->list, 0);
zone 941 mm/page_alloc.c if (!zone->spanned_pages)
zone 944 mm/page_alloc.c spin_lock_irqsave(&zone->lock, flags);
zone 946 mm/page_alloc.c max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
zone 947 mm/page_alloc.c for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
zone 956 mm/page_alloc.c list_for_each(curr, &zone->free_area[order].free_list[t]) {
zone 964 mm/page_alloc.c spin_unlock_irqrestore(&zone->lock, flags);
zone 973 mm/page_alloc.c struct zone *zone = page_zone(page);
zone 989 mm/page_alloc.c pcp = &zone_pcp(zone, get_cpu())->pcp;
zone 999 mm/page_alloc.c free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
zone 1053 mm/page_alloc.c pcp = &zone_pcp(zone, cpu)->pcp;
zone 1056 mm/page_alloc.c pcp->count = rmqueue_bulk(zone, 0,
zone 1075 mm/page_alloc.c pcp->count += rmqueue_bulk(zone, 0,
zone 1083 mm/page_alloc.c spin_lock_irqsave(&zone->lock, flags);
zone 1084 mm/page_alloc.c page = __rmqueue(zone, order, migratetype);
zone 1085 mm/page_alloc.c spin_unlock(&zone->lock);
zone 1090 mm/page_alloc.c __count_zone_vm_events(PGALLOC, zone, 1 << order);
zone 1091 mm/page_alloc.c zone_statistics(preferred_zone, zone);
zone 1095 mm/page_alloc.c VM_BUG_ON(bad_range(zone, page));
zone 1372 mm/page_alloc.c struct zone *zone, *preferred_zone;
zone 1389 mm/page_alloc.c for_each_zone_zonelist_nodemask(zone, z, zonelist,
zone 1395 mm/page_alloc.c !cpuset_zone_allowed_softwall(zone, gfp_mask))
zone 1401 mm/page_alloc.c mark = zone->pages_min;
zone 1403 mm/page_alloc.c mark = zone->pages_low;
zone 1405 mm/page_alloc.c mark = zone->pages_high;
zone 1406 mm/page_alloc.c if (!zone_watermark_ok(zone, order, mark,
zone 1409 mm/page_alloc.c !zone_reclaim(zone, gfp_mask, order))
zone 1414 mm/page_alloc.c page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
zone 1447 mm/page_alloc.c struct zone *zone;
zone 1464 mm/page_alloc.c if (unlikely(!z->zone)) {
zone 1488 mm/page_alloc.c for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
zone 1489 mm/page_alloc.c wakeup_kswapd(zone, order);
zone 1763 mm/page_alloc.c struct zone *zone;
zone 1770 mm/page_alloc.c for_each_zone_zonelist(zone, z, zonelist, offset) {
zone 1771 mm/page_alloc.c unsigned long size = zone->present_pages;
zone 1772 mm/page_alloc.c unsigned long high = zone->pages_high;
zone 1800 mm/page_alloc.c printk("Node %d ", zone_to_nid(zone));
zone 1845 mm/page_alloc.c struct zone *zone;
zone 1847 mm/page_alloc.c for_each_zone(zone) {
zone 1848 mm/page_alloc.c if (!populated_zone(zone))
zone 1851 mm/page_alloc.c show_node(zone);
zone 1852 mm/page_alloc.c printk("%s per-cpu:\n", zone->name);
zone 1857 mm/page_alloc.c pageset = zone_pcp(zone, cpu);
zone 1879 mm/page_alloc.c for_each_zone(zone) {
zone 1882 mm/page_alloc.c if (!populated_zone(zone))
zone 1885 mm/page_alloc.c show_node(zone);
zone 1897 mm/page_alloc.c zone->name,
zone 1898 mm/page_alloc.c K(zone_page_state(zone, NR_FREE_PAGES)),
zone 1899 mm/page_alloc.c K(zone->pages_min),
zone 1900 mm/page_alloc.c K(zone->pages_low),
zone 1901 mm/page_alloc.c K(zone->pages_high),
zone 1902 mm/page_alloc.c K(zone_page_state(zone, NR_ACTIVE)),
zone 1903 mm/page_alloc.c K(zone_page_state(zone, NR_INACTIVE)),
zone 1904 mm/page_alloc.c K(zone->present_pages),
zone 1905 mm/page_alloc.c zone->pages_scanned,
zone 1906 mm/page_alloc.c (zone_is_all_unreclaimable(zone) ? "yes" : "no")
zone 1910 mm/page_alloc.c printk(" %lu", zone->lowmem_reserve[i]);
zone 1914 mm/page_alloc.c for_each_zone(zone) {
zone 1917 mm/page_alloc.c if (!populated_zone(zone))
zone 1920 mm/page_alloc.c show_node(zone);
zone 1921 mm/page_alloc.c printk("%s: ", zone->name);
zone 1923 mm/page_alloc.c spin_lock_irqsave(&zone->lock, flags);
zone 1925 mm/page_alloc.c nr[order] = zone->free_area[order].nr_free;
zone 1928 mm/page_alloc.c spin_unlock_irqrestore(&zone->lock, flags);
zone 1941 mm/page_alloc.c zoneref->zone = zone;
zone 1942 mm/page_alloc.c zoneref->zone_idx = zone_idx(zone);
zone 1953 mm/page_alloc.c struct zone *zone;
zone 1960 mm/page_alloc.c zone = pgdat->node_zones + zone_type;
zone 1961 mm/page_alloc.c if (populated_zone(zone)) {
zone 1962 mm/page_alloc.c zoneref_set_zone(zone,
zone 2139 mm/page_alloc.c for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
zone 2143 mm/page_alloc.c zonelist->_zonerefs[j].zone = NULL;
zone 2157 mm/page_alloc.c zonelist->_zonerefs[j].zone = NULL;
zone 2173 mm/page_alloc.c struct zone *z;
zone 2189 mm/page_alloc.c zonelist->_zonerefs[pos].zone = NULL;
zone 2197 mm/page_alloc.c struct zone *z;
zone 2267 mm/page_alloc.c zonelist->_zonerefs[0].zone = NULL;
zone 2325 mm/page_alloc.c for (z = zonelist->_zonerefs; z->zone; z++)
zone 2369 mm/page_alloc.c zonelist->_zonerefs[j].zone = NULL;
zone 2515 mm/page_alloc.c start_pfn = zone->zone_start_pfn;
zone 2516 mm/page_alloc.c end_pfn = start_pfn + zone->spanned_pages;
zone 2517 mm/page_alloc.c reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
zone 2526 mm/page_alloc.c if (page_to_nid(page) != zone_to_nid(zone))
zone 2544 mm/page_alloc.c move_freepages_block(zone, page, MIGRATE_RESERVE);
zone 2555 mm/page_alloc.c move_freepages_block(zone, page, MIGRATE_MOVABLE);
zone 2571 mm/page_alloc.c struct zone *z;
zone 2573 mm/page_alloc.c z = &NODE_DATA(nid)->node_zones[zone];
zone 2587 mm/page_alloc.c set_page_links(page, zone, nid, pfn);
zone 2588 mm/page_alloc.c mminit_verify_page_links(page, zone, nid, pfn);
zone 2614 mm/page_alloc.c if (!is_highmem_idx(zone))
zone 2624 mm/page_alloc.c INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
zone 2625 mm/page_alloc.c zone->free_area[order].nr_free = 0;
zone 2631 mm/page_alloc.c memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
zone 2644 mm/page_alloc.c batch = zone->present_pages / 1024;
zone 2723 mm/page_alloc.c struct zone *zone, *dzone;
zone 2728 mm/page_alloc.c for_each_zone(zone) {
zone 2730 mm/page_alloc.c if (!populated_zone(zone))
zone 2733 mm/page_alloc.c zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
zone 2735 mm/page_alloc.c if (!zone_pcp(zone, cpu))
zone 2738 mm/page_alloc.c setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
zone 2741 mm/page_alloc.c setup_pagelist_highmark(zone_pcp(zone, cpu),
zone 2742 mm/page_alloc.c (zone->present_pages / percpu_pagelist_fraction));
zone 2750 mm/page_alloc.c if (dzone == zone)
zone 2760 mm/page_alloc.c struct zone *zone;
zone 2762 mm/page_alloc.c for_each_zone(zone) {
zone 2763 mm/page_alloc.c struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
zone 2768 mm/page_alloc.c zone_pcp(zone, cpu) = NULL;
zone 2819 mm/page_alloc.c struct pglist_data *pgdat = zone->zone_pgdat;
zone 2826 mm/page_alloc.c zone->wait_table_hash_nr_entries =
zone 2828 mm/page_alloc.c zone->wait_table_bits =
zone 2829 mm/page_alloc.c wait_table_bits(zone->wait_table_hash_nr_entries);
zone 2830 mm/page_alloc.c alloc_size = zone->wait_table_hash_nr_entries
zone 2834 mm/page_alloc.c zone->wait_table = (wait_queue_head_t *)
zone 2847 mm/page_alloc.c zone->wait_table = vmalloc(alloc_size);
zone 2849 mm/page_alloc.c if (!zone->wait_table)
zone 2852 mm/page_alloc.c for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
zone 2853 mm/page_alloc.c init_waitqueue_head(zone->wait_table + i);
zone 2861 mm/page_alloc.c unsigned long batch = zone_batchsize(zone);
zone 2866 mm/page_alloc.c zone_pcp(zone, cpu) = &boot_pageset[cpu];
zone 2869 mm/page_alloc.c setup_pageset(zone_pcp(zone,cpu), batch);
zone 2872 mm/page_alloc.c if (zone->present_pages)
zone 2874 mm/page_alloc.c zone->name, zone->present_pages, batch);
zone 2882 mm/page_alloc.c struct pglist_data *pgdat = zone->zone_pgdat;
zone 2884 mm/page_alloc.c ret = zone_wait_table_init(zone, size);
zone 2887 mm/page_alloc.c pgdat->nr_zones = zone_idx(zone) + 1;
zone 2889 mm/page_alloc.c zone->zone_start_pfn = zone_start_pfn;
zone 2894 mm/page_alloc.c (unsigned long)zone_idx(zone),
zone 2897 mm/page_alloc.c zone_init_free_lists(zone);
zone 3344 mm/page_alloc.c zone->pageblock_flags = NULL;
zone 3346 mm/page_alloc.c zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
zone 3347 mm/page_alloc.c memset(zone->pageblock_flags, 0, usemapsize);
zone 3415 mm/page_alloc.c struct zone *zone = pgdat->node_zones + j;
zone 3451 mm/page_alloc.c zone->spanned_pages = size;
zone 3452 mm/page_alloc.c zone->present_pages = realsize;
zone 3454 mm/page_alloc.c zone->node = nid;
zone 3455 mm/page_alloc.c zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
zone 3457 mm/page_alloc.c zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
zone 3459 mm/page_alloc.c zone->name = zone_names[j];
zone 3460 mm/page_alloc.c spin_lock_init(&zone->lock);
zone 3461 mm/page_alloc.c spin_lock_init(&zone->lru_lock);
zone 3462 mm/page_alloc.c zone_seqlock_init(zone);
zone 3463 mm/page_alloc.c zone->zone_pgdat = pgdat;
zone 3465 mm/page_alloc.c zone->prev_priority = DEF_PRIORITY;
zone 3467 mm/page_alloc.c zone_pcp_init(zone);
zone 3468 mm/page_alloc.c INIT_LIST_HEAD(&zone->active_list);
zone 3469 mm/page_alloc.c INIT_LIST_HEAD(&zone->inactive_list);
zone 3470 mm/page_alloc.c zone->nr_scan_active = 0;
zone 3471 mm/page_alloc.c zone->nr_scan_inactive = 0;
zone 3472 mm/page_alloc.c zap_zone_vm_stats(zone);
zone 3473 mm/page_alloc.c zone->flags = 0;
zone 3478 mm/page_alloc.c setup_usemap(pgdat, zone, size);
zone 3479 mm/page_alloc.c ret = init_currently_empty_zone(zone, zone_start_pfn,
zone 3932 mm/page_alloc.c struct zone *zone = &pgdat->node_zones[zone_type];
zone 3933 mm/page_alloc.c if (zone->present_pages)
zone 3934 mm/page_alloc.c node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
zone 4132 mm/page_alloc.c struct zone *zone = pgdat->node_zones + i;
zone 4137 mm/page_alloc.c if (zone->lowmem_reserve[j] > max)
zone 4138 mm/page_alloc.c max = zone->lowmem_reserve[j];
zone 4142 mm/page_alloc.c max += zone->pages_high;
zone 4144 mm/page_alloc.c if (max > zone->present_pages)
zone 4145 mm/page_alloc.c max = zone->present_pages;
zone 4165 mm/page_alloc.c struct zone *zone = pgdat->node_zones + j;
zone 4166 mm/page_alloc.c unsigned long present_pages = zone->present_pages;
zone 4168 mm/page_alloc.c zone->lowmem_reserve[j] = 0;
zone 4172 mm/page_alloc.c struct zone *lower_zone;
zone 4201 mm/page_alloc.c struct zone *zone;
zone 4205 mm/page_alloc.c for_each_zone(zone) {
zone 4206 mm/page_alloc.c if (!is_highmem(zone))
zone 4207 mm/page_alloc.c lowmem_pages += zone->present_pages;
zone 4210 mm/page_alloc.c for_each_zone(zone) {
zone 4213 mm/page_alloc.c spin_lock_irqsave(&zone->lru_lock, flags);
zone 4214 mm/page_alloc.c tmp = (u64)pages_min * zone->present_pages;
zone 4216 mm/page_alloc.c if (is_highmem(zone)) {
zone 4228 mm/page_alloc.c min_pages = zone->present_pages / 1024;
zone 4233 mm/page_alloc.c zone->pages_min = min_pages;
zone 4239 mm/page_alloc.c zone->pages_min = tmp;
zone 4242 mm/page_alloc.c zone->pages_low = zone->pages_min + (tmp >> 2);
zone 4243 mm/page_alloc.c zone->pages_high = zone->pages_min + (tmp >> 1);
zone 4244 mm/page_alloc.c setup_zone_migrate_reserve(zone);
zone 4245 mm/page_alloc.c spin_unlock_irqrestore(&zone->lru_lock, flags);
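
setup_per_zone_pages_min(), whose hits end here, gives each zone a share of the global minimum proportional to its size and derives the other watermarks from it: pages_low = pages_min + share/4, pages_high = pages_min + share/2. A standalone rendition of the non-highmem path (do_div replaced by plain division; names mine):

    #include <stdio.h>

    static void set_watermarks(unsigned long pages_min_total,
                               unsigned long zone_pages,
                               unsigned long lowmem_pages,
                               unsigned long *min, unsigned long *low,
                               unsigned long *high)
    {
            unsigned long long tmp;

            tmp = (unsigned long long)pages_min_total * zone_pages;
            tmp /= lowmem_pages;            /* this zone's share */

            *min = tmp;
            *low = *min + (tmp >> 2);
            *high = *min + (tmp >> 1);
    }

    int main(void)
    {
            unsigned long min, low, high;

            set_watermarks(1024, 262144, 524288, &min, &low, &high);
            printf("min=%lu low=%lu high=%lu\n", min, low, high);
            return 0;
    }
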
zone 4311 mm/page_alloc.c struct zone *zone;
zone 4318 mm/page_alloc.c for_each_zone(zone)
zone 4319 mm/page_alloc.c zone->min_unmapped_pages = (zone->present_pages *
zone 4327 mm/page_alloc.c struct zone *zone;
zone 4334 mm/page_alloc.c for_each_zone(zone)
zone 4335 mm/page_alloc.c zone->min_slab_pages = (zone->present_pages *
zone 4367 mm/page_alloc.c struct zone *zone;
zone 4374 mm/page_alloc.c for_each_zone(zone) {
zone 4377 mm/page_alloc.c high = zone->present_pages / percpu_pagelist_fraction;
zone 4378 mm/page_alloc.c setup_pagelist_highmark(zone_pcp(zone, cpu), high);
zone 4511 mm/page_alloc.c return zone->pageblock_flags;
zone 4521 mm/page_alloc.c pfn = pfn - zone->zone_start_pfn;
zone 4536 mm/page_alloc.c struct zone *zone;
zone 4542 mm/page_alloc.c zone = page_zone(page);
zone 4544 mm/page_alloc.c bitmap = get_pageblock_bitmap(zone, pfn);
zone 4545 mm/page_alloc.c bitidx = pfn_to_bitidx(zone, pfn);
zone 4564 mm/page_alloc.c struct zone *zone;
zone 4569 mm/page_alloc.c zone = page_zone(page);
zone 4571 mm/page_alloc.c bitmap = get_pageblock_bitmap(zone, pfn);
zone 4572 mm/page_alloc.c bitidx = pfn_to_bitidx(zone, pfn);
zone 4573 mm/page_alloc.c VM_BUG_ON(pfn < zone->zone_start_pfn);
zone 4574 mm/page_alloc.c VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
zone 4591 mm/page_alloc.c struct zone *zone;
zone 4595 mm/page_alloc.c zone = page_zone(page);
zone 4596 mm/page_alloc.c spin_lock_irqsave(&zone->lock, flags);
zone 4603 mm/page_alloc.c move_freepages_block(zone, page, MIGRATE_ISOLATE);
zone 4606 mm/page_alloc.c spin_unlock_irqrestore(&zone->lock, flags);
zone 4614 mm/page_alloc.c struct zone *zone;
zone 4616 mm/page_alloc.c zone = page_zone(page);
zone 4617 mm/page_alloc.c spin_lock_irqsave(&zone->lock, flags);
zone 4621 mm/page_alloc.c move_freepages_block(zone, page, MIGRATE_MOVABLE);
zone 4623 mm/page_alloc.c spin_unlock_irqrestore(&zone->lock, flags);
zone 4634 mm/page_alloc.c struct zone *zone;
zone 4644 mm/page_alloc.c zone = page_zone(pfn_to_page(pfn));
zone 4645 mm/page_alloc.c spin_lock_irqsave(&zone->lock, flags);
zone 4662 mm/page_alloc.c zone->free_area[order].nr_free--;
zone 4663 mm/page_alloc.c __mod_zone_page_state(zone, NR_FREE_PAGES,
zone 4669 mm/page_alloc.c spin_unlock_irqrestore(&zone->lock, flags);
zone 119 mm/page_isolation.c struct zone *zone;
zone 136 mm/page_isolation.c zone = page_zone(pfn_to_page(pfn));
zone 137 mm/page_isolation.c spin_lock_irqsave(&zone->lock, flags);
zone 139 mm/page_isolation.c spin_unlock_irqrestore(&zone->lock, flags);
zone 30 mm/quicklist.c struct zone *zones = NODE_DATA(node)->node_zones;
zone 3236 mm/slab.c struct zone *zone;
zone 3252 mm/slab.c for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
zone 3253 mm/slab.c nid = zone_to_nid(zone);
zone 3255 mm/slab.c if (cpuset_zone_allowed_hardwall(zone, flags) &&
zone 1299 mm/slub.c struct zone *zone;
zone 1326 mm/slub.c for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
zone 1329 mm/slub.c n = get_node(s, zone_to_nid(zone));
zone 1331 mm/slub.c if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
zone 577 mm/sparse.c struct pglist_data *pgdat = zone->zone_pgdat;
zone 49 mm/swap.c struct zone *zone = page_zone(page);
zone 51 mm/swap.c spin_lock_irqsave(&zone->lru_lock, flags);
zone 54 mm/swap.c del_page_from_lru(zone, page);
zone 55 mm/swap.c spin_unlock_irqrestore(&zone->lru_lock, flags);
zone 107 mm/swap.c struct zone *zone = NULL;
zone 111 mm/swap.c struct zone *pagezone = page_zone(page);
zone 113 mm/swap.c if (pagezone != zone) {
zone 114 mm/swap.c if (zone)
zone 115 mm/swap.c spin_unlock(&zone->lru_lock);
zone 116 mm/swap.c zone = pagezone;
zone 117 mm/swap.c spin_lock(&zone->lru_lock);
zone 120 mm/swap.c list_move_tail(&page->lru, &zone->inactive_list);
zone 124 mm/swap.c if (zone)
zone 125 mm/swap.c spin_unlock(&zone->lru_lock);
zone 157 mm/swap.c struct zone *zone = page_zone(page);
zone 159 mm/swap.c spin_lock_irq(&zone->lru_lock);
zone 161 mm/swap.c del_page_from_inactive_list(zone, page);
zone 163 mm/swap.c add_page_to_active_list(zone, page);
zone 167 mm/swap.c spin_unlock_irq(&zone->lru_lock);
zone 290 mm/swap.c struct zone *zone = NULL;
zone 298 mm/swap.c if (zone) {
zone 299 mm/swap.c spin_unlock_irqrestore(&zone->lru_lock, flags);
zone 300 mm/swap.c zone = NULL;
zone 310 mm/swap.c struct zone *pagezone = page_zone(page);
zone 311 mm/swap.c if (pagezone != zone) {
zone 312 mm/swap.c if (zone)
zone 313 mm/swap.c spin_unlock_irqrestore(&zone->lru_lock,
zone 315 mm/swap.c zone = pagezone;
zone 316 mm/swap.c spin_lock_irqsave(&zone->lru_lock, flags);
zone 320 mm/swap.c del_page_from_lru(zone, page);
zone 324 mm/swap.c if (zone) {
zone 325 mm/swap.c spin_unlock_irqrestore(&zone->lru_lock, flags);
zone 326 mm/swap.c zone = NULL;
zone 332 mm/swap.c if (zone)
zone 333 mm/swap.c spin_unlock_irqrestore(&zone->lru_lock, flags);
zone 386 mm/swap.c struct zone *zone = NULL;
zone 390 mm/swap.c struct zone *pagezone = page_zone(page);
zone 392 mm/swap.c if (pagezone != zone) {
zone 393 mm/swap.c if (zone)
zone 394 mm/swap.c spin_unlock_irq(&zone->lru_lock);
zone 395 mm/swap.c zone = pagezone;
zone 396 mm/swap.c spin_lock_irq(&zone->lru_lock);
zone 400 mm/swap.c add_page_to_inactive_list(zone, page);
zone 402 mm/swap.c if (zone)
zone 403 mm/swap.c spin_unlock_irq(&zone->lru_lock);
zone 413 mm/swap.c struct zone *zone = NULL;
zone 417 mm/swap.c struct zone *pagezone = page_zone(page);
zone 419 mm/swap.c if (pagezone != zone) {
zone 420 mm/swap.c if (zone)
zone 421 mm/swap.c spin_unlock_irq(&zone->lru_lock);
zone 422 mm/swap.c zone = pagezone;
zone 423 mm/swap.c spin_lock_irq(&zone->lru_lock);
zone 429 mm/swap.c add_page_to_active_list(zone, page);
zone 431 mm/swap.c if (zone)
zone 432 mm/swap.c spin_unlock_irq(&zone->lru_lock);
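
All of the mm/swap.c hits are one batching trick: while draining a pagevec, keep zone->lru_lock held across consecutive same-zone pages and cycle the lock only when the zone changes. The skeleton (irq-save variants elided; name mine):

    #include <linux/mm.h>
    #include <linux/mmzone.h>
    #include <linux/spinlock.h>

    /* One lock round-trip per run of same-zone pages instead of one
     * per page. */
    static void drain_pages_sketch(struct page **pages, int nr)
    {
            struct zone *zone = NULL;
            int i;

            for (i = 0; i < nr; i++) {
                    struct zone *pagezone = page_zone(pages[i]);

                    if (pagezone != zone) {
                            if (zone)
                                    spin_unlock_irq(&zone->lru_lock);
                            zone = pagezone;
                            spin_lock_irq(&zone->lru_lock);
                    }
                    /* ... per-page LRU work under zone->lru_lock ... */
            }
            if (zone)
                    spin_unlock_irq(&zone->lru_lock);
    }
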
zone 80 mm/vmscan.c struct zone *z, struct mem_cgroup *mem_cont,
zone 862 mm/vmscan.c spin_lock_irq(&zone->lru_lock);
zone 874 mm/vmscan.c zone, sc->mem_cgroup, 0);
zone 878 mm/vmscan.c __mod_zone_page_state(zone, NR_ACTIVE, -nr_active);
zone 879 mm/vmscan.c __mod_zone_page_state(zone, NR_INACTIVE,
zone 882 mm/vmscan.c zone->pages_scanned += nr_scan;
zone 883 mm/vmscan.c spin_unlock_irq(&zone->lru_lock);
zone 912 mm/vmscan.c __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
zone 915 mm/vmscan.c __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
zone 917 mm/vmscan.c __count_zone_vm_events(PGSTEAL, zone, nr_freed);
zone 922 mm/vmscan.c spin_lock(&zone->lru_lock);
zone 932 mm/vmscan.c add_page_to_active_list(zone, page);
zone 934 mm/vmscan.c add_page_to_inactive_list(zone, page);
zone 936 mm/vmscan.c spin_unlock_irq(&zone->lru_lock);
zone 938 mm/vmscan.c spin_lock_irq(&zone->lru_lock);
zone 942 mm/vmscan.c spin_unlock(&zone->lru_lock);
zone 959 mm/vmscan.c if (priority < zone->prev_priority)
zone 960 mm/vmscan.c zone->prev_priority = priority;
zone 965 mm/vmscan.c return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE)
zone 966 mm/vmscan.c + zone_page_state(zone, NR_INACTIVE))*3;
zone 983 mm/vmscan.c if (scan_global_lru(sc) && zone_is_near_oom(zone))
zone 990 mm/vmscan.c prev_priority = zone->prev_priority;
zone 1036 mm/vmscan.c imbalance = zone_page_state(zone, NR_ACTIVE);
zone 1037 mm/vmscan.c imbalance /= zone_page_state(zone, NR_INACTIVE) + 1;
zone 1109 mm/vmscan.c reclaim_mapped = calc_reclaim_mapped(sc, zone, priority);
zone 1112 mm/vmscan.c spin_lock_irq(&zone->lru_lock);
zone 1114 mm/vmscan.c ISOLATE_ACTIVE, zone,
zone 1121 mm/vmscan.c zone->pages_scanned += pgscanned;
zone 1123 mm/vmscan.c __mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
zone 1124 mm/vmscan.c spin_unlock_irq(&zone->lru_lock);
zone 1143 mm/vmscan.c spin_lock_irq(&zone->lru_lock);
zone 1152 mm/vmscan.c list_move(&page->lru, &zone->inactive_list);
zone 1156 mm/vmscan.c __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
zone 1157 mm/vmscan.c spin_unlock_irq(&zone->lru_lock);
zone 1163 mm/vmscan.c spin_lock_irq(&zone->lru_lock);
zone 1166 mm/vmscan.c __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
zone 1169 mm/vmscan.c spin_unlock_irq(&zone->lru_lock);
zone 1171 mm/vmscan.c spin_lock_irq(&zone->lru_lock);
zone 1182 mm/vmscan.c list_move(&page->lru, &zone->active_list);
zone 1186 mm/vmscan.c __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
zone 1188 mm/vmscan.c spin_unlock_irq(&zone->lru_lock);
zone 1190 mm/vmscan.c spin_lock_irq(&zone->lru_lock);
zone 1193 mm/vmscan.c __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
zone 1195 mm/vmscan.c __count_zone_vm_events(PGREFILL, zone, pgscanned);
zone 1197 mm/vmscan.c spin_unlock_irq(&zone->lru_lock);
zone 1218 mm/vmscan.c zone->nr_scan_active +=
zone 1219 mm/vmscan.c (zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
zone 1220 mm/vmscan.c nr_active = zone->nr_scan_active;
zone 1221 mm/vmscan.c zone->nr_scan_inactive +=
zone 1222 mm/vmscan.c (zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
zone 1223 mm/vmscan.c nr_inactive = zone->nr_scan_inactive;
zone 1225 mm/vmscan.c zone->nr_scan_inactive = 0;
zone 1230 mm/vmscan.c zone->nr_scan_active = 0;
zone 1240 mm/vmscan.c zone, priority);
zone 1243 mm/vmscan.c zone, priority);
zone 1252 mm/vmscan.c shrink_active_list(nr_to_scan, zone, sc, priority);
zone 1259 mm/vmscan.c nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
zone 1290 mm/vmscan.c struct zone *zone;
zone 1293 mm/vmscan.c for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
zone 1294 mm/vmscan.c if (!populated_zone(zone))
zone 1301 mm/vmscan.c if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
zone 1303 mm/vmscan.c note_zone_scanning_priority(zone, priority);
zone 1305 mm/vmscan.c if (zone_is_all_unreclaimable(zone) &&
zone 1319 mm/vmscan.c nr_reclaimed += shrink_zone(priority, zone, sc);
zone 1351 mm/vmscan.c struct zone *zone;
zone 1362 mm/vmscan.c for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
zone 1364 mm/vmscan.c if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
zone 1367 mm/vmscan.c lru_pages += zone_page_state(zone, NR_ACTIVE)
zone 1368 mm/vmscan.c + zone_page_state(zone, NR_INACTIVE);
zone 1426 mm/vmscan.c for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
zone 1428 mm/vmscan.c if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
zone 1431 mm/vmscan.c zone->prev_priority = priority;
zone 1549 mm/vmscan.c struct zone *zone = pgdat->node_zones + i;
zone 1551 mm/vmscan.c if (!populated_zone(zone))
zone 1554 mm/vmscan.c if (zone_is_all_unreclaimable(zone) &&
zone 1558 mm/vmscan.c if (!zone_watermark_ok(zone, order, zone->pages_high,
zone 1568 mm/vmscan.c struct zone *zone = pgdat->node_zones + i;
zone 1570 mm/vmscan.c lru_pages += zone_page_state(zone, NR_ACTIVE)
zone 1571 mm/vmscan.c + zone_page_state(zone, NR_INACTIVE);
zone 1584 mm/vmscan.c struct zone *zone = pgdat->node_zones + i;
zone 1587 mm/vmscan.c if (!populated_zone(zone))
zone 1590 mm/vmscan.c if (zone_is_all_unreclaimable(zone) &&
zone 1594 mm/vmscan.c if (!zone_watermark_ok(zone, order, zone->pages_high,
zone 1599 mm/vmscan.c note_zone_scanning_priority(zone, priority);
zone 1604 mm/vmscan.c if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
zone 1606 mm/vmscan.c nr_reclaimed += shrink_zone(priority, zone, &sc);
zone 1612 mm/vmscan.c if (zone_is_all_unreclaimable(zone))
zone 1614 mm/vmscan.c if (nr_slab == 0 && zone->pages_scanned >=
zone 1615 mm/vmscan.c (zone_page_state(zone, NR_ACTIVE)
zone 1616 mm/vmscan.c + zone_page_state(zone, NR_INACTIVE)) * 6)
zone 1617 mm/vmscan.c zone_set_flag(zone,
zone 1653 mm/vmscan.c struct zone *zone = pgdat->node_zones + i;
zone 1655 mm/vmscan.c zone->prev_priority = temp_priority[i];
zone 1749 mm/vmscan.c if (!populated_zone(zone))
zone 1752 mm/vmscan.c pgdat = zone->zone_pgdat;
zone 1753 mm/vmscan.c if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
zone 1757 mm/vmscan.c if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
zone 1775 mm/vmscan.c struct zone *zone;
zone 1778 mm/vmscan.c for_each_zone(zone) {
zone 1780 mm/vmscan.c if (!populated_zone(zone))
zone 1783 mm/vmscan.c if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
zone 1788 mm/vmscan.c zone->nr_scan_active +=
zone 1789 mm/vmscan.c (zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
zone 1790 mm/vmscan.c if (zone->nr_scan_active >= nr_pages || pass > 3) {
zone 1791 mm/vmscan.c zone->nr_scan_active = 0;
zone 1793 mm/vmscan.c zone_page_state(zone, NR_ACTIVE));
zone 1794 mm/vmscan.c shrink_active_list(nr_to_scan, zone, sc, prio);
zone 1798 mm/vmscan.c zone->nr_scan_inactive +=
zone 1799 mm/vmscan.c (zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
zone 1800 mm/vmscan.c if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
zone 1801 mm/vmscan.c zone->nr_scan_inactive = 0;
zone 1803 mm/vmscan.c zone_page_state(zone, NR_INACTIVE));
zone 1804 mm/vmscan.c ret += shrink_inactive_list(nr_to_scan, zone, sc);
zone 2038 mm/vmscan.c if (zone_page_state(zone, NR_FILE_PAGES) -
zone 2039 mm/vmscan.c zone_page_state(zone, NR_FILE_MAPPED) >
zone 2040 mm/vmscan.c zone->min_unmapped_pages) {
zone 2047 mm/vmscan.c note_zone_scanning_priority(zone, priority);
zone 2048 mm/vmscan.c nr_reclaimed += shrink_zone(priority, zone, &sc);
zone 2053 mm/vmscan.c slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
zone 2054 mm/vmscan.c if (slab_reclaimable > zone->min_slab_pages) {
zone 2066 mm/vmscan.c zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
zone 2075 mm/vmscan.c zone_page_state(zone, NR_SLAB_RECLAIMABLE);
zone 2098 mm/vmscan.c if (zone_page_state(zone, NR_FILE_PAGES) -
zone 2099 mm/vmscan.c zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
zone 2100 mm/vmscan.c && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
zone 2101 mm/vmscan.c <= zone->min_slab_pages)
zone 2104 mm/vmscan.c if (zone_is_all_unreclaimable(zone))
zone 2119 mm/vmscan.c node_id = zone_to_nid(zone);
zone 2123 mm/vmscan.c if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
zone 2125 mm/vmscan.c ret = __zone_reclaim(zone, gfp_mask, order);
zone 2126 mm/vmscan.c zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
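
The tail of the mm/vmscan.c hits combines with the mmzone.h flag helpers near the top of this listing: ZONE_RECLAIM_LOCKED plus zone_test_and_set_flag() form a per-zone try-lock around __zone_reclaim(). Reassembled as a sketch (suffix on the name is mine):

    #include <linux/mmzone.h>
    #include <linux/gfp.h>

    /* The real worker lives earlier in vmscan.c; declared here only so
     * the sketch is complete. */
    static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, int order);

    /* At most one reclaimer per zone: test_and_set_bit is a try-lock. */
    static int zone_reclaim_sketch(struct zone *zone, gfp_t gfp_mask, int order)
    {
            int ret;

            if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
                    return 0;       /* another task already reclaims here */

            ret = __zone_reclaim(zone, gfp_mask, order);
            zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);

            return ret;
    }
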
zone 117 mm/vmstat.c mem = zone->present_pages >> (27 - PAGE_SHIFT);
zone 134 mm/vmstat.c struct zone *zone;
zone 138 mm/vmstat.c for_each_zone(zone) {
zone 140 mm/vmstat.c if (!zone->present_pages)
zone 143 mm/vmstat.c threshold = calculate_threshold(zone);
zone 146 mm/vmstat.c zone_pcp(zone, cpu)->stat_threshold = threshold;
zone 156 mm/vmstat.c struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
zone 163 mm/vmstat.c zone_page_state_add(x, zone, item);
zone 179 mm/vmstat.c __mod_zone_page_state(zone, item, delta);
zone 209 mm/vmstat.c struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
zone 217 mm/vmstat.c zone_page_state_add(*p + overstep, zone, item);
zone 230 mm/vmstat.c struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
zone 238 mm/vmstat.c zone_page_state_add(*p - overstep, zone, item);
zone 254 mm/vmstat.c __inc_zone_state(zone, item);
zone 261 mm/vmstat.c struct zone *zone;
zone 263 mm/vmstat.c zone = page_zone(page);
zone 265 mm/vmstat.c __inc_zone_state(zone, item);
zone 300 mm/vmstat.c struct zone *zone;
zone 304 mm/vmstat.c for_each_zone(zone) {
zone 307 mm/vmstat.c if (!populated_zone(zone))
zone 310 mm/vmstat.c p = zone_pcp(zone, cpu);
zone 321 mm/vmstat.c atomic_long_add(v, &zone->vm_stat[i]);
zone 343 mm/vmstat.c if (zone_to_nid(zone) == numa_node_id()) {
zone 353 mm/vmstat.c drain_zone_pages(zone, &p->pcp);
zone 426 mm/vmstat.c struct zone *zone;
zone 427 mm/vmstat.c struct zone *node_zones = pgdat->node_zones;
zone 430 mm/vmstat.c for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
zone 431 mm/vmstat.c if (!populated_zone(zone))
zone 434 mm/vmstat.c spin_lock_irqsave(&zone->lock, flags);
zone 435 mm/vmstat.c print(m, pgdat, zone);
zone 436 mm/vmstat.c spin_unlock_irqrestore(&zone->lock, flags);
zone 445 mm/vmstat.c seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
zone 447 mm/vmstat.c seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
zone 469 mm/vmstat.c zone->name,
zone 476 mm/vmstat.c area = &(zone->free_area[order]);
zone 508 mm/vmstat.c unsigned long start_pfn = zone->zone_start_pfn;
zone 509 mm/vmstat.c unsigned long end_pfn = start_pfn + zone->spanned_pages;
zone 532 mm/vmstat.c if (page_zone(page) != zone)
zone 542 mm/vmstat.c seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
zone 685 mm/vmstat.c seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
zone 694 mm/vmstat.c zone_page_state(zone, NR_FREE_PAGES),
zone 695 mm/vmstat.c zone->pages_min,
zone 696 mm/vmstat.c zone->pages_low,
zone 697 mm/vmstat.c zone->pages_high,
zone 698 mm/vmstat.c zone->pages_scanned,
zone 699 mm/vmstat.c zone->nr_scan_active, zone->nr_scan_inactive,
zone 700 mm/vmstat.c zone->spanned_pages,
zone 701 mm/vmstat.c zone->present_pages);
zone 705 mm/vmstat.c zone_page_state(zone, i));
zone 709 mm/vmstat.c zone->lowmem_reserve[0]);
zone 710 mm/vmstat.c for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
zone 711 mm/vmstat.c seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
zone 718 mm/vmstat.c pageset = zone_pcp(zone, i);
zone 737 mm/vmstat.c zone_is_all_unreclaimable(zone),
zone 738 mm/vmstat.c zone->prev_priority,
zone 739 mm/vmstat.c zone->zone_start_pfn);
zone 806 net/ipv4/fib_hash.c struct fn_zone *zone;
zone 833 net/ipv4/fib_hash.c for (iter->zone = table->fn_zone_list; iter->zone;
zone 834 net/ipv4/fib_hash.c iter->zone = iter->zone->fz_next) {
zone 837 net/ipv4/fib_hash.c if (!iter->zone->fz_nent)
zone 840 net/ipv4/fib_hash.c iter->hash_head = iter->zone->fz_hash;
zone 841 net/ipv4/fib_hash.c maxslot = iter->zone->fz_divisor;
zone 898 net/ipv4/fib_hash.c if (!iter->zone)
zone 905 net/ipv4/fib_hash.c maxslot = iter->zone->fz_divisor;
zone 919 net/ipv4/fib_hash.c iter->zone = iter->zone->fz_next;
zone 921 net/ipv4/fib_hash.c if (!iter->zone)
zone 925 net/ipv4/fib_hash.c iter->hash_head = iter->zone->fz_hash;
zone 1023 net/ipv4/fib_hash.c mask = FZ_MASK(iter->zone);
zone 215 sound/synth/emux/emux_effect.c origp = (unsigned char*)&vp->zone->v.parm + offset;
zone 114 sound/synth/emux/emux_synth.c vp->zone = table[i];
zone 115 sound/synth/emux/emux_synth.c if (vp->zone->sample)
zone 116 sound/synth/emux/emux_synth.c vp->block = vp->zone->sample->block;
zone 514 sound/synth/emux/emux_synth.c vp->zone = NULL;
zone 567 sound/synth/emux/emux_synth.c vp->reg = vp->zone->v;
zone 522 sound/synth/emux/soundfont.c struct snd_sf_zone *zone;
zone 558 sound/synth/emux/soundfont.c for (zone = sf->zones; zone; zone = zone->next) {
zone 559 sound/synth/emux/soundfont.c if (!zone->mapped &&
zone 560 sound/synth/emux/soundfont.c zone->bank == hdr.bank &&
zone 561 sound/synth/emux/soundfont.c zone->instr == hdr.instr)
zone 590 sound/synth/emux/soundfont.c if ((zone = sf_zone_new(sflist, sf)) == NULL) {
zone 595 sound/synth/emux/soundfont.c zone->bank = tmpzone.bank;
zone 596 sound/synth/emux/soundfont.c zone->instr = tmpzone.instr;
zone 597 sound/synth/emux/soundfont.c zone->v = tmpzone.v;
zone 600 sound/synth/emux/soundfont.c zone->sample = set_sample(sf, &zone->v);
zone 949 sound/synth/emux/soundfont.c struct snd_sf_zone *zone;
zone 1005 sound/synth/emux/soundfont.c if ((zone = sf_zone_new(sflist, sf)) == NULL) {
zone 1027 sound/synth/emux/soundfont.c zone->v.sample = sample_id; /* the last sample */
zone 1028 sound/synth/emux/soundfont.c zone->v.rate_offset = calc_rate_offset(patch.base_freq);
zone 1030 sound/synth/emux/soundfont.c zone->v.root = note / 100;
zone 1031 sound/synth/emux/soundfont.c zone->v.tune = -(note % 100);
zone 1032 sound/synth/emux/soundfont.c zone->v.low = (freq_to_note(patch.low_note) + 99) / 100;
zone 1033 sound/synth/emux/soundfont.c zone->v.high = freq_to_note(patch.high_note) / 100;
zone 1035 sound/synth/emux/soundfont.c zone->v.pan = (patch.panning + 128) / 2;
zone 1038 sound/synth/emux/soundfont.c (int)patch.base_freq, zone->v.rate_offset,
zone 1039 sound/synth/emux/soundfont.c zone->v.root, zone->v.tune, zone->v.low, zone->v.high);
zone 1063 sound/synth/emux/soundfont.c zone->v.parm.volatkhld =
zone 1066 sound/synth/emux/soundfont.c zone->v.parm.voldcysus = (calc_gus_sustain(patch.env_offset[2]) << 8) |
zone 1068 sound/synth/emux/soundfont.c zone->v.parm.volrelease = 0x8000 | snd_sf_calc_parm_decay(release);
zone 1069 sound/synth/emux/soundfont.c zone->v.attenuation = calc_gus_attenuation(patch.env_offset[0]);
zone 1072 sound/synth/emux/soundfont.c zone->v.parm.volatkhld,
zone 1073 sound/synth/emux/soundfont.c zone->v.parm.voldcysus,
zone 1074 sound/synth/emux/soundfont.c zone->v.parm.volrelease,
zone 1075 sound/synth/emux/soundfont.c zone->v.attenuation);
zone 1081 sound/synth/emux/soundfont.c zone->v.parm.volrelease = 0x807f;
zone 1087 sound/synth/emux/soundfont.c zone->v.parm.tremfrq = ((patch.tremolo_depth / 2) << 8) | rate;
zone 1092 sound/synth/emux/soundfont.c zone->v.parm.fm2frq2 = ((patch.vibrato_depth / 6) << 8) | rate;
zone 1098 sound/synth/emux/soundfont.c zone->v.mode = SNDRV_SFNT_MODE_LOOPING;
zone 1100 sound/synth/emux/soundfont.c zone->v.mode = 0;
zone 1104 sound/synth/emux/soundfont.c zone->bank = 0;
zone 1105 sound/synth/emux/soundfont.c zone->instr = patch.instr_no;
zone 1106 sound/synth/emux/soundfont.c zone->mapped = 0;
zone 1107 sound/synth/emux/soundfont.c zone->v.sf_id = sf->id;
zone 1109 sound/synth/emux/soundfont.c zone->sample = set_sample(sf, &zone->v);
zone 1112 sound/synth/emux/soundfont.c add_preset(sflist, zone);
zone 1168 sound/synth/emux/soundfont.c struct snd_sf_zone *zone;
zone 1171 sound/synth/emux/soundfont.c zone = search_first_zone(sflist, cur->bank, cur->instr, cur->v.low);
zone 1172 sound/synth/emux/soundfont.c if (zone && zone->v.sf_id != cur->v.sf_id) {
zone 1176 sound/synth/emux/soundfont.c for (p = zone; p; p = p->next_zone) {
zone 1182 sound/synth/emux/soundfont.c delete_preset(sflist, zone);
zone 1183 sound/synth/emux/soundfont.c zone = NULL; /* do not forget to clear this! */
zone 1189 sound/synth/emux/soundfont.c cur->next_zone = zone; /* zone link */
zone 22 virt/kvm/coalesced_mmio.c struct kvm_coalesced_mmio_zone *zone;
zone 50 virt/kvm/coalesced_mmio.c zone = &dev->zone[i];
zone 56 virt/kvm/coalesced_mmio.c if (zone->addr <= addr &&
zone 57 virt/kvm/coalesced_mmio.c addr + len <= zone->addr + zone->size)
zone 118 virt/kvm/coalesced_mmio.c dev->zone[dev->nb_zones] = *zone;
zone 139 virt/kvm/coalesced_mmio.c z = &dev->zone[i - 1];
zone 145 virt/kvm/coalesced_mmio.c if (zone->addr <= z->addr &&
zone 146 virt/kvm/coalesced_mmio.c z->addr + z->size <= zone->addr + zone->size) {
zone 148 virt/kvm/coalesced_mmio.c *z = dev->zone[dev->nb_zones];
zone 16 virt/kvm/coalesced_mmio.h struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX];
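
In virt/kvm a zone is a guest-physical MMIO range, and both coalesced_mmio.c checks above are interval containment. A standalone sketch (unsigned long long stands in for gpa_t; names mine):

    #include <stdio.h>

    struct mmio_zone {
            unsigned long long addr;
            unsigned long long size;
    };

    /* Does [addr, addr+len) fall entirely inside the registered zone? */
    static int zone_covers(const struct mmio_zone *zone,
                           unsigned long long addr, unsigned long long len)
    {
            return zone->addr <= addr &&
                   addr + len <= zone->addr + zone->size;
    }

    int main(void)
    {
            struct mmio_zone z = { .addr = 0xa0000, .size = 0x10000 };

            printf("%d %d\n", zone_covers(&z, 0xa8000, 4),
                   zone_covers(&z, 0xaffff, 4));
            return 0;
    }
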
zone 1339 virt/kvm/kvm_main.c struct kvm_coalesced_mmio_zone zone;
zone 1341 virt/kvm/kvm_main.c if (copy_from_user(&zone, argp, sizeof zone))
zone 1344 virt/kvm/kvm_main.c r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
zone 1351 virt/kvm/kvm_main.c struct kvm_coalesced_mmio_zone zone;
zone 1353 virt/kvm/kvm_main.c if (copy_from_user(&zone, argp, sizeof zone))
zone 1356 virt/kvm/kvm_main.c r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);