__GFP_ZERO 1196 arch/x86/kernel/amd_iommu.c flag |= __GFP_ZERO;
__GFP_ZERO 416 arch/x86/kernel/amd_iommu_init.c u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
__GFP_ZERO 445 arch/x86/kernel/amd_iommu_init.c iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
__GFP_ZERO 1062 arch/x86/kernel/amd_iommu_init.c amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
__GFP_ZERO 1086 arch/x86/kernel/amd_iommu_init.c amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
__GFP_ZERO 1092 arch/x86/kernel/amd_iommu_init.c GFP_KERNEL | __GFP_ZERO,
__GFP_ZERO 146 arch/x86/kernel/pci-dma.c flag |= __GFP_ZERO;
__GFP_ZERO 497 arch/x86/kernel/pci-gart_64.c page = alloc_pages(flag | __GFP_ZERO, get_order(size));
__GFP_ZERO 676 arch/x86/kernel/pci-gart_64.c gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
__GFP_ZERO 793 arch/x86/kernel/pci-gart_64.c iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
__GFP_ZERO 800 arch/x86/kernel/pci-gart_64.c iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
__GFP_ZERO 3877 arch/x86/kvm/x86.c page = alloc_page(GFP_KERNEL | __GFP_ZERO);
__GFP_ZERO 9 arch/x86/mm/pgtable.c return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
__GFP_ZERO 17 arch/x86/mm/pgtable.c pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
__GFP_ZERO 19 arch/x86/mm/pgtable.c pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
__GFP_ZERO 231 arch/x86/mm/pgtable.c pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
__GFP_ZERO 903 arch/x86/xen/enlighten.c user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
__GFP_ZERO 1363 block/as-iosched.c ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
__GFP_ZERO 489 block/blk-core.c gfp_mask | __GFP_ZERO, node_id);
__GFP_ZERO 329 block/blk-integrity.c GFP_KERNEL | __GFP_ZERO);
__GFP_ZERO 1340 block/cfq-iosched.c cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
__GFP_ZERO 1456 block/cfq-iosched.c gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
__GFP_ZERO 1462 block/cfq-iosched.c gfp_mask | __GFP_ZERO,
__GFP_ZERO 2214 block/cfq-iosched.c cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
__GFP_ZERO 354 block/deadline-iosched.c dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
__GFP_ZERO 211 block/elevator.c eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
__GFP_ZERO 1065 block/genhd.c GFP_KERNEL | __GFP_ZERO, node_id);
__GFP_ZERO 296 fs/exec.c page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
__GFP_ZERO 19 include/asm-cris/page.h alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
__GFP_ZERO 27 include/asm-cris/pgalloc.h pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
__GFP_ZERO 34 include/asm-cris/pgalloc.h pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
__GFP_ZERO 18 include/asm-m32r/page.h alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
__GFP_ZERO 23 include/asm-m32r/pgalloc.h pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
__GFP_ZERO 36 include/asm-m32r/pgalloc.h pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
__GFP_ZERO 44 include/asm-m32r/pgalloc.h struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
__GFP_ZERO 14 include/asm-m68k/motorola_pgalloc.h pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
__GFP_ZERO 32 include/asm-m68k/motorola_pgalloc.h struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
__GFP_ZERO 123 include/asm-parisc/pgalloc.h struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
__GFP_ZERO 132 include/asm-parisc/pgalloc.h pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
__GFP_ZERO 85 include/asm-x86/page.h alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
__GFP_ZERO 152 include/asm-x86/thread_info.h #define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
__GFP_ZERO 32 include/asm-xtensa/pgalloc.h return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
__GFP_ZERO 49 include/linux/quicklist.h p = (void *)__get_free_page(flags | __GFP_ZERO);
__GFP_ZERO 186 include/linux/slab.h return __kmalloc(n * size, flags | __GFP_ZERO);
__GFP_ZERO 267 include/linux/slab.h return kmem_cache_alloc(k, flags | __GFP_ZERO);
__GFP_ZERO 277 include/linux/slab.h return kmalloc(size, flags | __GFP_ZERO);
__GFP_ZERO 288 include/linux/slab.h return kmalloc_node(size, flags | __GFP_ZERO, node);
__GFP_ZERO 102 kernel/fork.c gfp_t mask = GFP_KERNEL | __GFP_ZERO;
__GFP_ZERO 346 kernel/profile.c GFP_KERNEL | __GFP_ZERO,
__GFP_ZERO 354 kernel/profile.c GFP_KERNEL | __GFP_ZERO,
__GFP_ZERO 539 kernel/profile.c GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
__GFP_ZERO 546 kernel/profile.c GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
__GFP_ZERO 8490 kernel/sched.c GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
__GFP_ZERO 8495 kernel/sched.c GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
__GFP_ZERO 8579 kernel/sched.c GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
__GFP_ZERO 8584 kernel/sched.c GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
__GFP_ZERO 1374 kernel/timer.c GFP_KERNEL | __GFP_ZERO,
__GFP_ZERO 57 lib/genalloc.c chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
__GFP_ZERO 67 mm/allocpercpu.c pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
__GFP_ZERO 35 mm/filemap_xip.c struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
__GFP_ZERO 65 mm/mempool.c pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
__GFP_ZERO 202 mm/nommu.c ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
__GFP_ZERO 624 mm/page_alloc.c if (gfp_flags & __GFP_ZERO)
__GFP_ZERO 1669 mm/page_alloc.c page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
__GFP_ZERO 110 mm/shmem.c return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
__GFP_ZERO 3415 mm/slab.c if (unlikely((flags & __GFP_ZERO) && ptr))
__GFP_ZERO 3469 mm/slab.c if (unlikely((flags & __GFP_ZERO) && objp))
__GFP_ZERO 352 mm/slob.c b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
__GFP_ZERO 368 mm/slob.c if (unlikely((gfp & __GFP_ZERO) && b))
__GFP_ZERO 1508 mm/slub.c gfpflags &= ~__GFP_ZERO;
__GFP_ZERO 1607 mm/slub.c if (unlikely((gfpflags & __GFP_ZERO) && object))
__GFP_ZERO 52 mm/sparse-vmemmap.c GFP_KERNEL | __GFP_ZERO, get_order(size));
__GFP_ZERO 502 mm/vmalloc.c pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
__GFP_ZERO 507 mm/vmalloc.c (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
__GFP_ZERO 614 mm/vmalloc.c ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
__GFP_ZERO 697 mm/vmalloc.c ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
__GFP_ZERO 322 net/core/flow.c __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
__GFP_ZERO 308 net/core/neighbour.c __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
__GFP_ZERO 49 net/core/request_sock.c GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
__GFP_ZERO 944 net/core/sock.c sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
__GFP_ZERO 107 net/ipv4/fib_hash.c __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(size));
__GFP_ZERO 615 net/ipv4/fib_semantics.c __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(bytes));
__GFP_ZERO 357 net/ipv4/fib_trie.c return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
__GFP_ZERO 248 net/netlink/af_netlink.c __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
__GFP_ZERO 1757 net/packet/af_packet.c return (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
__GFP_ZERO 114 net/tipc/ref.c GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
__GFP_ZERO 22 net/xfrm/xfrm_hash.c n = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
__GFP_ZERO 25 net/xfrm/xfrm_hash.c __get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
__GFP_ZERO 200 sound/usb/usx2y/usb_stream.c sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
__GFP_ZERO 220 sound/usb/usx2y/usb_stream.c (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
__GFP_ZERO 169 virt/kvm/kvm_main.c page = alloc_page(GFP_KERNEL | __GFP_ZERO);
__GFP_ZERO 323 virt/kvm/kvm_main.c page = alloc_page(GFP_KERNEL | __GFP_ZERO);
__GFP_ZERO 1715 virt/kvm/kvm_main.c bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
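Nearly every hit above follows one idiom: OR __GFP_ZERO into the gfp mask so the allocator returns memory that is already cleared, instead of calling memset() on the result. The wrappers in include/linux/slab.h (lines 186, 267, 277, 288 above: kcalloc, kmem_cache_zalloc, kzalloc, kzalloc_node) are built from exactly this flag. A minimal sketch of the two common variants, assuming a 2.6-era tree; alloc_zeroed_table, struct my_ctx and alloc_ctx are hypothetical names for illustration only, not kernel symbols:

	#include <linux/gfp.h>
	#include <linux/slab.h>
	#include <asm/page.h>	/* get_order() */

	static unsigned long *alloc_zeroed_table(size_t size)
	{
		/* Page-allocator variant, as at net/ipv4/fib_hash.c:107 and
		 * arch/x86/kernel/pci-gart_64.c:676 above: the returned
		 * pages arrive pre-zeroed. */
		return (unsigned long *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
							 get_order(size));
	}

	struct my_ctx {		/* hypothetical, illustration only */
		int id;
		void *buf;
	};

	static struct my_ctx *alloc_ctx(int node)
	{
		/* Slab variant, as in the block/ elevators above; this is
		 * the same construction kzalloc_node() uses at
		 * include/linux/slab.h:288. */
		return kmalloc_node(sizeof(struct my_ctx),
				    GFP_KERNEL | __GFP_ZERO, node);
	}

The slab allocators handle the flag themselves rather than passing it through: mm/slub.c:1508 and mm/slob.c:352 strip __GFP_ZERO before requesting backing pages, then the checks at mm/slab.c:3415, mm/slob.c:368 and mm/slub.c:1607 zero the individual object on success. Only mm/page_alloc.c:624 acts on the flag at the page level.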