PUD_SIZE   85 arch/x86/kernel/head64.c            BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
PUD_SIZE   35 arch/x86/kernel/machine_kexec_64.c  end_addr = addr + PUD_SIZE;
PUD_SIZE   63 arch/x86/kernel/machine_kexec_64.c  addr += PUD_SIZE;
PUD_SIZE   68 arch/x86/kernel/machine_kexec_64.c  addr += PUD_SIZE;
PUD_SIZE   27 arch/x86/mm/hugetlbpage.c           unsigned long s_end = sbase + PUD_SIZE;
PUD_SIZE   44 arch/x86/mm/hugetlbpage.c           unsigned long end = base + PUD_SIZE;
PUD_SIZE  137 arch/x86/mm/hugetlbpage.c           if (sz == PUD_SIZE) {
PUD_SIZE  434 arch/x86/mm/hugetlbpage.c           } else if (ps == PUD_SIZE && cpu_has_gbpages) {
PUD_SIZE  782 arch/x86/mm/init_32.c               puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
PUD_SIZE  469 arch/x86/mm/init_64.c               for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
PUD_SIZE  479 arch/x86/mm/init_64.c               !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
PUD_SIZE  513 arch/x86/mm/init_64.c               last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
PUD_SIZE  549 arch/x86/mm/init_64.c               puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
PUD_SIZE  714 arch/x86/mm/init_64.c               end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT)
PUD_SIZE  214 arch/x86/mm/pgtable.c               i++, pud++, addr += PUD_SIZE) {
PUD_SIZE   50 arch/x86/power/hibernate_64.c       paddr = address + i*PUD_SIZE;
PUD_SIZE  135 include/asm-frv/pgtable.h           #define PUD_MASK (~(PUD_SIZE - 1))
PUD_SIZE   18 include/asm-generic/pgtable-nopud.h #define PUD_MASK (~(PUD_SIZE-1))
PUD_SIZE  145 include/asm-generic/pgtable.h       ({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
PUD_SIZE  144 include/asm-x86/pgtable_64.h        #define PUD_MASK (~(PUD_SIZE - 1))
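
The references above share two idioms: rounding an address range up to a whole number of PUD entries, and stepping to the next PUD-aligned boundary. Below is a minimal userspace sketch (not kernel code) of those idioms, assuming the x86-64 4-level paging value PUD_SHIFT = 30 (1 GiB per PUD entry); other architectures use different values, and the example addresses are made up for illustration.

    #include <stdio.h>

    #define PUD_SHIFT 30                     /* assumption: x86-64 value */
    #define PUD_SIZE  (1UL << PUD_SHIFT)     /* bytes covered by one PUD entry */
    #define PUD_MASK  (~(PUD_SIZE - 1))      /* as in include/asm-generic/pgtable-nopud.h */

    int main(void)
    {
            unsigned long end  = 0x100200000UL;   /* hypothetical end of range */
            unsigned long addr = 0x0c0400000UL;   /* hypothetical current address */

            /* Round up: number of PUD entries needed to cover [0, end),
             * the idiom seen in arch/x86/mm/init_32.c and init_64.c. */
            unsigned long puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;

            /* Advance to the start of the next PUD-sized region, the idiom
             * used when walking a PUD page in arch/x86/mm/init_64.c. */
            unsigned long next = (addr & PUD_MASK) + PUD_SIZE;

            printf("puds = %lu, next = %#lx\n", puds, next);
            return 0;
    }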