
root/mm/sparse.c


DEFINITIONS

This source file includes the following definitions.
  1. page_to_nid
  2. set_section_nid
  3. set_section_nid
  4. sparse_index_alloc
  5. sparse_index_init
  6. sparse_index_init
  7. __section_nr
  8. sparse_encode_early_nid
  9. sparse_early_nid
  10. mminit_validate_memmodel_limits
  11. memory_present
  12. node_memmap_size_bytes
  13. sparse_encode_mem_map
  14. sparse_decode_mem_map
  15. sparse_init_one_section
  16. usemap_size
  17. __kmalloc_section_usemap
  18. sparse_early_usemap_alloc_pgdat_section
  19. check_usemap_section_nr
  20. sparse_early_usemap_alloc_pgdat_section
  21. check_usemap_section_nr
  22. sparse_early_usemap_alloc
  23. sparse_mem_map_populate
  24. sparse_early_mem_map_alloc
  25. vmemmap_populate_print_last
  26. sparse_init
  27. kmalloc_section_memmap
  28. __kfree_section_memmap
  29. free_map_bootmem
  30. __kmalloc_section_memmap
  31. kmalloc_section_memmap
  32. __kfree_section_memmap
  33. free_map_bootmem
  34. free_section_usemap
  35. sparse_add_one_section
  36. sparse_remove_one_section

/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section       - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
        ____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
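
/*
 * Reading note (annotation, not part of the original source): with
 * SPARSEMEM_EXTREME only the array of root pointers above is static; each
 * root covers one page worth of mem_section entries (SECTIONS_PER_ROOT ==
 * PAGE_SIZE / sizeof(struct mem_section)) and is allocated on demand by
 * sparse_index_init()/sparse_index_alloc() below.  Without EXTREME the whole
 * two-dimensional table is static and SECTIONS_PER_ROOT is 1, so
 * NR_SECTION_ROOTS == NR_MEM_SECTIONS.  Purely as an illustration, 4 KiB
 * pages and a 16-byte struct mem_section would give 256 sections per root.
 */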

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
        return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif
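
/*
 * Reading note (annotation, not part of the original source):
 * NODE_NOT_IN_PAGE_FLAGS is defined when the section, zone and node numbers
 * cannot all be squeezed into page->flags.  Only the section number is then
 * kept in the flags, and page_to_nid() above pays one extra table lookup to
 * recover the node.  When the node does fit in page->flags, the stub
 * set_section_nid() compiles away and page_to_nid() comes from the generic
 * header instead.
 */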

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        if (slab_is_available())
                section = kmalloc_node(array_size, GFP_KERNEL, nid);
        else
                section = alloc_bootmem_node(NODE_DATA(nid), array_size);

        if (section)
                memset(section, 0, array_size);

        return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
        static DEFINE_SPINLOCK(index_init_lock);
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;
        int ret = 0;

        if (mem_section[root])
                return -EEXIST;

        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;
        /*
         * This lock keeps two different sections from
         * reallocating for the same index
         */
        spin_lock(&index_init_lock);

        if (mem_section[root]) {
                ret = -EEXIST;
                goto out;
        }

        mem_section[root] = section;
out:
        spin_unlock(&index_init_lock);
        return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
        unsigned long root_nr;
        struct mem_section* root;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                     break;
        }

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
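
/*
 * Reading note (annotation, not part of the original source):
 * __nr_to_section(nr) resolves a section number to
 * &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
 * __section_nr() above is the inverse: it scans the roots for the one whose
 * SECTIONS_PER_ROOT-sized block contains the pointer, then adds the offset
 * within that root.  The linear scan is acceptable because this is only
 * called on slow paths such as section removal.
 */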

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}
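
/*
 * Reading note (annotation, not part of the original source):
 * memory_present() below stores (nid << SECTION_NID_SHIFT) |
 * SECTION_MARKED_PRESENT in section_mem_map, so during early boot
 * sparse_early_nid() gets the node back simply by shifting the flag bits
 * out again.  The temporary encoding is overwritten with the real encoded
 * mem_map pointer in sparse_init_one_section().
 */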

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                                unsigned long *end_pfn)
{
        unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

        /*
         * Sanity checks - do not allow an architecture to pass
         * in larger pfns than the maximum scope of sparsemem:
         */
        if (*start_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *start_pfn = max_sparsemem_pfn;
                *end_pfn = max_sparsemem_pfn;
        }

        if (*end_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *end_pfn = max_sparsemem_pfn;
        }
}
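
/*
 * Reading note (annotation, not part of the original source):
 * max_sparsemem_pfn is the first pfn the section tables can no longer
 * describe.  Purely as an illustration, MAX_PHYSMEM_BITS == 46 and
 * PAGE_SHIFT == 12 would give 1UL << 34 pfns, i.e. 64 TiB of addressable
 * physical memory.
 */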

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start &= PAGE_SECTION_MASK;
        mminit_validate_memmodel_limits(&start, &end);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);
                set_section_nid(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map)
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_MARKED_PRESENT;
        }
}
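
/*
 * Reading note (annotation, not part of the original source): architectures
 * call memory_present(nid, start_pfn, end_pfn) for each physical range they
 * discover at boot.  Every PAGES_PER_SECTION-sized chunk of the range gets
 * its root allocated via sparse_index_init() and its mem_section marked
 * present, with the node id encoded as described above until sparse_init()
 * installs the real mem_map.
 */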

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                                     unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long nr_pages = 0;

        mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;

                if (pfn_present(pfn))
                        nr_pages += PAGES_PER_SECTION;
        }

        return nr_pages * sizeof(struct page);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        /* mask off the extra low bits of information */
        coded_mem_map &= SECTION_MAP_MASK;
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
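
/*
 * Reading note (annotation, not part of the original source): the
 * encode/decode pair preserves the identity
 *
 *   pfn_to_page(pfn) == (struct page *)(section_mem_map & SECTION_MAP_MASK) + pfn
 *
 * for every pfn inside a section: encoding subtracts the section's first pfn
 * from the mem_map base, so a lookup only has to add the pfn, with no extra
 * subtraction on the fast path.  Decoding adds section_nr_to_pfn(pnum) back
 * to recover the start of the section's mem_map.
 */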

static int __meminit sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map,
                unsigned long *pageblock_bitmap)
{
        if (!present_section(ms))
                return -EINVAL;

        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
                                                        SECTION_HAS_MEM_MAP;
        ms->pageblock_flags = pageblock_bitmap;

        return 1;
}

unsigned long usemap_size(void)
{
        unsigned long size_bytes;
        size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
        size_bytes = roundup(size_bytes, sizeof(unsigned long));
        return size_bytes;
}
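
/*
 * Reading note (annotation, not part of the original source):
 * SECTION_BLOCKFLAGS_BITS is (PAGES_PER_SECTION / pageblock_nr_pages) *
 * NR_PAGEBLOCK_BITS.  Purely as an illustration, 2^15 pages per section, a
 * pageblock order of 9 and NR_PAGEBLOCK_BITS == 4 would give
 * (2^15 / 2^9) * 4 = 256 bits; the two roundups turn that into 32 bytes,
 * a whole number of unsigned longs.
 */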

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
        return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
{
        unsigned long section_nr;

        /*
         * A page may contain usemaps for other sections, preventing the
         * page from being freed and making a section unremovable while
         * other sections referencing the usemap remain active. Similarly,
         * a pgdat can prevent a section being removed. If section A
         * contains a pgdat and section B contains the usemap, both
         * sections become inter-dependent. This allocates usemaps
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
        section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        return alloc_bootmem_section(usemap_size(), section_nr);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
        unsigned long usemap_snr, pgdat_snr;
        static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
        static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
        struct pglist_data *pgdat = NODE_DATA(nid);
        int usemap_nid;

        usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
        pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        if (usemap_snr == pgdat_snr)
                return;

        if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
                /* skip redundant message */
                return;

        old_usemap_snr = usemap_snr;
        old_pgdat_snr = pgdat_snr;

        usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
        if (usemap_nid != nid) {
                printk(KERN_INFO
                       "node %d must be removed before remove section %ld\n",
                       nid, usemap_snr);
                return;
        }
        /*
         * There is a circular dependency.
         * Some platforms allow an un-removable section because they will just
         * gather other removable sections for dynamic partitioning.
         * Just report the un-removable section's number here.
         */
        printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
               pgdat_snr, nid);
        printk(KERN_CONT
               " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
{
        return NULL;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
{
        unsigned long *usemap;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);

        usemap = sparse_early_usemap_alloc_pgdat_section(NODE_DATA(nid));
        if (usemap)
                return usemap;

        usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
        if (usemap) {
                check_usemap_section_nr(nid, usemap);
                return usemap;
        }

        /* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
        nid = 0;

        printk(KERN_WARNING "%s: allocation failed\n", __func__);
        return NULL;
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
        struct page *map;

        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        map = alloc_bootmem_pages_node(NODE_DATA(nid),
                       PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
        return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
        struct page *map;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);

        map = sparse_mem_map_populate(pnum, nid);
        if (map)
                return map;

        printk(KERN_ERR "%s: sparsemem memory map backing failed, "
                        "some memory will not be available.\n", __func__);
        ms->section_mem_map = 0;
        return NULL;
}

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
        unsigned long pnum;
        struct page *map;
        unsigned long *usemap;
        unsigned long **usemap_map;
        int size;

        /*
         * Each mem_map is backed by a big page (2M on 64-bit x86), while a
         * usemap is far smaller than a page (roughly 24 bytes).  Allocating
         * a 2M-aligned mem_map and then a usemap in turn would push every
         * following mem_map into the next 2M block, leaving a big system
         * with many holes.  So try to keep the 2M mem_map allocations
         * contiguous by allocating all the usemaps up front.
         *
         * powerpc needs to call sparse_init_one_section() right after each
         * sparse_early_mem_map_alloc(), so allocate usemap_map first.
         */
        size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
        usemap_map = alloc_bootmem(size);
        if (!usemap_map)
                panic("can not allocate usemap_map\n");

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                if (!present_section_nr(pnum))
                        continue;
                usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
        }

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                if (!present_section_nr(pnum))
                        continue;

                usemap = usemap_map[pnum];
                if (!usemap)
                        continue;

                map = sparse_early_mem_map_alloc(pnum);
                if (!map)
                        continue;

                sparse_init_one_section(__nr_to_section(pnum), pnum, map,
                                                                usemap);
        }

        vmemmap_populate_print_last();

        free_bootmem(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                                                 unsigned long nr_pages)
{
        /* This will make the necessary allocations eventually. */
        return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
        return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * nr_pages;

        page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
        if (page)
                goto got_map_page;

        ret = vmalloc(memmap_size);
        if (ret)
                goto got_map_ptr;

        return NULL;
got_map_page:
        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
        memset(ret, 0, memmap_size);

        return ret;
}
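
/*
 * Reading note (annotation, not part of the original source): a hot-added
 * section cannot get its memmap from bootmem, so the non-VMEMMAP path above
 * first tries a physically contiguous alloc_pages() allocation and falls
 * back to vmalloc() when a large enough contiguous block is unavailable.
 * __kfree_section_memmap() below mirrors the same distinction with
 * is_vmalloc_addr().
 */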

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                                                  unsigned long nr_pages)
{
        return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
        if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * nr_pages));
}

static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
        unsigned long maps_section_nr, removing_section_nr, i;
        int magic;

        for (i = 0; i < nr_pages; i++, page++) {
                magic = atomic_read(&page->_mapcount);

                BUG_ON(magic == NODE_INFO);

                maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
                removing_section_nr = page->private;

                /*
                 * When this function is called, the section being removed is
                 * in a logically offlined state, meaning all of its pages are
                 * isolated from the page allocator.  If the removed section's
                 * memmap is placed in that same section, it must not be freed:
                 * the page allocator might hand it out again even though it is
                 * about to be removed physically.
                 */
                if (maps_section_nr != removing_section_nr)
                        put_page_bootmem(page);
        }
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
        struct page *usemap_page;
        unsigned long nr_pages;

        if (!usemap)
                return;

        usemap_page = virt_to_page(usemap);
        /*
         * Check to see if allocation came from hot-plug-add
         */
        if (PageSlab(usemap_page)) {
                kfree(usemap);
                if (memmap)
                        __kfree_section_memmap(memmap, PAGES_PER_SECTION);
                return;
        }

        /*
         * The usemap came from bootmem. It is packed with other usemaps
         * into the section holding the pgdat at boot time, so just leave it
         * as it is for now.
         */

        if (memmap) {
                struct page *memmap_page;
                memmap_page = virt_to_page(memmap);

                nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                        >> PAGE_SHIFT;

                free_map_bootmem(memmap_page, nr_pages);
        }
}

/*
 * returns the number of sections whose mem_maps were properly
 * set.  If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
                           int nr_pages)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct pglist_data *pgdat = zone->zone_pgdat;
        struct mem_section *ms;
        struct page *memmap;
        unsigned long *usemap;
        unsigned long flags;
        int ret;

        /*
         * No locking needed here: sparse_index_init() does its own locking,
         * and it may also allocate with kmalloc().
         */
        ret = sparse_index_init(section_nr, pgdat->node_id);
        if (ret < 0 && ret != -EEXIST)
                return ret;
        memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
        if (!memmap)
                return -ENOMEM;
        usemap = __kmalloc_section_usemap();
        if (!usemap) {
                __kfree_section_memmap(memmap, nr_pages);
                return -ENOMEM;
        }

        pgdat_resize_lock(pgdat, &flags);

        ms = __pfn_to_section(start_pfn);
        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
                ret = -EEXIST;
                goto out;
        }

        ms->section_mem_map |= SECTION_MARKED_PRESENT;

        ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
        pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0) {
                kfree(usemap);
                __kfree_section_memmap(memmap, nr_pages);
        }
        return ret;
}
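
/*
 * Reading note (annotation, not part of the original source): in this kernel
 * version the memory hotplug core (__add_section() in mm/memory_hotplug.c)
 * is the caller.  On success the positive return value comes from
 * sparse_init_one_section(); -EEXIST means the section was already present,
 * and in that case (or on any ret <= 0) the freshly allocated memmap and
 * usemap are released above before returning.
 */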

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
        struct page *memmap = NULL;
        unsigned long *usemap = NULL;

        if (ms->section_mem_map) {
                usemap = ms->pageblock_flags;
                memmap = sparse_decode_mem_map(ms->section_mem_map,
                                                __section_nr(ms));
                ms->section_mem_map = 0;
                ms->pageblock_flags = NULL;
        }

        free_section_usemap(memmap, usemap);
}
#endif
