Linux Physical Memory Description (Part 2)

火星人 @ 2014-03-03

Zones

Each node's memory is divided into zones, and each zone is described by a struct zone:
struct zone {
        /* Fields commonly accessed by the page allocator */

        /* zone watermarks, access with *_wmark_pages(zone) macros */
        /* The zone's three watermarks: high (memory is ample), low, and min. */
        unsigned long watermark[NR_WMARK];

        /*
         * We don't know if the memory that we're going to allocate will be freeable
         * or/and it will be released eventually, so to avoid totally wasting several
         * GB of ram we must reserve some of the lower zone memory (otherwise we risk
         * to run OOM on the lower zones despite there's tons of freeable ram
         * on the higher zones). This array is recalculated at runtime if the
         * sysctl_lowmem_reserve_ratio sysctl changes.
         */
          /*
           * When an allocation cannot be satisfied from the highmem or
           * normal zone, it falls back to the normal or DMA zone. To keep
           * the DMA zone from being exhausted by such fallbacks, some
           * memory must be held back for drivers. This field records how
           * many pages a zone must keep in reserve when a higher zone
           * falls back to it.
           */
        unsigned long                lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
        /* The NUMA node this zone belongs to. */
        int node;
        /*
         * zone reclaim becomes active if more unmapped pages exist.
         */
         /* Page reclaim is triggered once the number of unmapped pages exceeds this value. */
        unsigned long                min_unmapped_pages;
        /* When the zone's reclaimable slab pages exceed this value, cached slab pages are reclaimed. */
        unsigned long                min_slab_pages;
        /*
         * Per-CPU page cache. Single-page allocations are served from
         * this cache first, which:
         *  - avoids taking the global zone lock;
         *  - avoids the same page being handed back and forth between
         *    CPUs, which would invalidate cache lines;
         *  - avoids splitting the zone's large blocks into fragments.
         */
        struct per_cpu_pageset        *pageset[NR_CPUS];
#else
        struct per_cpu_pageset        pageset[NR_CPUS];
#endif
        /*
         * free areas of different sizes
         */
         /* Protects the buddy-system data structures, i.e. the free_area fields. */
        spinlock_t                lock;
#ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        /* Protects spanned_pages, present_pages and friends. These fields
           almost never change except during memory hotplug, are not covered
           by the lock field above, and are mostly read, hence the seqlock. */
        seqlock_t                span_seqlock;
#endif
        /* The buddy system's core data: an array of MAX_ORDER (11) free
           lists, where list n holds free blocks of 2^n contiguous pages. */
        struct free_area        free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
        /*
         * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
         * In SPARSEMEM, this map is stored in struct mem_section
         */
         /* Array of pageblock flags for this zone. */
        unsigned long                *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

        /* Unused padding to make sure the following fields are cache-line aligned. */
        ZONE_PADDING(_pad1_)

        /* Fields commonly accessed by the page reclaim scanner */
        /*
         * The lru fields below are used by page reclaim; this lock
         * protects them. The LRU lists track which pages are active and
         * which are not, and thus which should be written back to disk
         * to free memory.
         */
        spinlock_t                lru_lock;
        /* List heads for the anonymous active/inactive and file-backed
           active/inactive page lists. */
        struct zone_lru {
                struct list_head list;
        } lru[NR_LRU_LISTS];
        /* Page reclaim statistics. */
        struct zone_reclaim_stat reclaim_stat;
        /* Number of pages scanned since the last reclaim. */
        unsigned long                pages_scanned;           /* since last reclaim */
        unsigned long                flags;                   /* zone flags, see below */

        /* Zone statistics */
        atomic_long_t                vm_stat[NR_VM_ZONE_STAT_ITEMS];

        /*
         * prev_priority holds the scanning priority for this zone.  It is
         * defined as the scanning priority at which we achieved our reclaim
         * target at the previous try_to_free_pages() or balance_pgdat()
         * invocation.
         *
         * We use prev_priority as a measure of how much stress page reclaim is
         * under - it drives the swappiness decision: whether to unmap mapped
         * pages.
         *
         * Access to this field is quite racy even on uniprocessor.  But
         * it is expected to average out OK.
         */
        int prev_priority;

        /*
         * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
         * this zone's LRU.  Maintained by the pageout code.
         */
        unsigned int inactive_ratio;

        /* Padding for cache-line alignment. */
        ZONE_PADDING(_pad2_)
        /* Rarely used or read-mostly fields */

        /*
         * wait_table                -- the array holding the hash table
         * wait_table_hash_nr_entries        -- the size of the hash table array
         * wait_table_bits        -- wait_table_size == (1 << wait_table_bits)
         *
         * The purpose of all these is to keep track of the people
         * waiting for a page to become available and make them
         * runnable again when possible. The trouble is that this
         * consumes a lot of space, especially when so few things
         * wait on pages at a given time. So instead of using
         * per-page waitqueues, we use a waitqueue hash table.
         *
         * The bucket discipline is to sleep on the same queue when
         * colliding and wake all in that wait queue when removing.
         * When something wakes, it must check to be sure its page is
         * truly available, a la thundering herd. The cost of a
         * collision is great, but given the expected load of the
         * table, they should be so rare as to be outweighed by the
         * benefits from the saved space.
         *
         * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
         * primary users of these fields, and in mm/page_alloc.c
         * free_area_init_core() performs the initialization of them.
         */
        wait_queue_head_t        * wait_table;
        unsigned long                wait_table_hash_nr_entries;
        unsigned long                wait_table_bits;

        /*
         * Discontig memory support fields.
         */
         /* The node (pglist_data) this zone belongs to. */
        struct pglist_data        *zone_pgdat;
        /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
        /* The zone's first page frame number, i.e. the offset of its pages within mem_map. */
        unsigned long                zone_start_pfn;

        /*
         * zone_start_pfn, spanned_pages and present_pages are all
         * protected by span_seqlock.  It is a seqlock because it has
         * to be read outside of zone->lock, and it is done in the main
         * allocator path.  But, it is written quite infrequently.
         *
         * The lock is declared along with zone->lock because it is
         * frequently read in proximity to zone->lock.  It's good to
         * give them a chance of being in the same cacheline.
         */
        unsigned long                spanned_pages;        /* total size, including holes */
        unsigned long                present_pages;        /* amount of memory (excluding holes) */

        /*
         * rarely used fields:
         */
        const char                *name;
} ____cacheline_internodealigned_in_smp;

Where no annotation is given above, the kernel's own English comments already explain the field clearly.
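To see how watermark and lowmem_reserve work together, here is a minimal sketch loosely modeled on the kernel's zone_watermark_ok() logic. The zone_sketch type, the zone_has_room() helper, and the free_pages parameter are stand-ins for illustration, not the kernel's API:

#include <stdbool.h>

/* Stand-in types; the real definitions live in include/linux/mmzone.h. */
enum { WMARK_MIN, WMARK_LOW, WMARK_HIGH, NR_WMARK };
#define MAX_NR_ZONES 4

struct zone_sketch {
        unsigned long watermark[NR_WMARK];          /* min/low/high, in pages */
        unsigned long lowmem_reserve[MAX_NR_ZONES];
};

/*
 * May an allocation whose preferred zone has index classzone_idx be
 * served from this (possibly lower) zone?  The zone must keep
 * watermark[wmark] + lowmem_reserve[classzone_idx] pages free, so that
 * fallbacks from higher zones cannot exhaust it.
 */
static bool zone_has_room(const struct zone_sketch *z, int wmark,
                          int classzone_idx, unsigned long free_pages)
{
        unsigned long min = z->watermark[wmark] +
                            z->lowmem_reserve[classzone_idx];
        return free_pages > min;
}

Roughly speaking, ordinary allocations are checked against the low watermark, kswapd reclaims until free memory climbs back above the high watermark, and the min watermark is the last-ditch reserve.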
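The pageset field serves a similar fast-path role for single pages. The sketch below illustrates the batching idea only; pcp_sketch, PCP_BATCH, buddy_alloc_one_page() and pcp_alloc() are all illustrative assumptions, not kernel functions:

#define PCP_BATCH 16

struct pcp_sketch {
        int count;                         /* pages currently cached */
        void *pages[PCP_BATCH * 2];        /* CPU-local page stash   */
};

/* Illustrative refill from the buddy core (assumption, not kernel API). */
extern void *buddy_alloc_one_page(void);

/*
 * Order-0 allocation: serve from the CPU-local stash, refilling a batch
 * at a time so the zone lock is taken once per PCP_BATCH pages rather
 * than once per page.
 */
static void *pcp_alloc(struct pcp_sketch *pcp)
{
        if (pcp->count == 0) {
                int i;
                for (i = 0; i < PCP_BATCH; i++)
                        pcp->pages[pcp->count++] = buddy_alloc_one_page();
        }
        return pcp->pages[--pcp->count];
}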
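Likewise, free_area[] is the backbone of the buddy allocator: slot n holds free blocks of 2^n contiguous pages. A minimal lookup sketch (free_area_sketch and smallest_available_order() are illustrative assumptions) might look like this:

#define MAX_ORDER 11

struct free_area_sketch {
        unsigned long nr_free;        /* number of free 2^order blocks */
};

/*
 * Find the smallest order >= order that still has a free block.  If a
 * larger block is found, the buddy allocator splits it in half
 * repeatedly down to the requested order, returning each unused half
 * to the appropriate free list.
 */
static int smallest_available_order(const struct free_area_sketch area[MAX_ORDER],
                                    int order)
{
        int o;

        for (o = order; o < MAX_ORDER; o++)
                if (area[o].nr_free > 0)
                        return o;
        return -1;        /* nothing large enough: fall back to another zone */
}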

Pages
Every physical page in the system has an associated struct page that records what the page is being used for at the moment.
/*
* Each physical page in the system has a struct page associated with
* it to keep track of whatever it is we are using the page for at the
* moment. Note that we have no way to track which tasks are using
* a page, though if it is a pagecache page, rmap structures can tell us
* who is mapping it.
*/
struct page {
        unsigned long flags;                /* Atomic flags, some possibly
                                         * updated asynchronously */
        atomic_t _count;                /* Usage count, see below. */
        union {
                atomic_t _mapcount;        /* Count of ptes mapped in mms,
                                         * to show when page is mapped
                                         * & limit reverse map searches.
                                         */
                struct {                /* SLUB */
                        u16 inuse;
                        u16 objects;
                };
        };
        union {
            struct {
                unsigned long private;                /* Mapping-private opaque data:
                                                  * usually used for buffer_heads
                                                 * if PagePrivate set; used for
                                                 * swp_entry_t if PageSwapCache;
                                                 * indicates order in the buddy
                                                 * system if PG_buddy is set.
                                                 */
                struct address_space *mapping;        /* If low bit clear, points to
                                                 * inode address_space, or NULL.
                                                 * If page mapped as anonymous
                                                 * memory, low bit is set, and
                                                 * it points to anon_vma object:
                                                 * see PAGE_MAPPING_ANON below.
                                                 */
            };
#if USE_SPLIT_PTLOCKS
            spinlock_t ptl;
#endif
            struct kmem_cache *slab;        /* SLUB: Pointer to slab */
        /* If this page is part of a compound page and is not the first
           page of the group, this points to the first (head) page. */
            struct page *first_page;        /* Compound tail pages */
        };
        union {        /* For a file mapping, the page's position (offset) within the file. */
                pgoff_t index;                /* Our offset within mapping. */
                void *freelist;                /* SLUB: freelist req. slab lock */
        };
        struct list_head lru;                /* Pageout list, eg. active_list
                                         * protected by zone->lru_lock !
                                         */
        /*
         * On machines where all RAM is mapped into kernel address space,
         * we can simply calculate the virtual address. On machines with
         * highmem some memory is mapped into kernel virtual memory
         * dynamically, so we need a place to store that address.
         * Note that this field could be 16 bits on x86 ... ;)
         *
         * Architectures with slow multiplication can define
         * WANT_PAGE_VIRTUAL in asm/page.h
         */
#if defined(WANT_PAGE_VIRTUAL)
        void *virtual;                        /* Kernel virtual address (NULL if
                                           not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
        unsigned long debug_flags;        /* Use atomic bitops on this */
#endif

#ifdef CONFIG_KMEMCHECK
        /*
         * kmemcheck wants to track the status of each byte in a page; this
         * is a pointer to such a status block. NULL if not tracked.
         */
        void *shadow;
#endif
};

These core structures embody the design of Linux physical memory management; later articles will go into the individual details of Linux memory management.
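As a closing illustration of how struct page relates to physical frames: with a flat memory model there is one struct page per page frame, stored in the global mem_map array, so converting between a page frame number (pfn) and its descriptor is plain pointer arithmetic. The sketch below mirrors the idea behind the kernel's pfn_to_page()/page_to_pfn() macros under CONFIG_FLATMEM; page_sketch, page_array and pfn_offset are stand-ins for illustration:

/* Stand-in descriptor; the real struct page is shown above. */
struct page_sketch { unsigned long flags; };

static struct page_sketch *page_array;        /* base of the per-frame array (mem_map) */
static unsigned long pfn_offset;              /* first managed page frame number */

static inline struct page_sketch *pfn_to_page_sketch(unsigned long pfn)
{
        return page_array + (pfn - pfn_offset);
}

static inline unsigned long page_to_pfn_sketch(const struct page_sketch *page)
{
        return (unsigned long)(page - page_array) + pfn_offset;
}

With SPARSEMEM the map is instead split into sections (which is why pageblock_flags moves into struct mem_section, as the struct zone comment above notes), so the conversion goes through the section table rather than a single array.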