diff -urpN -X /home/fletch/.diff.exclude 86-pfn_to_nid/fs/proc/task_mmu.c 87-pidmaps_nodepages/fs/proc/task_mmu.c
--- 86-pfn_to_nid/fs/proc/task_mmu.c	Fri Jan 17 09:18:30 2003
+++ 87-pidmaps_nodepages/fs/proc/task_mmu.c	Sat Feb 1 22:46:13 2003
@@ -2,6 +2,7 @@
 #include
 #include
 #include
+#include
 
 char *task_mem(struct mm_struct *mm, char *buffer)
 {
@@ -111,7 +112,44 @@ int task_statm(struct mm_struct *mm, int
 #define MAPS_LINE_FORMAT	(sizeof(void*) == 4 ? MAPS_LINE_FORMAT4 : MAPS_LINE_FORMAT8)
 #define MAPS_LINE_MAX	(sizeof(void*) == 4 ? MAPS_LINE_MAX4 : MAPS_LINE_MAX8)
 
-static int proc_pid_maps_get_line (char *buf, struct vm_area_struct *map)
+extern pte_t __follow_page(struct mm_struct *mm, unsigned long address);
+static int print_vma_nodepages(char* buf, struct mm_struct *mm, struct vm_area_struct *map)
+{
+	int retval = 0;
+	unsigned long vaddr = map->vm_start;
+	unsigned long vm_end = map->vm_end;
+	pte_t pte;
+	unsigned long pfn;
+	int pages_per_node[MAX_NR_NODES];
+	int i;
+
+	if (numnodes<=1)
+		goto out;
+
+	for (i=0; i<MAX_NR_NODES; i++)
+		pages_per_node[i] = 0;
+
+	for (; vaddr < vm_end; vaddr += PAGE_SIZE) {
+		spin_lock(&mm->page_table_lock);
+		pte = __follow_page(mm, vaddr);
+		spin_unlock(&mm->page_table_lock);
+		pfn = pte_pfn(pte);
+		if (pfn)	/* don't count the zero page */
+			pages_per_node[pfn_to_nid(pfn)]++;
+	}
+	retval += sprintf(&buf[retval]," #");
+	for (i=0; i<numnodes; i++)
+		retval += sprintf(&buf[retval], " %d", pages_per_node[i]);
+out:
+	return retval;
+}
+
+static int proc_pid_maps_get_line (char *buf, struct mm_struct *mm, struct vm_area_struct *map)
 {
@@ -138,7 +176,7 @@ static int proc_pid_maps_get_line (char
 		line = d_path(map->vm_file->f_dentry,
 			      map->vm_file->f_vfsmnt,
 			      buf, PAGE_SIZE);
-		buf[PAGE_SIZE-1] = '\n';
+		//buf[PAGE_SIZE-1] = '\n';
 		line -= MAPS_LINE_MAX;
 		if(line < buf)
 			line = buf;
@@ -149,15 +187,15 @@ static int proc_pid_maps_get_line (char
 		      MAPS_LINE_FORMAT,
 		      map->vm_start, map->vm_end, str, map->vm_pgoff << PAGE_SHIFT,
 		      MAJOR(dev), MINOR(dev), ino);
-
 	if(map->vm_file) {
 		int i;
 		for(i = len; i < MAPS_LINE_MAX; i++)
 			line[i] = ' ';
 		len = buf + PAGE_SIZE - line;
 		memmove(buf, line, len);
-	} else
-		line[len++] = '\n';
+	}
+	//else
+	//	line[len++] = '\n';
 	return len;
 }
 
@@ -207,7 +245,8 @@ ssize_t proc_pid_read_maps(struct task_s
 			off -= PAGE_SIZE;
 			goto next;
 		}
-		len = proc_pid_maps_get_line(tmp, map);
+		len = proc_pid_maps_get_line(tmp, mm, map);
+		len += print_vma_nodepages(&tmp[len], mm, map);
 		len -= off;
 		if (len > 0) {
 			if (retval+len > count) {
diff -urpN -X /home/fletch/.diff.exclude 86-pfn_to_nid/include/asm-i386/mmzone.h 87-pidmaps_nodepages/include/asm-i386/mmzone.h
--- 86-pfn_to_nid/include/asm-i386/mmzone.h	Sat Feb 1 22:21:15 2003
+++ 87-pidmaps_nodepages/include/asm-i386/mmzone.h	Sat Feb 1 22:46:13 2003
@@ -8,14 +8,17 @@
 
 #include
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_DISCONTIGMEM
+
+#define pfn_to_nid(pfn) (0)
+
+#else
 
 #ifdef CONFIG_X86_NUMAQ
 #include
 #elif CONFIG_X86_SUMMIT
 #include
 #else
-#define pfn_to_nid(pfn) (0)
 #endif /* CONFIG_X86_NUMAQ */
 
 extern struct pglist_data *node_data[];
diff -urpN -X /home/fletch/.diff.exclude 86-pfn_to_nid/mm/memory.c 87-pidmaps_nodepages/mm/memory.c
--- 86-pfn_to_nid/mm/memory.c	Sat Feb 1 22:04:44 2003
+++ 87-pidmaps_nodepages/mm/memory.c	Sat Feb 1 22:46:13 2003
@@ -612,13 +612,12 @@ void zap_page_range(struct vm_area_struc
  * Do a quick page-table lookup for a single page.
  * mm->page_table_lock must be held.
  */
-struct page *
-follow_page(struct mm_struct *mm, unsigned long address, int write)
+pte_t
+__follow_page(struct mm_struct *mm, unsigned long address)
 {
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *ptep, pte;
-	unsigned long pfn;
 
 	pgd = pgd_offset(mm, address);
 	if (pgd_none(*pgd) || pgd_bad(*pgd))
@@ -629,11 +628,25 @@ follow_page(struct mm_struct *mm, unsign
 		goto out;
 
 	ptep = pte_offset_map(pmd, address);
-	if (!ptep)
+	if (!ptep) {
+		pte.pte_low = 0; //__bad_page();
+		pte.pte_high = 0;
 		goto out;
-
+	}
 	pte = *ptep;
 	pte_unmap(ptep);
+
+out:
+	return pte;
+}
+
+struct page *
+follow_page(struct mm_struct *mm, unsigned long address, int write)
+{
+	pte_t pte;
+	unsigned long pfn;
+
+	pte = __follow_page(mm, address);
 	if (pte_present(pte)) {
 		if (!write || (pte_write(pte) && pte_dirty(pte))) {
 			pfn = pte_pfn(pte);
@@ -642,7 +655,6 @@ follow_page(struct mm_struct *mm, unsign
 		}
 	}
 
-out:
 	return NULL;
 }
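
Illustration only, not part of the patch: with print_vma_nodepages() called from proc_pid_read_maps(), each per-VMA line of /proc/<pid>/maps on a box with numnodes > 1 gains a trailing " # <pages on node 0> <pages on node 1> ..." field. The user-space sketch below just dumps the file so the new field can be eyeballed; the sample line in the comment uses made-up addresses, inode number and per-node counts.

#include <stdio.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	/* With the patch applied on a hypothetical 4-node machine, a line
	 * might look like (all values invented for illustration):
	 *   08048000-080b0000 r-xp 00000000 03:01 40066   /bin/bash   # 17 3 0 9
	 * The " # 17 3 0 9" suffix is what print_vma_nodepages() appends,
	 * one resident-page count per node.
	 */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}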