From: Marcelo Tosatti

Signed-off-by: Andrew Morton
---

 25-akpm/mm/vmscan.c |   36 ++++++++++++++++++++++++++++++++----
 1 files changed, 32 insertions(+), 4 deletions(-)

diff -puN mm/vmscan.c~vm-swapout-throttling mm/vmscan.c
--- 25/mm/vmscan.c~vm-swapout-throttling	Tue Aug 31 14:32:35 2004
+++ 25-akpm/mm/vmscan.c	Tue Aug 31 14:32:35 2004
@@ -74,6 +74,10 @@ struct scan_control {
 	unsigned int gfp_mask;
 
 	int may_writepage;
+
+	int inflight;
+
+	int throttled;	/* how many times have we throttled on VM inflight IO limit */
 };
 
 /*
@@ -246,8 +250,30 @@ static inline int is_page_cache_freeable
 	return page_count(page) - !!PagePrivate(page) == 2;
 }
 
-static int may_write_to_queue(struct backing_dev_info *bdi)
+/*
+ * This function calculates the maximum pinned-for-IO memory
+ * the page eviction threads can generate. If we hit the max,
+ * we throttle taking a nap.
+ *
+ * Returns true if we cant writeout.
+ */
+int vm_eviction_limits(struct scan_control *sc)
+{
+	if (sc->inflight > (totalram_pages * vm_dirty_ratio) / 100) {
+		if (sc->throttled < 5) {
+			blk_congestion_wait(WRITE, HZ/5);
+			sc->throttled++;
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static int may_write_to_queue(struct backing_dev_info *bdi, struct scan_control *sc)
 {
+	if (vm_eviction_limits(sc))	/* Check VM writeout limit */
+		return 0;
+
 	if (current_is_kswapd())
 		return 1;
 	if (current_is_pdflush())	/* This is unlikely, but why not... */
@@ -287,7 +313,7 @@ static void handle_write_error(struct ad
 /*
  * pageout is called by shrink_list() for each dirty page. Calls ->writepage().
  */
-static pageout_t pageout(struct page *page, struct address_space *mapping)
+static pageout_t pageout(struct page *page, struct address_space *mapping, struct scan_control *sc)
 {
 	/*
 	 * If the page is dirty, only perform writeback if that write
@@ -312,7 +338,7 @@ static pageout_t pageout(struct page *pa
 		return PAGE_KEEP;
 	if (mapping->a_ops->writepage == NULL)
 		return PAGE_ACTIVATE;
-	if (!may_write_to_queue(mapping->backing_dev_info))
+	if (!may_write_to_queue(mapping->backing_dev_info, sc))
 		return PAGE_KEEP;
 
 	if (clear_page_dirty_for_io(page)) {
@@ -422,7 +448,7 @@ static int shrink_list(struct list_head
 			goto keep_locked;
 
 		/* Page is dirty, try to write it out here */
-		switch(pageout(page, mapping)) {
+		switch (pageout(page, mapping, sc)) {
 		case PAGE_KEEP:
 			goto keep_locked;
 		case PAGE_ACTIVATE:
@@ -813,6 +839,7 @@ shrink_zone(struct zone *zone, struct sc
 		nr_inactive = 0;
 
 	sc->nr_to_reclaim = SWAP_CLUSTER_MAX;
+	sc->throttled = 0;
 
 	while (nr_active || nr_inactive) {
 		if (nr_active) {
@@ -825,6 +852,7 @@ shrink_zone(struct zone *zone, struct sc
 		if (nr_inactive) {
 			sc->nr_to_scan = min(nr_inactive,
 					(unsigned long)SWAP_CLUSTER_MAX);
+			sc->inflight = read_page_state(nr_writeback);
 			nr_inactive -= sc->nr_to_scan;
 			shrink_cache(zone, sc);
 			if (sc->nr_to_reclaim <= 0)
_
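
To see the throttling heuristic in isolation: the patch skips writeout once the pages already under writeback exceed vm_dirty_ratio percent of RAM, and naps (via blk_congestion_wait(WRITE, HZ/5)) at most five times per shrink_zone() pass. Below is a minimal, self-contained userspace C sketch of that check. The totalram_pages value, HZ, the nap() helper and main() are stand-ins for illustration only, not the real kernel symbols; the actual implementation is the vm_eviction_limits() hunk above.

	#include <stdio.h>

	#define HZ 100

	static unsigned long totalram_pages = 262144;	/* stand-in: 1GB of 4K pages */
	static int vm_dirty_ratio = 40;			/* stand-in for the sysctl value */

	struct scan_control {
		int inflight;	/* pages under writeback when this scan pass started */
		int throttled;	/* naps already taken in this shrink_zone() pass */
	};

	/* Stand-in for blk_congestion_wait(WRITE, HZ/5): just report the nap. */
	static void nap(int timeout)
	{
		printf("throttling reclaim for %d ticks\n", timeout);
	}

	/* Mirrors vm_eviction_limits(): returns 1 when writeout should be skipped. */
	static int vm_eviction_limits(struct scan_control *sc)
	{
		if (sc->inflight > (totalram_pages * vm_dirty_ratio) / 100) {
			if (sc->throttled < 5) {	/* cap the naps per pass */
				nap(HZ / 5);
				sc->throttled++;
			}
			return 1;
		}
		return 0;
	}

	int main(void)
	{
		struct scan_control sc = { .inflight = 120000, .throttled = 0 };

		/* 120000 > 262144 * 40 / 100 = 104857, so writeout is throttled. */
		printf("skip writeout: %d\n", vm_eviction_limits(&sc));
		return 0;
	}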