| author | Tejun Heo <tj@kernel.org> | 2013-01-23 09:31:01 -0800 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2013-01-23 09:31:01 -0800 |
| commit | c14afb82ffff5903a701a9fb737ac20f36d1f755 | |
| tree | 304dcc7b1d7b9a5f564f7e978228e61ef41fbef2 /mm/page-writeback.c | |
| parent | async, kmod: warn on synchronous request_module() from async workers | |
| parent | Merge tag '3.8-pci-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/... | |
Merge branch 'master' into for-3.9-async
To receive f56c3196f251012de9b3ebaff55732a9074fdaae ("async: fix
__lowest_in_progress()").
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'mm/page-writeback.c')
| -rw-r--r-- | mm/page-writeback.c | 25 |
1 file changed, 20 insertions, 5 deletions
```diff
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6f4271224493..0713bfbf0954 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -201,6 +201,18 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 		     zone_reclaimable_pages(z) - z->dirty_balance_reserve;
 	}
 	/*
+	 * Unreclaimable memory (kernel memory or anonymous memory
+	 * without swap) can bring down the dirtyable pages below
+	 * the zone's dirty balance reserve and the above calculation
+	 * will underflow.  However we still want to add in nodes
+	 * which are below threshold (negative values) to get a more
+	 * accurate calculation but make sure that the total never
+	 * underflows.
+	 */
+	if ((long)x < 0)
+		x = 0;
+
+	/*
 	 * Make sure that the number of highmem pages is never larger
 	 * than the number of the total dirtyable memory. This can only
 	 * occur in very strange VM situations but we want to make sure
@@ -222,8 +234,8 @@ static unsigned long global_dirtyable_memory(void)
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() -
-	    dirty_balance_reserve;
+	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+	x -= min(x, dirty_balance_reserve);
 
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
@@ -290,9 +302,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone)
 	 * highmem zone can hold its share of dirty pages, so we don't
 	 * care about vm_highmem_is_dirtyable here.
 	 */
-	return zone_page_state(zone, NR_FREE_PAGES) +
-	       zone_reclaimable_pages(zone) -
-	       zone->dirty_balance_reserve;
+	unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
+		zone_reclaimable_pages(zone);
+
+	/* don't allow this to underflow */
+	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+	return nr_pages;
 }
 
 /**
```
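All three hunks guard against the same unsigned underflow: when `dirty_balance_reserve` exceeds the number of free plus reclaimable pages, the plain subtraction wraps around to a huge `unsigned long`, which the writeback code would then treat as an enormous amount of dirtyable memory. As a quick illustration, the standalone sketch below (hypothetical page counts, a local `min()` macro standing in for the kernel's, plain userspace C rather than kernel code) reproduces the wraparound and shows the two clamping idioms the patch uses.

```c
/* Standalone demonstration of the underflow this patch fixes.
 * The values are made up; only the arithmetic idioms mirror the diff.
 */
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long free_pages  = 100;  /* hypothetical NR_FREE_PAGES */
	unsigned long reclaimable = 50;   /* hypothetical reclaimable pages */
	unsigned long reserve     = 200;  /* dirty_balance_reserve > their sum */

	/* Naive version (the old code): wraps around to ~ULONG_MAX. */
	unsigned long naive = free_pages + reclaimable - reserve;
	printf("naive:      %lu\n", naive);

	/* Idiom from global_dirtyable_memory()/zone_dirtyable_memory():
	 * saturating subtraction, never drops below zero.
	 */
	unsigned long x = free_pages + reclaimable;
	x -= min(x, reserve);
	printf("saturated:  %lu\n", x);   /* 0 */

	/* Idiom from highmem_dirtyable_memory(): intermediate terms may
	 * wrap "negative" during accumulation; only the final total is
	 * clamped by reinterpreting the bits as signed.
	 */
	unsigned long total = free_pages + reclaimable - reserve;
	if ((long)total < 0)
		total = 0;
	printf("signed fix: %lu\n", total);  /* 0 */

	return 0;
}
```

The two idioms differ deliberately, per the comment added in the first hunk: `x -= min(x, reserve)` saturates immediately, while `highmem_dirtyable_memory()` lets per-node terms go "negative" during accumulation so that nodes below their dirty balance reserve still offset nodes above it, and only the final total is clamped via the signed cast.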
