+@@ -52,6 +52,12 @@ struct scan_control {
+ /* Incremented by the number of inactive pages that were scanned */
+ unsigned long nr_scanned;
+
++ /* Number of pages freed so far during a call to shrink_zones() */
++ unsigned long nr_reclaimed;
++
++ /* How many pages shrink_list() should reclaim */
++ unsigned long nr_to_reclaim;
++
+ /* This context's GFP mask */
+ gfp_t gfp_mask;
+
+@@ -72,6 +78,12 @@ struct scan_control {
+
+ int order;
+
++ /*
++ * Intend to reclaim enough contiguous memory rather than merely a
++ * sufficient amount of memory, i.e. the mode for high-order allocation.
++ */
++ bool lumpy_reclaim_mode;
++
+ /* Which cgroup do we reclaim from */
+ struct mem_cgroup *mem_cgroup;
+
+@@ -549,7 +561,6 @@ void putback_lru_page(struct page *page)
+ }
+ #endif /* CONFIG_UNEVICTABLE_LRU */
+
+-
+ /*
+ * shrink_page_list() returns the number of reclaimed pages
+ */
+@@ -613,8 +624,10 @@ static unsigned long shrink_page_list(st
+
+ referenced = page_referenced(page, 1, sc->mem_cgroup);
+ /* In active use or really unfreeable? Activate it. */
+- if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
+- referenced && page_mapping_inuse(page))
++ /* References are ignored in lumpy reclaim mode */
++ if (!sc->lumpy_reclaim_mode &&
++ referenced &&
++ page_mapping_inuse(page))
+ goto activate_locked;
+
+ #ifdef CONFIG_SWAP
+@@ -1024,6 +1037,47 @@ int isolate_lru_page(struct page *page)