|
57 | 57 | #include <linux/ftrace_event.h>
|
58 | 58 | #include <linux/memcontrol.h>
|
59 | 59 | #include <linux/prefetch.h>
|
| 60 | +#include <linux/page-debug-flags.h> |
60 | 61 |
|
61 | 62 | #include <asm/tlbflush.h>
|
62 | 63 | #include <asm/div64.h>
|
@@ -388,6 +389,37 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
|
388 | 389 | clear_highpage(page + i);
|
389 | 390 | }
|
390 | 391 |
|
#ifdef CONFIG_DEBUG_PAGEALLOC
/*
 * Minimum page order at which guard pages start being inserted by expand().
 * 0 (the default) disables the guard-page debugging machinery entirely.
 */
unsigned int _debug_guardpage_minorder;

/*
 * Parse the "debug_guardpage_minorder=" kernel boot option.
 *
 * Values greater than MAX_ORDER / 2 are rejected, since reserving guard
 * pages at higher orders would consume an excessive fraction of memory.
 *
 * NOTE: an __setup() handler must return 1 when it has consumed the
 * option (even on a malformed value, which we have already reported);
 * returning 0 would cause the string to be treated as an unknown
 * parameter and passed along to init.
 */
static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
		return 1;
	}
	_debug_guardpage_minorder = res;
	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
	return 1;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

/* Mark @page as a guard page via its debug_flags word. */
static inline void set_page_guard_flag(struct page *page)
{
	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}

/* Remove the guard-page marker from @page. */
static inline void clear_page_guard_flag(struct page *page)
{
	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
/* Guard pages compiled out: the flag helpers collapse to no-ops. */
static inline void set_page_guard_flag(struct page *page) { }
static inline void clear_page_guard_flag(struct page *page) { }
#endif
| 422 | + |
391 | 423 | static inline void set_page_order(struct page *page, int order)
|
392 | 424 | {
|
393 | 425 | set_page_private(page, order);
|
@@ -445,6 +477,11 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
|
445 | 477 | if (page_zone_id(page) != page_zone_id(buddy))
|
446 | 478 | return 0;
|
447 | 479 |
|
| 480 | + if (page_is_guard(buddy) && page_order(buddy) == order) { |
| 481 | + VM_BUG_ON(page_count(buddy) != 0); |
| 482 | + return 1; |
| 483 | + } |
| 484 | + |
448 | 485 | if (PageBuddy(buddy) && page_order(buddy) == order) {
|
449 | 486 | VM_BUG_ON(page_count(buddy) != 0);
|
450 | 487 | return 1;
|
@@ -501,11 +538,19 @@ static inline void __free_one_page(struct page *page,
|
501 | 538 | buddy = page + (buddy_idx - page_idx);
|
502 | 539 | if (!page_is_buddy(page, buddy, order))
|
503 | 540 | break;
|
504 |
| - |
505 |
| - /* Our buddy is free, merge with it and move up one order. */ |
506 |
| - list_del(&buddy->lru); |
507 |
| - zone->free_area[order].nr_free--; |
508 |
| - rmv_page_order(buddy); |
| 541 | + /* |
| 542 | + * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, |
| 543 | + * merge with it and move up one order. |
| 544 | + */ |
| 545 | + if (page_is_guard(buddy)) { |
| 546 | + clear_page_guard_flag(buddy); |
| 547 | + set_page_private(page, 0); |
| 548 | + __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); |
| 549 | + } else { |
| 550 | + list_del(&buddy->lru); |
| 551 | + zone->free_area[order].nr_free--; |
| 552 | + rmv_page_order(buddy); |
| 553 | + } |
509 | 554 | combined_idx = buddy_idx & page_idx;
|
510 | 555 | page = page + (combined_idx - page_idx);
|
511 | 556 | page_idx = combined_idx;
|
@@ -731,6 +776,23 @@ static inline void expand(struct zone *zone, struct page *page,
|
731 | 776 | high--;
|
732 | 777 | size >>= 1;
|
733 | 778 | VM_BUG_ON(bad_range(zone, &page[size]));
|
| 779 | + |
| 780 | +#ifdef CONFIG_DEBUG_PAGEALLOC |
| 781 | + if (high < debug_guardpage_minorder()) { |
| 782 | + /* |
| 783 | + * Mark as guard pages (or page), that will allow to |
| 784 | + * merge back to allocator when buddy will be freed. |
| 785 | + * Corresponding page table entries will not be touched, |
| 786 | + * pages will stay not present in virtual address space |
| 787 | + */ |
| 788 | + INIT_LIST_HEAD(&page[size].lru); |
| 789 | + set_page_guard_flag(&page[size]); |
| 790 | + set_page_private(&page[size], high); |
| 791 | + /* Guard pages are not available for any usage */ |
| 792 | + __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high)); |
| 793 | + continue; |
| 794 | + } |
| 795 | +#endif |
734 | 796 | list_add(&page[size].lru, &area->free_list[migratetype]);
|
735 | 797 | area->nr_free++;
|
736 | 798 | set_page_order(&page[size], high);
|
@@ -1754,7 +1816,8 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
|
1754 | 1816 | {
|
1755 | 1817 | unsigned int filter = SHOW_MEM_FILTER_NODES;
|
1756 | 1818 |
|
1757 |
| - if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs)) |
| 1819 | + if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) || |
| 1820 | + debug_guardpage_minorder() > 0) |
1758 | 1821 | return;
|
1759 | 1822 |
|
1760 | 1823 | /*
|
|
0 commit comments