
Commit 33dfe92

yangge authored and akpm00 committed
mm/gup: clear the LRU flag of a page before adding to LRU batch
If a large amount of CMA memory is configured in the system (for example, CMA memory accounts for 50% of system memory), starting a virtual machine with device passthrough will call pin_user_pages_remote(..., FOLL_LONGTERM, ...) to pin memory. Normally, if a page is present and lies in the CMA area, pin_user_pages_remote() will migrate it out of the CMA area because of the FOLL_LONGTERM flag. But the current code causes the migration to fail due to unexpected page refcounts, and eventually the virtual machine fails to start.

When a page is added to an LRU batch, its refcount increases by one; removing it from the LRU batch decreases it by one. Page migration requires that the page not be referenced by anything other than its page mapping. Before migrating a page we should therefore try to drain it from the LRU batch in case it is sitting there; however, folio_test_lru() is not sufficient to tell whether the page is in an LRU batch or not, and if the page is in an LRU batch, the migration will fail.

To solve the problem above, we modify the logic of adding a page to an LRU batch: before adding the page, we clear its LRU flag, so that we can check whether the page is in an LRU batch with folio_test_lru(page). This is quite valuable, because we likely do not want to blindly drain the LRU batches simply because there is some unexpected reference on a page, as described above.

This change makes the LRU flag of a page invisible for longer, which may impact some programs. For example, as long as a page is on an LRU batch, we cannot isolate it and we cannot check whether it is an LRU page. Further, a page can now only be on exactly one LRU batch. This does not seem to matter much, because when a new page is allocated from the buddy allocator and added to an LRU batch, or is isolated, its LRU flag may also be invisible for a long time.

Link: https://lkml.kernel.org/r/1720075944-27201-1-git-send-email-yangge1116@126.com
Link: https://lkml.kernel.org/r/1720008153-16035-1-git-send-email-yangge1116@126.com
Fixes: 9a4e9f3 ("mm: update get_user_pages_longterm to migrate pages allocated from CMA region")
Signed-off-by: yangge <yangge1116@126.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
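For illustration, a caller-side sketch of what this change enables (hypothetical code, not part of this patch: try_isolate_for_migration and the drained flag are made-up names, while folio_test_lru(), lru_add_drain_all() and folio_isolate_lru() are existing interfaces). Once every batching path clears the LRU flag before queueing a folio, a longterm pinner that finds the LRU flag clear can drain the per-CPU batches once and retry isolation, instead of blindly draining or failing the migration.

/*
 * Hypothetical FOLL_LONGTERM-style caller, for illustration only.
 * After this patch, a clear LRU flag means the folio is not on an
 * LRU list: it is either already isolated, not an LRU page at all,
 * or still sitting on a per-CPU LRU batch - and in the last case a
 * single drain makes it isolatable again.
 */
static bool try_isolate_for_migration(struct folio *folio, bool *drained)
{
        if (!folio_test_lru(folio) && !*drained) {
                lru_add_drain_all();    /* flush the per-CPU LRU batches once */
                *drained = true;
        }

        return folio_isolate_lru(folio);        /* true if the folio is now off the LRU */
}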
1 parent af64977 commit 33dfe92

1 file changed: +31 -12 lines changed

mm/swap.c

+31 -12
@@ -211,10 +211,6 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 	for (i = 0; i < folio_batch_count(fbatch); i++) {
 		struct folio *folio = fbatch->folios[i];
 
-		/* block memcg migration while the folio moves between lru */
-		if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
-			continue;
-
 		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 		move_fn(lruvec, folio);
 
@@ -255,11 +251,16 @@ static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
 void folio_rotate_reclaimable(struct folio *folio)
 {
 	if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
-	    !folio_test_unevictable(folio) && folio_test_lru(folio)) {
+	    !folio_test_unevictable(folio)) {
 		struct folio_batch *fbatch;
 		unsigned long flags;
 
 		folio_get(folio);
+		if (!folio_test_clear_lru(folio)) {
+			folio_put(folio);
+			return;
+		}
+
 		local_lock_irqsave(&lru_rotate.lock, flags);
 		fbatch = this_cpu_ptr(&lru_rotate.fbatch);
 		folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn);
@@ -352,11 +353,15 @@ static void folio_activate_drain(int cpu)
 
 void folio_activate(struct folio *folio)
 {
-	if (folio_test_lru(folio) && !folio_test_active(folio) &&
-	    !folio_test_unevictable(folio)) {
+	if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
 		struct folio_batch *fbatch;
 
 		folio_get(folio);
+		if (!folio_test_clear_lru(folio)) {
+			folio_put(folio);
+			return;
+		}
+
 		local_lock(&cpu_fbatches.lock);
 		fbatch = this_cpu_ptr(&cpu_fbatches.activate);
 		folio_batch_add_and_move(fbatch, folio, folio_activate_fn);
@@ -700,6 +705,11 @@ void deactivate_file_folio(struct folio *folio)
 		return;
 
 	folio_get(folio);
+	if (!folio_test_clear_lru(folio)) {
+		folio_put(folio);
+		return;
+	}
+
 	local_lock(&cpu_fbatches.lock);
 	fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file);
 	folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn);
@@ -716,11 +726,16 @@ void deactivate_file_folio(struct folio *folio)
  */
 void folio_deactivate(struct folio *folio)
 {
-	if (folio_test_lru(folio) && !folio_test_unevictable(folio) &&
-	    (folio_test_active(folio) || lru_gen_enabled())) {
+	if (!folio_test_unevictable(folio) && (folio_test_active(folio) ||
+	    lru_gen_enabled())) {
 		struct folio_batch *fbatch;
 
 		folio_get(folio);
+		if (!folio_test_clear_lru(folio)) {
+			folio_put(folio);
+			return;
+		}
+
 		local_lock(&cpu_fbatches.lock);
 		fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
 		folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn);
@@ -737,12 +752,16 @@ void folio_deactivate(struct folio *folio)
  */
 void folio_mark_lazyfree(struct folio *folio)
 {
-	if (folio_test_lru(folio) && folio_test_anon(folio) &&
-	    folio_test_swapbacked(folio) && !folio_test_swapcache(folio) &&
-	    !folio_test_unevictable(folio)) {
+	if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
+	    !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) {
 		struct folio_batch *fbatch;
 
 		folio_get(folio);
+		if (!folio_test_clear_lru(folio)) {
+			folio_put(folio);
+			return;
+		}
+
 		local_lock(&cpu_fbatches.lock);
 		fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree);
 		folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn);
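All five batching call sites above apply the same guard. Pulled out of the hunks for readability, the shared pattern looks like this (a sketch assembled from the folio_deactivate() hunk; the trailing local_unlock() is the pre-existing code that follows that hunk and is not shown in the diff):

        folio_get(folio);
        /*
         * Clear the LRU flag before queueing: a folio whose flag is
         * already clear is either isolated or on some other LRU batch,
         * so back off instead of adding it to a second batch.
         */
        if (!folio_test_clear_lru(folio)) {
                folio_put(folio);
                return;
        }

        local_lock(&cpu_fbatches.lock);
        fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
        folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn);
        local_unlock(&cpu_fbatches.lock);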
