e3559fb445
Changelog: https://cdn.kernel.org/pub/linux/kernel/v6.x/ChangeLog-6.1.54

Removed upstreamed:
  generic/backport-6.1/020-v6.3-02-UPSTREAM-mm-multi-gen-LRU-rename-lrugen-lists-to-lru.patch[1]
  ipq806x/patches-6.1/140-v6.5-hwspinlock-qcom-add-missing-regmap-config-for-SFPB-M.patch[2]

All other patches automatically rebased.

1. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v6.1.54&id=a73d04c460521e45f257d28d73df096e41ece324
2. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v6.1.54&id=e93bc372dbc0bde133c854c03502a95617041972

Build system: x86/64
Build-tested: x86/64/AMD Cezanne
Run-tested: x86/64/AMD Cezanne

Signed-off-by: John Audia <therealgraysky@proton.me>
From e604c3ccb4dfbdde2467fccef9bb36170a392695 Mon Sep 17 00:00:00 2001
From: "T.J. Alumbaugh" <talumbau@google.com>
Date: Wed, 18 Jan 2023 00:18:27 +0000
Subject: [PATCH 18/19] UPSTREAM: mm: multi-gen LRU: simplify
 lru_gen_look_around()

Update the folio generation in place with or without
current->reclaim_state->mm_walk. The LRU lock is held for longer if
mm_walk is NULL and the number of folios to update is more than
PAGEVEC_SIZE.

This causes a measurable regression from the LRU lock contention during a
microbenchmark. But a tiny regression is not worth the complexity.

Link: https://lkml.kernel.org/r/20230118001827.1040870-8-talumbau@google.com
Change-Id: I9ce18b4f4062e6c1c13c98ece9422478eb8e1846
Signed-off-by: T.J. Alumbaugh <talumbau@google.com>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
(cherry picked from commit abf086721a2f1e6897c57796f7268df1b194c750)
Bug: 274865848
Signed-off-by: T.J. Mercier <tjmercier@google.com>
---
 mm/vmscan.c | 73 +++++++++++++++++------------------------------------
 1 file changed, 23 insertions(+), 50 deletions(-)

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4573,13 +4573,12 @@ static void lru_gen_age_node(struct pgli
 void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 {
 	int i;
-	pte_t *pte;
 	unsigned long start;
 	unsigned long end;
-	unsigned long addr;
 	struct lru_gen_mm_walk *walk;
 	int young = 0;
-	unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {};
+	pte_t *pte = pvmw->pte;
+	unsigned long addr = pvmw->address;
 	struct folio *folio = pfn_folio(pvmw->pfn);
 	struct mem_cgroup *memcg = folio_memcg(folio);
 	struct pglist_data *pgdat = folio_pgdat(folio);
@@ -4596,25 +4595,28 @@ void lru_gen_look_around(struct page_vma
 	/* avoid taking the LRU lock under the PTL when possible */
 	walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
 
-	start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start);
-	end = min(pvmw->address | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
+	start = max(addr & PMD_MASK, pvmw->vma->vm_start);
+	end = min(addr | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
 
 	if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
-		if (pvmw->address - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
+		if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
 			end = start + MIN_LRU_BATCH * PAGE_SIZE;
-		else if (end - pvmw->address < MIN_LRU_BATCH * PAGE_SIZE / 2)
+		else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2)
 			start = end - MIN_LRU_BATCH * PAGE_SIZE;
 		else {
-			start = pvmw->address - MIN_LRU_BATCH * PAGE_SIZE / 2;
-			end = pvmw->address + MIN_LRU_BATCH * PAGE_SIZE / 2;
+			start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2;
+			end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2;
 		}
 	}
 
-	pte = pvmw->pte - (pvmw->address - start) / PAGE_SIZE;
+	/* folio_update_gen() requires stable folio_memcg() */
+	if (!mem_cgroup_trylock_pages(memcg))
+		return;
 
-	rcu_read_lock();
 	arch_enter_lazy_mmu_mode();
 
+	pte -= (addr - start) / PAGE_SIZE;
+
 	for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
 		unsigned long pfn;
 
@@ -4639,56 +4641,27 @@ void lru_gen_look_around(struct page_vma
 		      !folio_test_swapcache(folio)))
 			folio_mark_dirty(folio);
 
+		if (walk) {
+			old_gen = folio_update_gen(folio, new_gen);
+			if (old_gen >= 0 && old_gen != new_gen)
+				update_batch_size(walk, folio, old_gen, new_gen);
+
+			continue;
+		}
+
 		old_gen = folio_lru_gen(folio);
 		if (old_gen < 0)
 			folio_set_referenced(folio);
 		else if (old_gen != new_gen)
-			__set_bit(i, bitmap);
+			folio_activate(folio);
 	}
 
 	arch_leave_lazy_mmu_mode();
-	rcu_read_unlock();
+	mem_cgroup_unlock_pages();
 
 	/* feedback from rmap walkers to page table walkers */
 	if (suitable_to_scan(i, young))
 		update_bloom_filter(lruvec, max_seq, pvmw->pmd);
-
-	if (!walk && bitmap_weight(bitmap, MIN_LRU_BATCH) < PAGEVEC_SIZE) {
-		for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
-			folio = pfn_folio(pte_pfn(pte[i]));
-			folio_activate(folio);
-		}
-		return;
-	}
-
-	/* folio_update_gen() requires stable folio_memcg() */
-	if (!mem_cgroup_trylock_pages(memcg))
-		return;
-
-	if (!walk) {
-		spin_lock_irq(&lruvec->lru_lock);
-		new_gen = lru_gen_from_seq(lruvec->lrugen.max_seq);
-	}
-
-	for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
-		folio = pfn_folio(pte_pfn(pte[i]));
-		if (folio_memcg_rcu(folio) != memcg)
-			continue;
-
-		old_gen = folio_update_gen(folio, new_gen);
-		if (old_gen < 0 || old_gen == new_gen)
-			continue;
-
-		if (walk)
-			update_batch_size(walk, folio, old_gen, new_gen);
-		else
-			lru_gen_update_size(lruvec, folio, old_gen, new_gen);
-	}
-
-	if (!walk)
-		spin_unlock_irq(&lruvec->lru_lock);
-
-	mem_cgroup_unlock_pages();
 }
 
 /******************************************************************************
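
The heart of the change is easier to see outside the diff: the PTE scan now
updates each folio's generation in place (batched through mm_walk when a walk
context exists, otherwise handling the folio directly), instead of recording
changed folios in a bitmap and post-processing them after the scan. The
following userspace sketch models only that control-flow change under stated
assumptions; struct folio_model, struct walk_model, and look_around() are
illustrative stand-ins, not kernel API, and the kernel's locking and
negative-generation handling are deliberately elided.

/*
 * Minimal userspace model of the in-place update path this patch
 * introduces. All names below are hypothetical stand-ins.
 */
#include <stdio.h>
#include <stddef.h>

struct folio_model { int gen; };     /* stand-in for struct folio */
struct walk_model  { int batched; }; /* stand-in for lru_gen_mm_walk */

static void look_around(struct folio_model *folios, size_t n,
			struct walk_model *walk, int new_gen)
{
	for (size_t i = 0; i < n; i++) {
		int old_gen = folios[i].gen;

		folios[i].gen = new_gen;  /* in place, like folio_update_gen() */
		if (old_gen == new_gen)
			continue;

		if (walk)
			walk->batched++;  /* deferred, like update_batch_size() */
		else
			printf("activate folio %zu\n", i); /* like folio_activate() */
	}
}

int main(void)
{
	struct folio_model folios[4] = { { 1 }, { 2 }, { 1 }, { 2 } };
	struct walk_model walk = { 0 };

	look_around(folios, 4, &walk, 2); /* walk present: updates are batched */
	printf("batched updates: %d\n", walk.batched);

	look_around(folios, 4, NULL, 3);  /* no walk: each folio handled directly */
	return 0;
}

The removed bitmap/PAGEVEC_SIZE post-processing is what this in-place update
replaces; the trade-off, per the commit message, is that the no-walk path can
hold the LRU lock longer when more than PAGEVEC_SIZE folios need updating.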