author    Matthew Wilcox (Oracle) <willy@infradead.org>  2023-08-02 16:13:41 +0100
committer Andrew Morton <akpm@linux-foundation.org>      2023-08-24 16:20:21 -0700
commit    876397837d582ce72f977ac3e635ce74eebcecc9 (patch)
tree      8ff3ffd26f7f0396484873e81cdc95374c49a44f
parent    9ff633944165d11c53c088d9596db3da66e90396 (diff)
ia64: implement the new page table range API
Add PFN_PTE_SHIFT, update_mmu_cache_range() and flush_dcache_folio().
Change the PG_arch_1 (aka PG_dcache_clean) flag from being per-page to
per-folio, which makes arch_dma_mark_clean() and mark_clean() a little
more exciting.

[willy@infradead.org: fix folio_size() handling]
  Link: https://lkml.kernel.org/r/ZNPlOCe8F+nrzPxr@casper.infradead.org
Link: https://lkml.kernel.org/r/20230802151406.3735276-14-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
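The conversion in mark_clean() and arch_dma_mark_clean() below follows one
pattern: walk the byte range folio by folio, skip a partially covered first
folio, and mark only folios the range covers completely. Here is a minimal
userspace model of that walk; the mock_folio struct, mark_clean_model(), and
the fixed folio sizes are all illustrative stand-ins, not kernel API:

/* model.c - sketch of the folio-range walk used by this patch */
#include <stdio.h>
#include <stddef.h>
#include <sys/types.h>	/* ssize_t */

struct mock_folio {
	size_t size;	/* bytes this folio covers */
	int clean;	/* stands in for PG_arch_1 */
};

/* Mark every folio lying entirely inside the range, as mark_clean() does. */
static void mark_clean_model(struct mock_folio *folio, size_t offset,
			     size_t size)
{
	ssize_t left = size;

	if (offset) {			/* partial first folio: skip it */
		left -= folio->size - offset;
		if (left <= 0)
			return;
		folio++;		/* models folio_next() */
	}

	while (left >= (ssize_t)folio->size) {
		left -= folio->size;
		folio->clean = 1;	/* models set_bit(PG_arch_1, ...) */
		if (!left)
			break;
		folio++;
	}
}

int main(void)
{
	/* a 16K folio followed by two 4K folios; the range starts 4K in */
	struct mock_folio f[] = { { 16384, 0 }, { 4096, 0 }, { 4096, 0 } };

	mark_clean_model(f, 4096, 16384);
	for (size_t i = 0; i < 3; i++)
		printf("folio %zu: %s\n", i, f[i].clean ? "clean" : "dirty");
	return 0;
}

Only folio 1 ends up marked: folio 0 is partially covered and skipped, and
the range ends exactly on folio 1's boundary, so folio 2 is never touched.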
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c    | 30
-rw-r--r--  arch/ia64/include/asm/cacheflush.h | 14
-rw-r--r--  arch/ia64/include/asm/pgtable.h    |  4
-rw-r--r--  arch/ia64/mm/init.c                | 32
4 files changed, 54 insertions(+), 26 deletions(-)
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 8ad6946521d8..c4d477e8bcd4 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -798,22 +798,30 @@ sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
 #endif
 
 #ifdef ENABLE_MARK_CLEAN
-/**
+/*
  * Since DMA is i-cache coherent, any (complete) pages that were written via
  * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
  * flush them when they get mapped into an executable vm-area.
  */
-static void
-mark_clean (void *addr, size_t size)
+static void mark_clean(void *addr, size_t size)
 {
-	unsigned long pg_addr, end;
-
-	pg_addr = PAGE_ALIGN((unsigned long) addr);
-	end = (unsigned long) addr + size;
-	while (pg_addr + PAGE_SIZE <= end) {
-		struct page *page = virt_to_page((void *)pg_addr);
-		set_bit(PG_arch_1, &page->flags);
-		pg_addr += PAGE_SIZE;
+	struct folio *folio = virt_to_folio(addr);
+	ssize_t left = size;
+	size_t offset = offset_in_folio(folio, addr);
+
+	if (offset) {
+		left -= folio_size(folio) - offset;
+		if (left <= 0)
+			return;
+		folio = folio_next(folio);
+	}
+
+	while (left >= folio_size(folio)) {
+		left -= folio_size(folio);
+		set_bit(PG_arch_1, &folio->flags);
+		if (!left)
+			break;
+		folio = folio_next(folio);
 	}
 }
 #endif
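Note one asymmetry between the two copies of this loop: here the test reads
left >= folio_size(folio), while the init.c copy below casts the right-hand
side to (ssize_t). Without the cast, C's usual arithmetic conversions make
the comparison unsigned; that is harmless here, since left is strictly
positive whenever the loop is entered, but the cast is the defensive
spelling. A standalone two-line illustration, not part of the patch:

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	ssize_t left = -1;
	size_t fsize = 4096;

	printf("%d\n", left >= fsize);		/* 1: -1 converts to SIZE_MAX */
	printf("%d\n", left >= (ssize_t)fsize);	/* 0: genuine signed compare */
	return 0;
}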
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
index 708c0fa5d975..eac493fa9e0d 100644
--- a/arch/ia64/include/asm/cacheflush.h
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -13,10 +13,16 @@
 #include <asm/page.h>
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-#define flush_dcache_page(page)			\
-do {						\
-	clear_bit(PG_arch_1, &(page)->flags);	\
-} while (0)
+static inline void flush_dcache_folio(struct folio *folio)
+{
+	clear_bit(PG_arch_1, &folio->flags);
+}
+#define flush_dcache_folio flush_dcache_folio
+
+static inline void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
+}
 
 extern void flush_icache_range(unsigned long start, unsigned long end);
 #define flush_icache_range flush_icache_range
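The "#define flush_dcache_folio flush_dcache_folio" line is the kernel's
usual define-to-self idiom: defining a macro with the same name as the inline
function lets a later generic header test #ifndef and supply a fallback only
when the architecture stayed silent. A compilable userspace model of the
pattern; flush_thing and the "header" comments are hypothetical stand-ins:

#include <stdio.h>

/* "arch" header: provides its own implementation and marks it */
static inline void flush_thing(int *flags) { *flags = 0; }
#define flush_thing flush_thing

/* "generic" header: fallback compiled only if no arch override exists */
#ifndef flush_thing
static inline void flush_thing(int *flags) { (void)flags; /* no-op */ }
#endif

int main(void)
{
	int flags = 1;
	flush_thing(&flags);
	printf("flags = %d\n", flags);	/* 0: the arch version ran */
	return 0;
}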
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 21c97e31a28a..4e5dd800ce1f 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -206,6 +206,7 @@ ia64_phys_addr_valid (unsigned long addr)
 #define RGN_MAP_SHIFT (PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3)
 #define RGN_MAP_LIMIT	((1UL << RGN_MAP_SHIFT) - PAGE_SIZE)	/* per region addr limit */
 
+#define PFN_PTE_SHIFT	PAGE_SHIFT
 /*
  * Conversion functions: convert page frame number (pfn) and a protection value to a page
  * table entry (pte).
@@ -303,8 +304,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 	*ptep = pteval;
 }
 
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
 /*
  * Make page protection values cacheable, uncacheable, or write-
  * combining. Note that "protection" is really a misnomer here as the
@@ -396,6 +395,7 @@ pte_same (pte_t a, pte_t b)
 	return pte_val(a) == pte_val(b);
 }
 
+#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do { } while (0)
 #define update_mmu_cache(vma, address, ptep) do { } while (0)
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
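With its private set_pte_at() deleted and PFN_PTE_SHIFT defined, ia64 picks
up the generic set_ptes() this series adds, which writes a run of ptes for
consecutive pfns by bumping the pfn field in place. A userspace paraphrase of
that stepping logic under mocked types; the real helper lives in
include/linux/pgtable.h and also handles lazy-MMU and page-table-check hooks:

#include <stdio.h>

#define PAGE_SHIFT	14		/* 16K pages, a common ia64 config */
#define PFN_PTE_SHIFT	PAGE_SHIFT	/* as defined in the hunk above */

typedef unsigned long pte_t;		/* mock: pfn << PAGE_SHIFT | prot bits */

static void set_pte(pte_t *ptep, pte_t pte) { *ptep = pte; }

/* modeled on the generic set_ptes(): nr entries for consecutive pfns */
static void set_ptes_model(pte_t *ptep, pte_t pte, unsigned int nr)
{
	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte += 1UL << PFN_PTE_SHIFT;	/* advance to the next pfn */
	}
}

int main(void)
{
	pte_t ptes[4] = { 0 };

	set_ptes_model(ptes, (0x100UL << PFN_PTE_SHIFT) | 0x3, 4);
	for (int i = 0; i < 4; i++)
		printf("pte[%d] pfn=0x%lx\n", i, ptes[i] >> PFN_PTE_SHIFT);
	return 0;
}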
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 7f5353e28516..05b0f2f0c073 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -50,30 +50,44 @@ void
 __ia64_sync_icache_dcache (pte_t pte)
 {
 	unsigned long addr;
-	struct page *page;
+	struct folio *folio;
 
-	page = pte_page(pte);
-	addr = (unsigned long) page_address(page);
+	folio = page_folio(pte_page(pte));
+	addr = (unsigned long)folio_address(folio);
 
-	if (test_bit(PG_arch_1, &page->flags))
+	if (test_bit(PG_arch_1, &folio->flags))
 		return;				/* i-cache is already coherent with d-cache */
 
-	flush_icache_range(addr, addr + page_size(page));
-	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
+	flush_icache_range(addr, addr + folio_size(folio));
+	set_bit(PG_arch_1, &folio->flags);	/* mark page as clean */
 }
 
 /*
- * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * Since DMA is i-cache coherent, any (complete) folios that were written via
  * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
  * flush them when they get mapped into an executable vm-area.
  */
 void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
 {
 	unsigned long pfn = PHYS_PFN(paddr);
+	struct folio *folio = page_folio(pfn_to_page(pfn));
+	ssize_t left = size;
+	size_t offset = offset_in_folio(folio, paddr);
 
-	do {
+	if (offset) {
+		left -= folio_size(folio) - offset;
+		if (left <= 0)
+			return;
+		folio = folio_next(folio);
+	}
+
+	while (left >= (ssize_t)folio_size(folio)) {
+		left -= folio_size(folio);
 		set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
-	} while (++pfn <= PHYS_PFN(paddr + size - 1));
+		if (!left)
+			break;
+		folio = folio_next(folio);
+	}
 }
 
 inline void
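The payoff of moving PG_arch_1 to the folio shows up in
__ia64_sync_icache_dcache() above: one test of the flag plus one
flush_icache_range() over folio_size() bytes now covers every page of a large
folio. A userspace model of that lazy-sync pattern, with mock types and a
counter standing in for the real cache flush (all names illustrative):

#include <stdio.h>
#include <stddef.h>

struct mock_folio {
	size_t size;
	int arch_1;		/* stands in for PG_arch_1 */
};

static int flushes;		/* counts simulated flush_icache_range() calls */

static void sync_icache_dcache_model(struct mock_folio *folio)
{
	if (folio->arch_1)
		return;		/* i-cache already coherent with d-cache */
	flushes++;		/* flush_icache_range(addr, addr + folio->size) */
	folio->arch_1 = 1;	/* mark the whole folio as clean */
}

int main(void)
{
	struct mock_folio folio = { .size = 4 * 16384, .arch_1 = 0 };

	/* map each of the folio's pages executable, one fault at a time */
	for (int i = 0; i < 4; i++)
		sync_icache_dcache_model(&folio);
	printf("flushes = %d\n", flushes);	/* 1, not 4 */
	return 0;
}

With a per-page flag the same sequence would flush once per page; the
per-folio flag collapses that to a single flush for the whole folio.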