Diffstat (limited to 'drivers/iommu/io-pgtable-arm.c')
 drivers/iommu/io-pgtable-arm.c | 168 +++++++++++++++++++++++++++++++--------
 1 file changed, 140 insertions(+), 28 deletions(-)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index f7828a7aad41..0e67f1721a3d 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -21,6 +21,7 @@
#include <asm/barrier.h>
#include "io-pgtable-arm.h"
+#include "iommu-pages.h"
#define ARM_LPAE_MAX_ADDR_BITS 52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
@@ -75,6 +76,7 @@
#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53)
+#define ARM_LPAE_PTE_DBM (((arm_lpae_iopte)1) << 51)
#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8)
@@ -84,7 +86,7 @@
#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
-#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52)
+#define ARM_LPAE_PTE_ATTR_HI_MASK (ARM_LPAE_PTE_XN | ARM_LPAE_PTE_DBM)
#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \
ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
@@ -92,7 +94,11 @@
/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6)
-#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6)
+#define ARM_LPAE_PTE_AP_RDONLY_BIT 7
+#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)1) << \
+ ARM_LPAE_PTE_AP_RDONLY_BIT)
+#define ARM_LPAE_PTE_AP_WR_CLEAN_MASK (ARM_LPAE_PTE_AP_RDONLY | \
+ ARM_LPAE_PTE_DBM)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2
#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11)
@@ -138,6 +144,12 @@
#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
+#define iopte_writeable_dirty(pte) \
+ (((pte) & ARM_LPAE_PTE_AP_WR_CLEAN_MASK) == ARM_LPAE_PTE_DBM)
+
+#define iopte_set_writeable_clean(ptep) \
+ set_bit(ARM_LPAE_PTE_AP_RDONLY_BIT, (unsigned long *)(ptep))
+
struct arm_lpae_io_pgtable {
struct io_pgtable iop;
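
[For illustration, a minimal sketch of the hardware dirty-bit-modifier (DBM) state machine that the iopte_writeable_dirty()/iopte_set_writeable_clean() helpers added above encode; the snippet is not part of the patch and only exercises the macros on a local PTE value:

	/*
	 * Illustrative only. With DBM set, hardware clears AP[2] (RDONLY)
	 * when the device writes through the mapping, so:
	 *   DBM=1, AP[2]=0  ->  writeable-dirty
	 *   DBM=1, AP[2]=1  ->  writeable-clean (re-dirtied by the next write)
	 */
	arm_lpae_iopte pte = ARM_LPAE_PTE_DBM;	/* as installed for a writeable map */

	WARN_ON(!iopte_writeable_dirty(pte));	/* reported as dirty */
	iopte_set_writeable_clean(&pte);	/* sets ARM_LPAE_PTE_AP_RDONLY */
	WARN_ON(iopte_writeable_dirty(pte));	/* clean until hardware writes again */
]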
@@ -159,6 +171,13 @@ static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}
+static inline bool iopte_table(arm_lpae_iopte pte, int lvl)
+{
+ if (lvl == (ARM_LPAE_MAX_LEVELS - 1))
+ return false;
+ return iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE;
+}
+
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
struct arm_lpae_io_pgtable *data)
{
@@ -198,14 +217,10 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
VM_BUG_ON((gfp & __GFP_HIGHMEM));
- if (cfg->alloc) {
+ if (cfg->alloc)
pages = cfg->alloc(cookie, size, gfp);
- } else {
- struct page *p;
-
- p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
- pages = p ? page_address(p) : NULL;
- }
+ else
+ pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order);
if (!pages)
return NULL;
@@ -233,7 +248,7 @@ out_free:
if (cfg->free)
cfg->free(cookie, pages, size);
else
- free_pages((unsigned long)pages, order);
+ iommu_free_pages(pages, order);
return NULL;
}
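
[The open-coded page allocation above is replaced by the common iommu-pages.h helpers; a rough sketch of the assumed equivalence (the helpers are expected to zero the pages and account them per node, but iommu-pages.h is authoritative):

	/* Sketch of the assumed equivalence, not from the patch: */
	void *pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order);
	/* ~ page_address(alloc_pages_node(nid, gfp | __GFP_ZERO, order)),
	 * plus accounting of IOMMU pagetable memory */
	iommu_free_pages(pages, order);	/* frees and unaccounts in one call */
]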
@@ -249,7 +264,7 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
if (cfg->free)
cfg->free(cookie, pages, size);
else
- free_pages((unsigned long)pages, get_order(size));
+ iommu_free_pages(pages, get_order(size));
}
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
@@ -259,13 +274,13 @@ static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}
-static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
+static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg, int num_entries)
{
-	*ptep = 0;
-
-	if (!cfg->coherent_walk)
-		__arm_lpae_sync_pte(ptep, 1, cfg);
+	for (int i = 0; i < num_entries; i++)
+		ptep[i] = 0;
+	if (!cfg->coherent_walk && num_entries)
+		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
@@ -425,6 +440,8 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
pte = ARM_LPAE_PTE_nG;
if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
pte |= ARM_LPAE_PTE_AP_RDONLY;
+ else if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_HD)
+ pte |= ARM_LPAE_PTE_DBM;
if (!(prot & IOMMU_PRIV))
pte |= ARM_LPAE_PTE_AP_UNPRIV;
} else {
@@ -498,9 +515,8 @@ static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
if (WARN_ON(iaext || paddr >> cfg->oas))
return -ERANGE;
- /* If no access, then nothing to do */
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
- return 0;
+ return -EINVAL;
prot = arm_lpae_prot_to_pte(data, iommu_prot);
ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
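
[A note on the behavioural change in this hunk: a map request with neither IOMMU_READ nor IOMMU_WRITE was previously ignored with a success return, but is now reported as an error. An illustrative caller, not from the patch:

	/* prot == 0 is now rejected instead of silently doing nothing */
	ret = ops->map_pages(ops, iova, paddr, SZ_4K, 1, 0, GFP_KERNEL, &mapped);
	/* ret is -EINVAL here; before this change it was 0 with nothing mapped */
]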
@@ -637,26 +653,29 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
num_entries = min_t(int, pgcount, max_entries);
- while (i < num_entries) {
- pte = READ_ONCE(*ptep);
+ /* Find and handle non-leaf entries */
+ for (i = 0; i < num_entries; i++) {
+ pte = READ_ONCE(ptep[i]);
if (WARN_ON(!pte))
break;
- __arm_lpae_clear_pte(ptep, &iop->cfg);
-
if (!iopte_leaf(pte, lvl, iop->fmt)) {
+ __arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);
+
/* Also flush any partial walks */
io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
ARM_LPAE_GRANULE(data));
__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
- } else if (!iommu_iotlb_gather_queued(gather)) {
- io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
}
-
- ptep++;
- i++;
}
+ /* Clear the remaining entries */
+ __arm_lpae_clear_pte(ptep, &iop->cfg, i);
+
+ if (gather && !iommu_iotlb_gather_queued(gather))
+ for (int j = 0; j < i; j++)
+ io_pgtable_tlb_add_page(iop, gather, iova + j * size, size);
+
return i * size;
} else if (iopte_leaf(pte, lvl, iop->fmt)) {
/*
@@ -729,6 +748,97 @@ found_translation:
return iopte_to_paddr(pte, data) | iova;
}
+struct io_pgtable_walk_data {
+ struct iommu_dirty_bitmap *dirty;
+ unsigned long flags;
+ u64 addr;
+ const u64 end;
+};
+
+static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
+ struct io_pgtable_walk_data *walk_data,
+ arm_lpae_iopte *ptep,
+ int lvl);
+
+static int io_pgtable_visit_dirty(struct arm_lpae_io_pgtable *data,
+ struct io_pgtable_walk_data *walk_data,
+ arm_lpae_iopte *ptep, int lvl)
+{
+ struct io_pgtable *iop = &data->iop;
+ arm_lpae_iopte pte = READ_ONCE(*ptep);
+
+ if (iopte_leaf(pte, lvl, iop->fmt)) {
+ size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+ if (iopte_writeable_dirty(pte)) {
+ iommu_dirty_bitmap_record(walk_data->dirty,
+ walk_data->addr, size);
+ if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
+ iopte_set_writeable_clean(ptep);
+ }
+ walk_data->addr += size;
+ return 0;
+ }
+
+ if (WARN_ON(!iopte_table(pte, lvl)))
+ return -EINVAL;
+
+ ptep = iopte_deref(pte, data);
+ return __arm_lpae_iopte_walk_dirty(data, walk_data, ptep, lvl + 1);
+}
+
+static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
+ struct io_pgtable_walk_data *walk_data,
+ arm_lpae_iopte *ptep,
+ int lvl)
+{
+ u32 idx;
+ int max_entries, ret;
+
+ if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
+ return -EINVAL;
+
+ if (lvl == data->start_level)
+ max_entries = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
+ else
+ max_entries = ARM_LPAE_PTES_PER_TABLE(data);
+
+ for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data);
+ (idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) {
+ ret = io_pgtable_visit_dirty(data, walk_data, ptep + idx, lvl);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
+ unsigned long iova, size_t size,
+ unsigned long flags,
+ struct iommu_dirty_bitmap *dirty)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ struct io_pgtable_walk_data walk_data = {
+ .dirty = dirty,
+ .flags = flags,
+ .addr = iova,
+ .end = iova + size,
+ };
+ arm_lpae_iopte *ptep = data->pgd;
+ int lvl = data->start_level;
+
+ if (WARN_ON(!size))
+ return -EINVAL;
+ if (WARN_ON((iova + size - 1) & ~(BIT(cfg->ias) - 1)))
+ return -EINVAL;
+ if (data->iop.fmt != ARM_64_LPAE_S1)
+ return -EINVAL;
+
+ return __arm_lpae_iopte_walk_dirty(data, &walk_data, ptep, lvl);
+}
+
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
unsigned long granule, page_sizes;
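
[For context, a minimal usage sketch of the new callback; the setup is assumed (an ops handle from alloc_io_pgtable_ops() with ARM_64_LPAE_S1 and IO_PGTABLE_QUIRK_ARM_HD, and an iova_bitmap supplied by the caller) and the snippet is not part of the patch:

	struct iommu_iotlb_gather gather;
	struct iommu_dirty_bitmap dirty;
	int ret;

	iommu_dirty_bitmap_init(&dirty, bitmap, &gather);
	/* record dirty IOVAs in [iova, iova + size) and mark them clean again */
	ret = ops->read_and_clear_dirty(ops, iova, size, 0, &dirty);
	/* pass IOMMU_DIRTY_NO_CLEAR in flags to report without clearing */
]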
@@ -807,6 +917,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
.map_pages = arm_lpae_map_pages,
.unmap_pages = arm_lpae_unmap_pages,
.iova_to_phys = arm_lpae_iova_to_phys,
+ .read_and_clear_dirty = arm_lpae_read_and_clear_dirty,
};
return data;
@@ -822,7 +933,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_ARM_TTBR1 |
- IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
+ IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
+ IO_PGTABLE_QUIRK_ARM_HD))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);