author		Zhen Lei <thunder.leizhen@huawei.com>	2018-09-20 17:10:24 +0100
committer	Will Deacon <will.deacon@arm.com>	2018-10-01 13:01:33 +0100
commit		b6b65ca20bc93d14319f9b5cf98fd3c19a4244e3 (patch)
tree		ce01effa867ef81b01481259e0fcccb02ad66dd0 /drivers/iommu/io-pgtable-arm.c
parent		68a6efe86f6a16e25556a2aff40efad41097b486 (diff)
iommu/io-pgtable-arm: Add support for non-strict mode
Non-strict mode is simply a case of skipping 'regular' leaf TLBIs, since
the sync is already factored out into ops->iotlb_sync at the core API
level. Non-leaf invalidations where we change the page table structure
itself still have to be issued synchronously in order to maintain walk
caches correctly.
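As a concrete illustration, the unmap-side decision ends up looking roughly like the sketch below (simplified from the hunk further down; "entry_was_table" and "granule" are placeholders, not real variables in the driver):

	/*
	 * Simplified sketch of the decision in __arm_lpae_unmap(); see the
	 * actual hunk in the diff below.
	 */
	if (entry_was_table) {
		/* Non-leaf change: flush partial walks and wait for completion */
		io_pgtable_tlb_add_flush(iop, iova, size, granule, false);
		io_pgtable_tlb_sync(iop);
	} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
		/* Leaf entry, non-strict: defer the TLBI to ops->iotlb_sync,
		 * just order the PTE update against queueing the IOVA */
		smp_wmb();
	} else {
		/* Leaf entry, strict: invalidate the IOVA right away */
		io_pgtable_tlb_add_flush(iop, iova, size, size, true);
	}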
To save having to reason about it too much, make sure the invalidation
in arm_lpae_split_blk_unmap() just performs its own unconditional sync
to minimise the window in which we're technically violating the break-
before-make requirement on a live mapping. This might work out redundant
with an outer-level sync for strict unmaps, but we'll never be splitting
blocks on a DMA fastpath anyway.
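In other words, the block-splitting path now pairs its flush with its own sync (shown here out of context; the full hunk is in the diff below):

	/* In arm_lpae_split_blk_unmap(): if the region being unmapped was
	 * covered by the old block entry, flush it and sync immediately
	 * instead of relying on an outer sync that non-strict mode may
	 * have deferred. */
	io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
	io_pgtable_tlb_sync(&data->iop);
	return size;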
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
[rm: tweak comment, commit message, split_blk_unmap logic and barriers]
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'drivers/iommu/io-pgtable-arm.c')
-rw-r--r--	drivers/iommu/io-pgtable-arm.c | 14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 2f79efd16a05..237cacd4a62b 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -576,6 +576,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 		tablep = iopte_deref(pte, data);
 	} else if (unmap_idx >= 0) {
 		io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
+		io_pgtable_tlb_sync(&data->iop);
 		return size;
 	}
 
@@ -609,6 +610,13 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 			io_pgtable_tlb_sync(iop);
 			ptep = iopte_deref(pte, data);
 			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
+		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
+			/*
+			 * Order the PTE update against queueing the IOVA, to
+			 * guarantee that a flush callback from a different CPU
+			 * has observed it before the TLBIALL can be issued.
+			 */
+			smp_wmb();
 		} else {
 			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
 		}
@@ -771,7 +779,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 	u64 reg;
 	struct arm_lpae_io_pgtable *data;
 
-	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA))
+	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
+			    IO_PGTABLE_QUIRK_NON_STRICT))
 		return NULL;
 
 	data = arm_lpae_alloc_pgtable(cfg);
@@ -863,7 +872,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 	struct arm_lpae_io_pgtable *data;
 
 	/* The NS quirk doesn't apply at stage 2 */
-	if (cfg->quirks & ~IO_PGTABLE_QUIRK_NO_DMA)
+	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NO_DMA |
+			    IO_PGTABLE_QUIRK_NON_STRICT))
 		return NULL;
 
 	data = arm_lpae_alloc_pgtable(cfg);
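For reference, a hedged sketch of how an IOMMU driver might opt in to the new quirk when allocating its page tables. Only the IO_PGTABLE_QUIRK_NON_STRICT handling comes from this patch; the function, config values and my_tlb_ops are illustrative placeholders:

	/* Hypothetical driver snippet; io-pgtable.h still lives under
	 * drivers/iommu/ at this point in the tree. */
	#include <linux/device.h>
	#include <linux/sizes.h>
	#include "io-pgtable.h"

	static struct io_pgtable_ops *
	my_alloc_pgtbl(struct device *dev, const struct iommu_gather_ops *my_tlb_ops,
		       void *cookie, bool non_strict)
	{
		struct io_pgtable_cfg cfg = {
			.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
			.ias		= 48,
			.oas		= 48,
			.tlb		= my_tlb_ops,
			.iommu_dev	= dev,
		};

		/* Opt in to deferred leaf invalidations (DMA flush queue) */
		if (non_strict)
			cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

		/* Both the stage-1 and stage-2 LPAE formats now accept the quirk */
		return alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
	}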