Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/barrier.h    |  2 +-
-rw-r--r--  include/asm-generic/pgtable.h    | 10 +++++++++-
-rw-r--r--  include/asm-generic/qspinlock.h  |  9 +++++----
3 files changed, 15 insertions(+), 6 deletions(-)
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index b42afada1280..0f45f93ef692 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -93,7 +93,7 @@
 #endif	/* CONFIG_SMP */
 
 #ifndef smp_store_mb
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 14b0ff32fb9f..3a6803cb0ec9 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -569,7 +569,7 @@ static inline int track_pfn_copy(struct vm_area_struct *vma)
 }
 
 /*
- * untrack_pfn_vma is called while unmapping a pfnmap for a region.
+ * untrack_pfn is called while unmapping a pfnmap for a region.
  * untrack can be called for a specific region indicated by pfn and size or
  * can be for the entire vma (in which case pfn, size are zero).
  */
@@ -577,6 +577,13 @@ static inline void untrack_pfn(struct vm_area_struct *vma,
 			       unsigned long pfn, unsigned long size)
 {
 }
+
+/*
+ * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
+ */
+static inline void untrack_pfn_moved(struct vm_area_struct *vma)
+{
+}
 #else
 extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 			   unsigned long pfn, unsigned long addr,
@@ -586,6 +593,7 @@ extern int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
 extern int track_pfn_copy(struct vm_area_struct *vma);
 extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 			unsigned long size);
+extern void untrack_pfn_moved(struct vm_area_struct *vma);
 #endif
 
 #ifdef __HAVE_COLOR_ZERO_PAGE
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index e2aadbc7151f..39e1cb201b8e 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -12,8 +12,9 @@
  * GNU General Public License for more details.
  *
  * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
  *
- * Authors: Waiman Long <waiman.long@hp.com>
+ * Authors: Waiman Long <waiman.long@hpe.com>
 */
 #ifndef __ASM_GENERIC_QSPINLOCK_H
 #define __ASM_GENERIC_QSPINLOCK_H
@@ -62,7 +63,7 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 {
 	if (!atomic_read(&lock->val) &&
-	   (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
+	   (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
 		return 1;
 	return 0;
 }
@@ -77,7 +78,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
 	u32 val;
 
-	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
+	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
 	if (likely(val == 0))
 		return;
 	queued_spin_lock_slowpath(lock, val);
@@ -93,7 +94,7 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 	/*
 	 * smp_mb__before_atomic() in order to guarantee release semantics
 	 */
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 	atomic_sub(_Q_LOCKED_VAL, &lock->val);
 }
 #endif
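
The qspinlock hunks above switch the lock fast path from a full-barrier atomic_cmpxchg() to atomic_cmpxchg_acquire(), while the unlock path provides release ordering by issuing smp_mb__before_atomic() ahead of the atomic_sub() that clears _Q_LOCKED_VAL. The sketch below is a minimal userspace analogue of that acquire/release pairing, written with C11 <stdatomic.h>; it is not kernel code, and the tiny_lock type and helper names are hypothetical stand-ins for the kernel's atomic_t API.

/* Userspace analogue of the qspinlock fast path in the diff above.
 * memory_order_acquire/release stand in for the kernel's
 * atomic_cmpxchg_acquire() and smp_mb__before_atomic() + atomic_sub(). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define LOCKED_VAL 1            /* stands in for _Q_LOCKED_VAL */

struct tiny_lock {
	atomic_uint val;        /* 0 = unlocked, LOCKED_VAL = locked */
};

/* Fast-path trylock: claim the lock word with acquire ordering so loads
 * and stores in the critical section cannot move before the acquisition. */
static bool tiny_trylock(struct tiny_lock *lock)
{
	unsigned int expected = 0;

	return atomic_load_explicit(&lock->val, memory_order_relaxed) == 0 &&
	       atomic_compare_exchange_strong_explicit(&lock->val, &expected,
						       LOCKED_VAL,
						       memory_order_acquire,
						       memory_order_relaxed);
}

/* Unlock: release ordering makes the critical section's stores visible
 * before the lock word is cleared, the role smp_mb__before_atomic() plays
 * ahead of atomic_sub() in the kernel version. */
static void tiny_unlock(struct tiny_lock *lock)
{
	atomic_fetch_sub_explicit(&lock->val, LOCKED_VAL, memory_order_release);
}

int main(void)
{
	struct tiny_lock lock = { .val = 0 };

	if (tiny_trylock(&lock)) {
		printf("lock acquired on the fast path\n");
		tiny_unlock(&lock);
	}
	return 0;
}

On strongly ordered architectures such as x86 the acquire variant typically compiles to the same locked cmpxchg as the full-barrier form, but on weakly ordered architectures (arm64, PowerPC) it can drop a heavier barrier on the uncontended path, which is the point of the change.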