| author | Byungchul Park <byungchul.park@lge.com> | 2017-08-07 16:12:54 +0900 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2017-08-10 12:29:08 +0200 |
| commit | 28a903f63ec0811ead70ad0f8665e838d207a25e (patch) | |
| tree | bc060a5056284d7e030c22158a6708f97c25ad51 /kernel | |
| parent | 23f873d8f9526ed7e49a1a02a45f8afb9ae5fb84 (diff) | |
| download | linux-28a903f63ec0811ead70ad0f8665e838d207a25e.tar.gz linux-28a903f63ec0811ead70ad0f8665e838d207a25e.tar.bz2 linux-28a903f63ec0811ead70ad0f8665e838d207a25e.zip | |
locking/lockdep: Handle non(or multi)-acquisition of a crosslock
No acquisition might be in progress at the time a crosslock is committed. Completion operations covered by crossrelease are one such case; a sketch of this pattern in terms of the completion API follows the diagram:
   CONTEXT X                         CONTEXT Y
   ---------                         ---------
   trigger completion context
                                     complete AX
                                        commit AX
   wait_for_complete AX
      acquire AX
      wait

   where AX is a crosslock.
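A minimal sketch of that pattern, assuming a worker driven through the standard completion and workqueue APIs; the names done, worker_fn and waiter are made up here for illustration. If the worker finishes before the waiter reaches wait_for_completion(), the crosslock behind the completion is committed while no acquisition of it is in progress:

#include <linux/completion.h>
#include <linux/workqueue.h>

static DECLARE_COMPLETION(done);        /* AX: the crosslock behind this completion */

static void worker_fn(struct work_struct *work)
{
        /* ... do the requested work ... */
        complete(&done);                /* CONTEXT Y: complete AX -> commit AX */
}
static DECLARE_WORK(worker, worker_fn);

static void waiter(void)
{
        schedule_work(&worker);         /* CONTEXT X: trigger completion context */
        /* the worker may already have called complete() at this point */
        wait_for_completion(&done);     /* CONTEXT X: acquire AX, then wait */
}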
When no acquisition is in progress, we must not perform the commit, because the lock might not exist any more, which could cause incorrect memory access. So we have to track the number of acquisitions of a crosslock; a sketch of this bookkeeping follows the second example below.
Moreover, more than one acquisition of a crosslock may overlap, as in:
   CONTEXT W                 CONTEXT X                   CONTEXT Y        CONTEXT Z
   ---------                 ---------                   ---------        ---------
   acquire AX (gen_id: 1)
                                                         acquire A
                             acquire AX (gen_id: 10)
                                                                          acquire B
                                                         commit AX
                                                                          acquire C
                                                                          commit AX

   where A, B and C are typical locks and AX is a crosslock.
The current crossrelease code performs the commits in Y and Z with gen_id = 10. However, gen_id = 1 can be used instead, since not only 'acquire AX in X' but also 'acquire AX in W' depends on each acquisition performed in Y and Z up to their commits. So make those commits use gen_id = 1 instead of 10, which adds the additional dependency 'AX -> A' in the example above.
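Putting the two observations together, here is a rough user-space sketch of the bookkeeping; the names xlock_sketch and sketch_* are illustrative only and not the kernel's data structures, and the depend_after() check in the real patch is omitted. Only the first of the overlapping acquisitions records its generation id, and commit is skipped once the acquisition count drops to zero:

struct xlock_sketch {
        unsigned int nr_acquire;        /* acquisitions not yet released         */
        unsigned int gen_id;            /* gen_id of the oldest live acquisition */
};

static void sketch_acquire(struct xlock_sketch *x, unsigned int gen)
{
        /* Only the first of the overlapping acquisitions records its gen_id. */
        if (x->nr_acquire++ == 0)
                x->gen_id = gen;
}

static void sketch_commit(const struct xlock_sketch *x)
{
        if (!x->nr_acquire)
                return;         /* no acquisition in progress: nothing to commit */
        /*
         * Otherwise walk the per-context history backwards and add a
         * dependency for every lock acquired since x->gen_id, i.e. since
         * the oldest still-outstanding acquisition of the crosslock.
         */
}

static void sketch_release(struct xlock_sketch *x)
{
        x->nr_acquire--;
}

In the example above, committing with gen_id = 10 establishes only 'AX -> B' and 'AX -> C'; committing with gen_id = 1, i.e. relative to the still-outstanding acquisition in W, additionally establishes 'AX -> A'.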
Signed-off-by: Byungchul Park <byungchul.park@lge.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: akpm@linux-foundation.org
Cc: boqun.feng@gmail.com
Cc: kernel-team@lge.com
Cc: kirill@shutemov.name
Cc: npiggin@gmail.com
Cc: walken@google.com
Cc: willy@infradead.org
Link: http://lkml.kernel.org/r/1502089981-21272-8-git-send-email-byungchul.park@lge.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/locking/lockdep.c | 82 |
1 file changed, 56 insertions(+), 26 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index eda8114ef793..7f97871d48d5 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4867,11 +4867,28 @@ static int add_xlock(struct held_lock *hlock)
 
         xlock = &((struct lockdep_map_cross *)hlock->instance)->xlock;
 
+        /*
+         * When acquisitions for a crosslock are overlapped, we use
+         * nr_acquire to perform commit for them, based on cross_gen_id
+         * of the first acquisition, which allows to add additional
+         * dependencies.
+         *
+         * Moreover, when no acquisition of a crosslock is in progress,
+         * we should not perform commit because the lock might not exist
+         * any more, which might cause incorrect memory access. So we
+         * have to track the number of acquisitions of a crosslock.
+         *
+         * depend_after() is necessary to initialize only the first
+         * valid xlock so that the xlock can be used on its commit.
+         */
+        if (xlock->nr_acquire++ && depend_after(&xlock->hlock))
+                goto unlock;
+
         gen_id = (unsigned int)atomic_inc_return(&cross_gen_id);
         xlock->hlock = *hlock;
         xlock->hlock.gen_id = gen_id;
+unlock:
         graph_unlock();
-
         return 1;
 }
 
@@ -4967,35 +4984,37 @@ static void commit_xhlocks(struct cross_lock *xlock)
         if (!graph_lock())
                 return;
 
-        for (i = 0; i < MAX_XHLOCKS_NR; i++) {
-                struct hist_lock *xhlock = &xhlock(cur - i);
+        if (xlock->nr_acquire) {
+                for (i = 0; i < MAX_XHLOCKS_NR; i++) {
+                        struct hist_lock *xhlock = &xhlock(cur - i);
 
-                if (!xhlock_valid(xhlock))
-                        break;
+                        if (!xhlock_valid(xhlock))
+                                break;
 
-                if (before(xhlock->hlock.gen_id, xlock->hlock.gen_id))
-                        break;
+                        if (before(xhlock->hlock.gen_id, xlock->hlock.gen_id))
+                                break;
 
-                if (!same_context_xhlock(xhlock))
-                        break;
+                        if (!same_context_xhlock(xhlock))
+                                break;
 
-                /*
-                 * Filter out the cases that the ring buffer was
-                 * overwritten and the previous entry has a bigger
-                 * hist_id than the following one, which is impossible
-                 * otherwise.
-                 */
-                if (unlikely(before(xhlock->hist_id, prev_hist_id)))
-                        break;
+                        /*
+                         * Filter out the cases that the ring buffer was
+                         * overwritten and the previous entry has a bigger
+                         * hist_id than the following one, which is impossible
+                         * otherwise.
+                         */
+                        if (unlikely(before(xhlock->hist_id, prev_hist_id)))
+                                break;
 
-                prev_hist_id = xhlock->hist_id;
+                        prev_hist_id = xhlock->hist_id;
 
-                /*
-                 * commit_xhlock() returns 0 with graph_lock already
-                 * released if fail.
-                 */
-                if (!commit_xhlock(xlock, xhlock))
-                        return;
+                        /*
+                         * commit_xhlock() returns 0 with graph_lock already
+                         * released if fail.
+                         */
+                        if (!commit_xhlock(xlock, xhlock))
+                                return;
+                }
         }
 
         graph_unlock();
@@ -5039,16 +5058,27 @@ void lock_commit_crosslock(struct lockdep_map *lock)
 EXPORT_SYMBOL_GPL(lock_commit_crosslock);
 
 /*
- * Return: 1 - crosslock, done;
+ * Return: 0 - failure;
+ *         1 - crosslock, done;
  *         2 - normal lock, continue to held_lock[] ops.
  */
 static int lock_release_crosslock(struct lockdep_map *lock)
 {
-        return cross_lock(lock) ? 1 : 2;
+        if (cross_lock(lock)) {
+                if (!graph_lock())
+                        return 0;
+                ((struct lockdep_map_cross *)lock)->xlock.nr_acquire--;
+                graph_unlock();
+                return 1;
+        }
+        return 2;
 }
 
 static void cross_init(struct lockdep_map *lock, int cross)
 {
+        if (cross)
+                ((struct lockdep_map_cross *)lock)->xlock.nr_acquire = 0;
+
         lock->cross = cross;
 
         /*