author		Carlos Llamas <cmllamas@google.com>	2023-05-02 20:12:17 +0000
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2023-05-30 14:17:23 +0100
commit		ab8d8e4b888635f1e8007215fdc2e4b0a7504f3b (patch)
tree		c511c433376d2201f44f2cab440ecdf46c88a03a
parent		fa92f3276b91fbfaf4970fba71dbb0799e7a1d3c (diff)
Revert "binder_alloc: add missing mmap_lock calls when using the VMA"
commit b15655b12ddca7ade09807f790bafb6fab61b50a upstream.

This reverts commit 44e602b4e52f70f04620bbbf4fe46ecb40170bde.

This caused a performance regression particularly when pages are
getting reclaimed. We don't need to acquire the mmap_lock to determine
when the binder buffer has been fully initialized. A subsequent patch
will bring back the lockless approach for this.

[cmllamas: resolved trivial conflicts with renaming of alloc->mm]

Fixes: 44e602b4e52f ("binder_alloc: add missing mmap_lock calls when using the VMA")
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20230502201220.1756319-1-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
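For context on the "lockless approach" mentioned above: the idea is that the
VMA pointer is only published after the binder mapping is fully set up, so a
reader that observes a non-NULL pointer can conclude the allocator is
initialized without holding the mmap_lock. Below is a minimal sketch of that
publish/consume pattern; the binder_alloc_set_vma() helper and the alloc->vma
field shown here are assumptions for illustration, not something introduced by
this revert itself.

	/*
	 * Illustrative sketch only: release/acquire publication lets readers
	 * detect "mapping fully initialized" without taking the mmap_lock.
	 */
	static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
						struct vm_area_struct *vma)
	{
		/* pairs with the smp_load_acquire() in binder_alloc_get_vma() */
		smp_store_release(&alloc->vma, vma);
	}

	static inline struct vm_area_struct *binder_alloc_get_vma(
			struct binder_alloc *alloc)
	{
		/* a non-NULL result implies the mapping setup is visible */
		return smp_load_acquire(&alloc->vma);
	}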
-rw-r--r--	drivers/android/binder_alloc.c | 31 ++++++++++---------------------
1 file changed, 10 insertions(+), 21 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 55a3c3c2409f..92c814ec44fe 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -380,15 +380,12 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	size_t size, data_offsets_size;
 	int ret;
 
-	mmap_read_lock(alloc->mm);
 	if (!binder_alloc_get_vma(alloc)) {
-		mmap_read_unlock(alloc->mm);
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 				   "%d: binder_alloc_buf, no vma\n",
 				   alloc->pid);
 		return ERR_PTR(-ESRCH);
 	}
-	mmap_read_unlock(alloc->mm);
 
 	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
 		ALIGN(offsets_size, sizeof(void *));
@@ -916,25 +913,17 @@ void binder_alloc_print_pages(struct seq_file *m,
 	 * Make sure the binder_alloc is fully initialized, otherwise we might
 	 * read inconsistent state.
 	 */
-
-	mmap_read_lock(alloc->mm);
-	if (binder_alloc_get_vma(alloc) == NULL) {
-		mmap_read_unlock(alloc->mm);
-		goto uninitialized;
-	}
-
-	mmap_read_unlock(alloc->mm);
-	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-		page = &alloc->pages[i];
-		if (!page->page_ptr)
-			free++;
-		else if (list_empty(&page->lru))
-			active++;
-		else
-			lru++;
+	if (binder_alloc_get_vma(alloc) != NULL) {
+		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+			page = &alloc->pages[i];
+			if (!page->page_ptr)
+				free++;
+			else if (list_empty(&page->lru))
+				active++;
+			else
+				lru++;
+		}
 	}
-
-uninitialized:
 	mutex_unlock(&alloc->mutex);
 	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
 	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
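
For reference, this is roughly how binder_alloc_print_pages() reads once the
revert is applied, reconstructed from the hunk above. The local variable
declarations and the mutex_lock() call are assumed from context rather than
shown in this diff, and the inline comments are explanatory additions.

	void binder_alloc_print_pages(struct seq_file *m,
				      struct binder_alloc *alloc)
	{
		struct binder_lru_page *page;
		int i;
		int active = 0;
		int lru = 0;
		int free = 0;

		mutex_lock(&alloc->mutex);
		/*
		 * Make sure the binder_alloc is fully initialized, otherwise we might
		 * read inconsistent state.
		 */
		if (binder_alloc_get_vma(alloc) != NULL) {
			for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
				page = &alloc->pages[i];
				if (!page->page_ptr)
					free++;		/* page not resident */
				else if (list_empty(&page->lru))
					active++;	/* resident and in use */
				else
					lru++;		/* resident but reclaimable */
			}
		}
		mutex_unlock(&alloc->mutex);
		seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
		seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
	}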