Skip to content
Snippets Groups Projects
Commit e12f1f1c authored by Joel Fernandes
Browse files

binder: always allocate/map first BINDER_MIN_ALLOC pages


Certain use cases, such as the camera, constantly allocate and free
binder buffers beyond the first 4k, resulting in mmap_sem contention.

If we expand the allocated range from 4k to something higher, we can
reduce the contention. Tests show that 6 pages is enough to cause very
little update_page_range operations and reduces contention.

Bug: 36727951

Change-Id: I28bc3fb9b33c764c257e28487712fce2a3c1078b
Reported-by: Tim Murray <timmurray@google.com>
Signed-off-by: Joel Fernandes <joelaf@google.com>
parent 4504c13e
No related branches found
No related tags found
No related merge requests found
......@@ -30,6 +30,8 @@
#include "binder_alloc.h"
#include "binder_trace.h"
#define BINDER_MIN_ALLOC (6 * PAGE_SIZE)
static DEFINE_MUTEX(binder_alloc_mmap_lock);
enum {
......@@ -156,7 +158,7 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
return NULL;
}
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
static int __binder_update_page_range(struct binder_alloc *alloc, int allocate,
void *start, void *end,
struct vm_area_struct *vma)
{
......@@ -258,6 +260,20 @@ err_no_vma:
return vma ? -ENOMEM : -ESRCH;
}
/**
 * binder_update_page_range() - map/unmap pages, sparing the pinned region
 * @alloc:    binder_alloc whose buffer pages are being updated
 * @allocate: nonzero to allocate/map pages, zero to free/unmap them
 * @start:    first address of the range to update
 * @end:      one past the last address of the range to update
 * @vma:      vma covering the binder buffer, or NULL
 *
 * The first BINDER_MIN_ALLOC bytes of the buffer are mapped once at mmap
 * time and are never released while the mapping lives, so a regular update
 * must not touch them.  Clamp @start to the first byte past that pinned
 * region before delegating to __binder_update_page_range().
 *
 * Return: 0 on success, negative errno on failure.
 */
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *pinned_end = alloc->buffer + BINDER_MIN_ALLOC;

	if (start < pinned_end)
		start = pinned_end;

	return __binder_update_page_range(alloc, allocate, start, end, vma);
}
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
......@@ -588,8 +604,8 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
}
alloc->buffer_size = vma->vm_end - vma->vm_start;
if (binder_update_page_range(alloc, 1, alloc->buffer,
alloc->buffer + PAGE_SIZE, vma)) {
if (__binder_update_page_range(alloc, 1, alloc->buffer,
alloc->buffer + BINDER_MIN_ALLOC, vma)) {
ret = -ENOMEM;
failure_string = "alloc small buf";
goto err_alloc_small_buf_failed;
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment