From 3913a5255391b734cd4bd1c0324fde02e6c1551d Mon Sep 17 00:00:00 2001
From: Sherry Yang <sherryy@android.com>
Date: Thu, 3 Aug 2017 11:33:53 -0700
Subject: [PATCH] android: binder: Move buffer out of area shared with user space

Binder driver allocates buffer meta data in a region that is mapped
in user space. These meta data contain pointers in the kernel.

This patch allocates buffer meta data on the kernel heap that is
not mapped in user space, and uses a pointer to refer to the data mapped.

Also move alloc->buffers initialization from mmap to init since it's
now used even when mmap failed or was not called.

Bug: 36007193
Change-Id: I691d522f46388b00400615c8bc55f0dceee821c8
Signed-off-by: Sherry Yang <sherryy@android.com>
---
 drivers/staging/android/binder_alloc.c        | 146 +++++++++++-------
 drivers/staging/android/binder_alloc.h        |   2 +-
 .../staging/android/binder_alloc_selftest.c   |  11 +-
 3 files changed, 94 insertions(+), 65 deletions(-)

diff --git a/drivers/staging/android/binder_alloc.c b/drivers/staging/android/binder_alloc.c
index 043ec56111ed..4d59c9b175cf 100644
--- a/drivers/staging/android/binder_alloc.c
+++ b/drivers/staging/android/binder_alloc.c
@@ -62,9 +62,9 @@ static size_t binder_buffer_size(struct binder_alloc *alloc,
 				 struct binder_buffer *buffer)
 {
 	if (list_is_last(&buffer->entry, &alloc->buffers))
-		return alloc->buffer +
-		       alloc->buffer_size - (void *)buffer->data;
-	return (size_t)binder_buffer_next(buffer) - (size_t)buffer->data;
+		return (u8 *)alloc->buffer +
+			alloc->buffer_size - (u8 *)buffer->data;
+	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
 }
 
 static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -114,9 +114,9 @@ static void binder_insert_allocated_buffer(struct binder_alloc *alloc,
 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (new_buffer < buffer)
+		if (new_buffer->data < buffer->data)
 			p = &parent->rb_left;
-		else if (new_buffer > buffer)
+		else if (new_buffer->data > buffer->data)
 			p = &parent->rb_right;
 		else
 			BUG();
@@ -130,20 +130,19 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
 {
 	struct rb_node *n;
 	struct binder_buffer *buffer;
-	struct binder_buffer *kern_ptr;
+	void *kern_ptr;
 
 	mutex_lock(&alloc->mutex);
-	kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
-		- offsetof(struct binder_buffer, data));
+	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
 
 	n = alloc->allocated_buffers.rb_node;
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (kern_ptr < buffer)
+		if (kern_ptr < buffer->data)
 			n = n->rb_left;
-		else if (kern_ptr > buffer)
+		else if (kern_ptr > buffer->data)
 			n = n->rb_right;
 		else {
 			/*
@@ -332,6 +331,9 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 		goto error_unlock;
 	}
 
+	/* Pad 0-size buffers so they get assigned unique addresses */
+	size = max(size, sizeof(void *));
+
 	n = alloc->free_buffers.rb_node;
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -393,14 +395,9 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 
 	has_page_addr =
 		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
-	if (n == NULL) {
-		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
-			buffer_size = size; /* no room for other buffers */
-		else
-			buffer_size = size + sizeof(struct binder_buffer);
-	}
+	WARN_ON(n && buffer_size != size);
 	end_page_addr =
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
 	if (end_page_addr > has_page_addr)
 		end_page_addr = has_page_addr;
 	ret = binder_update_page_range(alloc, 1,
@@ -409,18 +406,26 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 		eret = ERR_PTR(ret);
 		goto error_unlock;
 	}
-
-	rb_erase(best_fit, &alloc->free_buffers);
-	buffer->free = 0;
-	buffer->free_in_progress = 0;
-	binder_insert_allocated_buffer(alloc, buffer);
 	if (buffer_size != size) {
-		struct binder_buffer *new_buffer = (void *)buffer->data + size;
-
+		struct binder_buffer *new_buffer;
+
+		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+		if (!new_buffer) {
+			pr_err("%s: %d failed to alloc new buffer struct\n",
+			       __func__, alloc->pid);
+			eret = ERR_PTR(-ENOMEM);
+			goto err_alloc_buf_struct_failed;
+		}
+		new_buffer->data = (u8 *)buffer->data + size;
 		list_add(&new_buffer->entry, &buffer->entry);
 		new_buffer->free = 1;
 		binder_insert_free_buffer(alloc, new_buffer);
 	}
+
+	rb_erase(best_fit, &alloc->free_buffers);
+	buffer->free = 0;
+	buffer->free_in_progress = 0;
+	binder_insert_allocated_buffer(alloc, buffer);
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 			   "%d: binder_alloc_buf size %zd got %pK\n",
 			   alloc->pid, size, buffer);
@@ -438,6 +443,10 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 
 	return buffer;
 
+err_alloc_buf_struct_failed:
+	binder_update_page_range(alloc, 0,
+				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+				 end_page_addr, NULL);
 error_unlock:
 	mutex_unlock(&alloc->mutex);
 	return eret;
@@ -445,56 +454,59 @@ error_unlock:
 
 static void *buffer_start_page(struct binder_buffer *buffer)
 {
-	return (void *)((uintptr_t)buffer & PAGE_MASK);
+	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
 }
 
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
 {
-	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
 }
 
 static void binder_delete_free_buffer(struct binder_alloc *alloc,
 				      struct binder_buffer *buffer)
 {
 	struct binder_buffer *prev, *next = NULL;
-	int free_page_end = 1;
-	int free_page_start = 1;
-
+	bool to_free = true;
 	BUG_ON(alloc->buffers.next == &buffer->entry);
 	prev = binder_buffer_prev(buffer);
 	BUG_ON(!prev->free);
-	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
-		free_page_start = 0;
-		if (buffer_end_page(prev) == buffer_end_page(buffer))
-			free_page_end = 0;
+	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+		to_free = false;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				   "%d: merge free, buffer %pK share page with %pK\n",
-				   alloc->pid, buffer, prev);
+				   "%d: merge free, buffer %pK share page with %pK\n",
+				   alloc->pid, buffer->data, prev->data);
 	}
 
 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
 		next = binder_buffer_next(buffer);
-		if (buffer_start_page(next) == buffer_end_page(buffer)) {
-			free_page_end = 0;
-			if (buffer_start_page(next) ==
-			    buffer_start_page(buffer))
-				free_page_start = 0;
+		if (buffer_start_page(next) == buffer_start_page(buffer)) {
+			to_free = false;
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-					   "%d: merge free, buffer %pK share page with %pK\n",
-					   alloc->pid, buffer, prev);
+					   "%d: merge free, buffer %pK share page with %pK\n",
+					   alloc->pid,
+					   buffer->data,
+					   next->data);
 		}
 	}
-	list_del(&buffer->entry);
-	if (free_page_start || free_page_end) {
+
+	if (PAGE_ALIGNED(buffer->data)) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				   "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
-				   alloc->pid, buffer, free_page_start ? "" : " end",
-				   free_page_end ? "" : " start", prev, next);
-		binder_update_page_range(alloc, 0, free_page_start ?
-			buffer_start_page(buffer) : buffer_end_page(buffer),
-			(free_page_end ? buffer_end_page(buffer) :
-			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+				   "%d: merge free, buffer start %pK is page aligned\n",
+				   alloc->pid, buffer->data);
+		to_free = false;
 	}
+
+	if (to_free) {
+		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+				   alloc->pid, buffer->data,
+				   prev->data, next->data);
+		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+					 buffer_start_page(buffer) + PAGE_SIZE,
+					 NULL);
+	}
+	list_del(&buffer->entry);
+	kfree(buffer);
 }
 
 static void binder_free_buf_locked(struct binder_alloc *alloc,
@@ -515,8 +527,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	BUG_ON(buffer->free);
 	BUG_ON(size > buffer_size);
 	BUG_ON(buffer->transaction != NULL);
-	BUG_ON((void *)buffer < alloc->buffer);
-	BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+	BUG_ON(buffer->data < alloc->buffer);
+	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
 
 	if (buffer->async_transaction) {
 		alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -608,14 +620,20 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	}
 	alloc->buffer_size = vma->vm_end - vma->vm_start;
 
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer) {
+		ret = -ENOMEM;
+		failure_string = "alloc buffer struct";
+		goto err_alloc_buf_struct_failed;
+	}
+
 	if (__binder_update_page_range(alloc, 1, alloc->buffer,
 				       alloc->buffer + BINDER_MIN_ALLOC, vma)) {
 		ret = -ENOMEM;
 		failure_string = "alloc small buf";
 		goto err_alloc_small_buf_failed;
 	}
-	buffer = alloc->buffer;
-	INIT_LIST_HEAD(&alloc->buffers);
+	buffer->data = alloc->buffer;
 	list_add(&buffer->entry, &alloc->buffers);
 	buffer->free = 1;
 	binder_insert_free_buffer(alloc, buffer);
@@ -628,6 +646,8 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	return 0;
 
 err_alloc_small_buf_failed:
+	kfree(buffer);
+err_alloc_buf_struct_failed:
 	kfree(alloc->pages);
 	alloc->pages = NULL;
 err_alloc_pages_failed:
@@ -647,14 +667,13 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 {
 	struct rb_node *n;
 	int buffers, page_count;
+	struct binder_buffer *buffer;
 
 	BUG_ON(alloc->vma);
 
 	buffers = 0;
 	mutex_lock(&alloc->mutex);
 	while ((n = rb_first(&alloc->allocated_buffers))) {
-		struct binder_buffer *buffer;
-
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 
 		/* Transactiopn should already have been freed */
@@ -664,6 +683,16 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		buffers++;
 	}
 
+	while (!list_empty(&alloc->buffers)) {
+		buffer = list_first_entry(&alloc->buffers,
+					  struct binder_buffer, entry);
+		WARN_ON(!buffer->free);
+
+		list_del(&buffer->entry);
+		WARN_ON_ONCE(!list_empty(&alloc->buffers));
+		kfree(buffer);
+	}
+
 	page_count = 0;
 	if (alloc->pages) {
 		int i;
@@ -739,5 +768,6 @@ void binder_alloc_init(struct binder_alloc *alloc)
 	alloc->tsk = current->group_leader;
 	alloc->pid = current->group_leader->pid;
 	mutex_init(&alloc->mutex);
+	INIT_LIST_HEAD(&alloc->buffers);
 }
 
diff --git a/drivers/staging/android/binder_alloc.h b/drivers/staging/android/binder_alloc.h
index 073e3d45f553..cb6e94265c64 100644
--- a/drivers/staging/android/binder_alloc.h
+++ b/drivers/staging/android/binder_alloc.h
@@ -47,7 +47,7 @@ struct binder_buffer {
 	size_t data_size;
 	size_t offsets_size;
 	size_t extra_buffers_size;
-	uint8_t data[0];
+	void *data;
 };
 
 struct binder_alloc {
diff --git a/drivers/staging/android/binder_alloc_selftest.c b/drivers/staging/android/binder_alloc_selftest.c
index 2757187e7184..daff2e675925 100644
--- a/drivers/staging/android/binder_alloc_selftest.c
+++ b/drivers/staging/android/binder_alloc_selftest.c
@@ -102,8 +102,9 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
 	void *page_addr, *end;
 	int page_index;
 
-	end = (void *)PAGE_ALIGN((uintptr_t)buffer + size);
-	for (page_addr = buffer; page_addr < end; page_addr += PAGE_SIZE) {
+	end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+	page_addr = buffer->data;
+	for (; page_addr < end; page_addr += PAGE_SIZE) {
 		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
 		if (!alloc->pages[page_index]) {
 			pr_err("incorrect alloc state at page index %d\n",
@@ -208,8 +209,7 @@ static void binder_selftest_alloc_size(struct binder_alloc *alloc,
 	 */
 	if (BINDER_MIN_ALLOC)
 		front_sizes[0] += BINDER_MIN_ALLOC - PAGE_SIZE;
-	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1] -
-		sizeof(struct binder_buffer) * BUFFER_NUM;
+	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
 	binder_selftest_free_seq(alloc, front_sizes, seq, 0);
 	binder_selftest_free_seq(alloc, back_sizes, seq, 0);
 }
@@ -227,8 +227,7 @@ static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
 	prev = index == 0 ? 0 : end_offset[index - 1];
 	end = prev;
 
-	BUILD_BUG_ON((BUFFER_MIN_SIZE + sizeof(struct binder_buffer))
-		     * BUFFER_NUM >= PAGE_SIZE);
+	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
 
 	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
 		if (align % 2)
-- 
GitLab