diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index aaf103b74e16df6c9213cdfe75d7e55f591c5054..1cb36e22c1ed3a701a94032496448a7dcbe27e99 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -44,6 +44,16 @@ config ANDROID_BINDER_DEVICES
 	  created. Each binder device has its own context manager, and is
 	  therefore logically separated from the other devices.
 
+config ANDROID_BINDER_IPC_SELFTEST
+	bool "Android Binder IPC Driver Selftest"
+	depends on ANDROID_BINDER_IPC
+	---help---
+	  This feature allows the binder selftest to run.
+
+	  The binder selftest exhaustively exercises the allocation and
+	  freeing of binder buffers across combinations of buffer sizes
+	  and alignments.
+
 config ASHMEM
 	bool "Enable the Anonymous Shared Memory Subsystem"
 	default n
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 29642b930201fca30e290242bbacf78160aa6485..d9e08b688ec61df84a0f4aa195635df967e681b4 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -4,6 +4,7 @@ obj-y					+= ion/
 obj-$(CONFIG_FIQ_DEBUGGER)		+= fiq_debugger/
 
 obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
 obj-$(CONFIG_ASHMEM)			+= ashmem.o
 obj-$(CONFIG_ANDROID_LOGGER)           += logger.o
 obj-$(CONFIG_ANDROID_TIMED_OUTPUT)	+= timed_output.o
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index decd536fdc2220127e052d1081efe1d537d342c3..4be71bbbbaf00b4b6380b6b351d40cc7ff7b2808 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -3978,6 +3978,8 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	unsigned int size = _IOC_SIZE(cmd);
 	void __user *ubuf = (void __user *)arg;
 
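+	/*
+	 * Run the allocator selftest if it is enabled and has not run
+	 * yet; otherwise this call returns immediately.
+	 */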
+	binder_selftest_alloc(&proc->alloc);
+
 	trace_binder_ioctl(cmd, arg);
 
 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
diff --git a/drivers/staging/android/binder_alloc.c b/drivers/staging/android/binder_alloc.c
index 7228178f5846fe9cfebcfb0e23ba5b02325d310b..043ec56111ede927aea57fa907cb3ee1f45d7981 100644
--- a/drivers/staging/android/binder_alloc.c
+++ b/drivers/staging/android/binder_alloc.c
@@ -30,8 +30,6 @@
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
-#define BINDER_MIN_ALLOC (6 * PAGE_SIZE)
-
 static DEFINE_MUTEX(binder_alloc_mmap_lock);
 
 enum {
diff --git a/drivers/staging/android/binder_alloc.h b/drivers/staging/android/binder_alloc.h
index 96f7c7f2ff388ae5fcb5fed7884c934c95086951..073e3d45f553160fbb7e46b35096fb5abc9e8fa0 100644
--- a/drivers/staging/android/binder_alloc.h
+++ b/drivers/staging/android/binder_alloc.h
@@ -27,6 +27,8 @@
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 
+#define BINDER_MIN_ALLOC (6 * PAGE_SIZE)
+
 struct binder_transaction;
 
 struct binder_buffer {
@@ -65,6 +67,11 @@ struct binder_alloc {
 	int pid;
 };
 
+#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
+void binder_selftest_alloc(struct binder_alloc *alloc);
+#else
+static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
+#endif
 extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 						  size_t data_size,
 						  size_t offsets_size,
diff --git a/drivers/staging/android/binder_alloc_selftest.c b/drivers/staging/android/binder_alloc_selftest.c
new file mode 100644
index 0000000000000000000000000000000000000000..2757187e71842687871b22cf6ab8a631300073bc
--- /dev/null
+++ b/drivers/staging/android/binder_alloc_selftest.c
@@ -0,0 +1,270 @@
+/* binder_alloc_selftest.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm_types.h>
+#include <linux/err.h>
+#include "binder_alloc.h"
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+static bool binder_selftest_run = true;
+static int binder_selftest_failures;
+static DEFINE_MUTEX(binder_selftest_lock);
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer's end
+ * with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer being
+ * aligned and buf1 to the buffer that precedes it by address.
+ * The symbol [ marks the start of a buffer, ] marks the end
+ * of a buffer, and | marks a page boundary.
+ */
+enum buf_end_align_type {
+	/**
+	 * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+	 * the same page as the end of the previous buffer and
+	 * is not page aligned. Examples:
+	 * buf1 ][ buf2 ][ ...
+	 * buf1 ]|[ buf2 ][ ...
+	 */
+	SAME_PAGE_UNALIGNED = 0,
+	/**
+	 * @SAME_PAGE_ALIGNED: The end of this buffer is on
+	 * the same page as the end of the previous buffer and
+	 * is page aligned. Examples:
+	 * buf1 ][ buf2 ]| ...
+	 * buf1 ]|[ buf2 ]| ...
+	 */
+	SAME_PAGE_ALIGNED,
+	/**
+	 * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+	 * the page next to the end of the previous buffer and
+	 * is not page aligned. Examples:
+	 * buf1 ][ buf2 | buf2 ][ ...
+	 * buf1 ]|[ buf2 | buf2 ][ ...
+	 */
+	NEXT_PAGE_UNALIGNED,
+	/**
+	 * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+	 * the page next to the end of the previous buffer and
+	 * is page aligned. Examples:
+	 * buf1 ][ buf2 | buf2 ]| ...
+	 * buf1 ]|[ buf2 | buf2 ]| ...
+	 */
+	NEXT_PAGE_ALIGNED,
+	/**
+	 * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+	 * the page that follows the page after the end of the
+	 * previous buffer and is not page aligned. Examples:
+	 * buf1 ][ buf2 | buf2 | buf2 ][ ...
+	 * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+	 */
+	NEXT_NEXT_UNALIGNED,
+	LOOP_END,
+};
+
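+/* Print the buffer sizes and free order of a failing case. */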
+static void pr_err_size_seq(size_t *sizes, int *seq)
+{
+	int i;
+
+	pr_err("alloc sizes: ");
+	for (i = 0; i < BUFFER_NUM; i++)
+		pr_cont("[%zu]", sizes[i]);
+	pr_cont("\n");
+	pr_err("free seq: ");
+	for (i = 0; i < BUFFER_NUM; i++)
+		pr_cont("[%d]", seq[i]);
+	pr_cont("\n");
+}
+
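+/*
+ * Check that every page spanned by @buffer has a backing page
+ * allocated in alloc->pages.
+ */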
+static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
+					 struct binder_buffer *buffer,
+					 size_t size)
+{
+	void *page_addr, *end;
+	int page_index;
+
+	end = (void *)PAGE_ALIGN((uintptr_t)buffer + size);
+	for (page_addr = buffer; page_addr < end; page_addr += PAGE_SIZE) {
+		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		if (!alloc->pages[page_index]) {
+			pr_err("incorrect alloc state at page index %d\n",
+			       page_index);
+			return false;
+		}
+	}
+	return true;
+}
+
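+/*
+ * Allocate BUFFER_NUM buffers with the given sizes and verify that
+ * their pages are backed, counting any failure.
+ */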
+static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
+				      struct binder_buffer *buffers[],
+				      size_t *sizes, int *seq)
+{
+	int i;
+
+	for (i = 0; i < BUFFER_NUM; i++) {
+		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+		if (IS_ERR(buffers[i]) ||
+		    !check_buffer_pages_allocated(alloc, buffers[i],
+						  sizes[i])) {
+			pr_err_size_seq(sizes, seq);
+			binder_selftest_failures++;
+		}
+	}
+}
+
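+/*
+ * Free the buffers in the order given by @seq, then check that only
+ * pages below BINDER_MIN_ALLOC remain allocated.
+ */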
+static void binder_selftest_free_buf(struct binder_alloc *alloc,
+				     struct binder_buffer *buffers[],
+				     size_t *sizes, int *seq)
+{
+	int i;
+
+	for (i = 0; i < BUFFER_NUM; i++)
+		binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+		if ((!alloc->pages[i]) == (i * PAGE_SIZE < BINDER_MIN_ALLOC)) {
+			pr_err("incorrect free state at page index %d\n", i);
+			binder_selftest_failures++;
+		}
+	}
+}
+
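+/* Run one allocate-then-free pass for the given sizes and free order. */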
+static void binder_selftest_alloc_free(struct binder_alloc *alloc,
+				       size_t *sizes, int *seq)
+{
+	struct binder_buffer *buffers[BUFFER_NUM];
+
+	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+	binder_selftest_free_buf(alloc, buffers, sizes, seq);
+}
+
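+/* Return true if @val already appears in the first @index entries of @seq. */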
+static bool is_dup(int *seq, int index, int val)
+{
+	int i;
+
+	for (i = 0; i < index; i++) {
+		if (seq[i] == val)
+			return true;
+	}
+	return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void binder_selftest_free_seq(struct binder_alloc *alloc,
+				     size_t *sizes, int *seq, int index)
+{
+	int i;
+
+	if (index == BUFFER_NUM) {
+		binder_selftest_alloc_free(alloc, sizes, seq);
+		return;
+	}
+	for (i = 0; i < BUFFER_NUM; i++) {
+		if (is_dup(seq, index, i))
+			continue;
+		seq[index] = i;
+		binder_selftest_free_seq(alloc, sizes, seq, index + 1);
+	}
+}
+
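+/*
+ * Turn the chosen end offsets into buffer sizes, both in forward
+ * (front_sizes) and reversed (back_sizes) order, and test every
+ * free order for each set.
+ */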
+static void binder_selftest_alloc_size(struct binder_alloc *alloc,
+				       size_t *end_offset)
+{
+	int i;
+	int seq[BUFFER_NUM] = {0};
+	size_t front_sizes[BUFFER_NUM];
+	size_t back_sizes[BUFFER_NUM];
+	size_t last_offset, offset = 0;
+
+	for (i = 0; i < BUFFER_NUM; i++) {
+		last_offset = offset;
+		offset = end_offset[i];
+		front_sizes[i] = offset - last_offset;
+		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+	}
+	/*
+	 * Buffers share the first or last few pages.
+	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+	 * we need one giant buffer before getting to the last page.
+	 */
+	if (BINDER_MIN_ALLOC)
+		front_sizes[0] += BINDER_MIN_ALLOC - PAGE_SIZE;
+	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1]
+		- sizeof(struct binder_buffer) * BUFFER_NUM;
+	binder_selftest_free_seq(alloc, front_sizes, seq, 0);
+	binder_selftest_free_seq(alloc, back_sizes, seq, 0);
+}
+
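+/*
+ * Recursively choose an end offset for each buffer so that all
+ * combinations of buf_end_align_type cases are exercised.
+ */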
+static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
+					 size_t *end_offset, int index)
+{
+	int align;
+	size_t end, prev;
+
+	if (index == BUFFER_NUM) {
+		binder_selftest_alloc_size(alloc, end_offset);
+		return;
+	}
+	prev = index == 0 ? 0 : end_offset[index - 1];
+	end = prev;
+
+	BUILD_BUG_ON((BUFFER_MIN_SIZE + sizeof(struct binder_buffer))
+		     * BUFFER_NUM >= PAGE_SIZE);
+
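+	/*
+	 * end accumulates across iterations: even align values advance
+	 * it by BUFFER_MIN_SIZE, odd values round it up to a page
+	 * boundary, producing the five buf_end_align_type cases.
+	 */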
+	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+		if (align % 2)
+			end = ALIGN(end, PAGE_SIZE);
+		else
+			end += BUFFER_MIN_SIZE;
+		end_offset[index] = end;
+		binder_selftest_alloc_offset(alloc, end_offset, index + 1);
+	}
+}
+
+/**
+ * binder_selftest_alloc() - Test alloc and free of buffer pages.
+ * @alloc: Pointer to alloc struct.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in every possible order. Verify that backing pages
+ * are allocated while buffers are in use and released once freed.
+ */
+void binder_selftest_alloc(struct binder_alloc *alloc)
+{
+	size_t end_offset[BUFFER_NUM];
+
+	if (!binder_selftest_run)
+		return;
+	mutex_lock(&binder_selftest_lock);
+	if (!binder_selftest_run || !alloc->vma)
+		goto done;
+	pr_info("STARTED\n");
+	binder_selftest_alloc_offset(alloc, end_offset, 0);
+	binder_selftest_run = false;
+	if (binder_selftest_failures > 0)
+		pr_info("%d tests FAILED\n", binder_selftest_failures);
+	else
+		pr_info("PASSED\n");
+
+done:
+	mutex_unlock(&binder_selftest_lock);
+}