From 38cc776d974c4b7883067d9aa5c7785528b4ecd2 Mon Sep 17 00:00:00 2001
From: Taniya Das <tdas@codeaurora.org>
Date: Thu, 12 Jun 2014 19:42:06 +0530
Subject: [PATCH] arm: Skip DMA zeroing completely only if specified

With the DMA attribute DMA_ATTR_SKIP_ZEROING, a DMA allocation skips not
only the memset but also the cache flush operations. This leaves dirty
lines in the caches which, when evicted later, can corrupt the allocated
buffer. Some clients can tolerate this, but others still need the cache
maintenance to be performed.

Switch the code to skip __dma_clear_buffer() entirely only when both
DMA_ATTR_SKIP_ZEROING and DMA_ATTR_SKIP_CPU_SYNC are specified. If only
DMA_ATTR_SKIP_ZEROING is specified, skip the zeroing but still do the
cache operations.

Change-Id: I81dd6bbeed7ec7f1dce28cd8d0aba7c8900ec550
Signed-off-by: Taniya Das <tdas@codeaurora.org>
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
---
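Illustration only, not part of this patch: a minimal sketch of how a
caller could use the two attributes after this change, assuming the
legacy struct dma_attrs API used by this tree, the msm-specific
DMA_ATTR_SKIP_ZEROING attribute, and a hypothetical device pointer
'dev':

	DEFINE_DMA_ATTRS(attrs);
	dma_addr_t dma_handle;
	size_t size = SZ_4K;
	void *buf;

	/*
	 * Skip only the memset; __dma_clear_buffer() still performs the
	 * cache flush, so no dirty lines can later corrupt the buffer.
	 */
	dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);

	/*
	 * A caller that does its own cache maintenance would also set
	 * DMA_ATTR_SKIP_CPU_SYNC, which now skips the clear entirely:
	 *
	 *	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	 */

	buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);
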
 arch/arm/mm/dma-mapping.c | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 054cba0e59f9..f9cb0d2cc49f 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -219,7 +219,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
 	return mask;
 }
 
-static void __dma_clear_buffer(struct page *page, size_t size)
+static void __dma_clear_buffer(struct page *page, size_t size,
+					struct dma_attrs *attrs)
 {
 	/*
 	 * Ensure that the allocated pages are zeroed, and that any data
@@ -230,7 +231,8 @@ static void __dma_clear_buffer(struct page *page, size_t size)
 		phys_addr_t end = base + size;
 		while (size > 0) {
 			void *ptr = kmap_atomic(page);
-			memset(ptr, 0, PAGE_SIZE);
+			if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
+				memset(ptr, 0, PAGE_SIZE);
 			dmac_flush_range(ptr, ptr + PAGE_SIZE);
 			kunmap_atomic(ptr);
 			page++;
@@ -239,7 +241,8 @@ static void __dma_clear_buffer(struct page *page, size_t size)
 		outer_flush_range(base, end);
 	} else {
 		void *ptr = page_address(page);
-		memset(ptr, 0, size);
+		if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
+			memset(ptr, 0, size);
 		dmac_flush_range(ptr, ptr + size);
 		outer_flush_range(__pa(ptr), __pa(ptr) + size);
 	}
@@ -265,7 +268,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
 	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
 		__free_page(p);
 
-	__dma_clear_buffer(page, size);
+	__dma_clear_buffer(page, size, NULL);
 
 	return page;
 }
@@ -627,8 +630,12 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
 
 	page = pfn_to_page(pfn);
 
-	if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
-		__dma_clear_buffer(page, size);
+	/*
+	 * Skip entirely only if we need neither zeroing nor a CPU sync.
+	 */
+	if (!(dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs) &&
+	      dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs)))
+		__dma_clear_buffer(page, size, attrs);
 
 	if (PageHighMem(page)) {
 		if (no_kernel_mapping) {
@@ -1170,7 +1177,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 
 		page = pfn_to_page(pfn);
 
-		__dma_clear_buffer(page, size);
+		__dma_clear_buffer(page, size, NULL);
 
 		for (i = 0; i < count; i++)
 			pages[i] = page + i;
@@ -1199,7 +1206,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 				pages[i + j] = pages[i] + j;
 		}
 
-		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
+		__dma_clear_buffer(pages[i], PAGE_SIZE << order, NULL);
 		i += 1 << order;
 		count -= 1 << order;
 	}
-- 
GitLab