diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 66df085b715bf6010bf22f074a3b607c1b02cc96..9a428a1a33f229c6f828afa6ef4bac9d901c1928 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -580,13 +580,16 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
{
unsigned long order = get_order(size);
size_t count = size >> PAGE_SHIFT;
+ unsigned long pfn;
struct page *page;
void *ptr;
- page = dma_alloc_from_contiguous(dev, count, order);
- if (!page)
+ pfn = dma_alloc_from_contiguous(dev, count, order);
+ if (!pfn)
return NULL;
+ page = pfn_to_page(pfn);
+
__dma_clear_buffer(page, size);
if (PageHighMem(page)) {
@@ -601,7 +604,7 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot,
caller);
if (!ptr) {
- dma_release_from_contiguous(dev, page, count);
+ dma_release_from_contiguous(dev, pfn, count);
return NULL;
}
}
@@ -620,7 +623,7 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
__dma_free_remap(cpu_addr, size, true);
else
__dma_remap(page, size, pgprot_kernel, false);
- dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+ dma_release_from_contiguous(dev, page_to_pfn(page), size >> PAGE_SHIFT);
}
static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
@@ -1127,11 +1130,14 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
{
unsigned long order = get_order(size);
struct page *page;
+ unsigned long pfn;
- page = dma_alloc_from_contiguous(dev, count, order);
- if (!page)
+ pfn = dma_alloc_from_contiguous(dev, count, order);
+ if (!pfn)
goto error;
+ page = pfn_to_page(pfn);
+
__dma_clear_buffer(page, size);
for (i = 0; i < count; i++)
@@ -1186,7 +1192,7 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages,
int i;
if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
- dma_release_from_contiguous(dev, pages[0], count);
+ dma_release_from_contiguous(dev, page_to_pfn(pages[0]), count);
} else {
for (i = 0; i < count; i++)
if (pages[i])
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 46e93e5d4773bfd9ac026021121ac0b2f472b24b..0321a12d6fc55c1a9378760b72c7d914d2f774e4 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -494,17 +494,16 @@ phys_addr_t cma_get_base(struct device *dev)
* global one. Requires architecture specific get_dev_cma_area() helper
* function.
*/
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+unsigned long dma_alloc_from_contiguous(struct device *dev, int count,
unsigned int align)
{
- unsigned long mask, pfn, pageno, start = 0;
+ unsigned long mask, pfn = 0, pageno, start = 0;
struct cma *cma = dev_get_cma_area(dev);
- struct page *page = NULL;
int ret = 0;
int tries = 0;
if (!cma || !cma->count)
- return NULL;
+ return 0;
if (align > CONFIG_CMA_ALIGNMENT)
align = CONFIG_CMA_ALIGNMENT;
@@ -513,7 +512,7 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
count, align);
if (!count)
- return NULL;
+ return 0;
mask = (1 << align) - 1;
@@ -530,7 +529,7 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
if (ret == 0) {
bitmap_set(cma->bitmap, pageno, count);
- page = pfn_to_page(pfn);
break;
} else if (ret != -EBUSY) {
+ pfn = 0;
break;
@@ -545,8 +543,8 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
}
mutex_unlock(&cma_mutex);
- pr_debug("%s(): returned %p\n", __func__, page);
- return page;
+ pr_debug("%s(): returned %lx\n", __func__, pfn);
+ return pfn;
}
/**
@@ -559,18 +557,15 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
- * It returns false when provided pages do not belong to contiguous area and
- * true otherwise.
+ * It returns false when the provided pfn does not belong to the contiguous
+ * area and true otherwise.
*/
-bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+bool dma_release_from_contiguous(struct device *dev, unsigned long pfn,
int count)
{
struct cma *cma = dev_get_cma_area(dev);
- unsigned long pfn;
- if (!cma || !pages)
+ if (!cma || !pfn)
return false;
- pr_debug("%s(page %p)\n", __func__, (void *)pages);
-
- pfn = page_to_pfn(pages);
+ pr_debug("%s(pfn %lx)\n", __func__, pfn);
if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
return false;
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 9975eef4cad7971633e45eb0bb7620fecdd4857c..cea8a6fd64f0bfa2ffb3abef1b40413f9cd6a0a6 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -113,9 +113,9 @@ static inline int dma_declare_contiguous_reserved(struct device *dev,
return ret;
}
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+unsigned long dma_alloc_from_contiguous(struct device *dev, int count,
unsigned int order);
-bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+bool dma_release_from_contiguous(struct device *dev, unsigned long pfn,
int count);
#else
@@ -132,14 +132,14 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size,
}
static inline
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+unsigned long dma_alloc_from_contiguous(struct device *dev, int count,
unsigned int order)
{
- return NULL;
+ return 0;
}
static inline
-bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+bool dma_release_from_contiguous(struct device *dev, unsigned long pfn,
int count)
{
return false;
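
For reference, a minimal sketch of how a caller looks against the pfn-based interface introduced above. The helper names example_cma_alloc/example_cma_free are illustrative assumptions, not part of this patch, and the sketch assumes a lowmem buffer so page_address() is valid.

/*
 * Illustrative caller of the pfn-based CMA API; not part of this patch.
 */
#include <linux/dma-contiguous.h>
#include <linux/mm.h>

static void *example_cma_alloc(struct device *dev, size_t size)
{
	size_t count = size >> PAGE_SHIFT;
	unsigned long pfn;

	/* A pfn of 0 now signals failure where the old API returned NULL. */
	pfn = dma_alloc_from_contiguous(dev, count, get_order(size));
	if (!pfn)
		return NULL;

	/* Convert to a struct page only where one is actually required. */
	return page_address(pfn_to_page(pfn));
}

static void example_cma_free(struct device *dev, void *vaddr, size_t size)
{
	/* The release path takes a pfn as well. */
	dma_release_from_contiguous(dev, page_to_pfn(virt_to_page(vaddr)),
				    size >> PAGE_SHIFT);
}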