Commit dc96f488 authored by Laura Abbott

cma: Make locking finer grained


CMA locking is currently very coarse: the single cma_mutex both
protects the bitmap and serializes callers of alloc_contig_range.
Several situations can currently deadlock on the CMA mutex, mostly
AB/BA lock-ordering problems between the alloc and free paths. Fix
this by protecting the bitmap with a per-CMA-region mutex and using
the existing cma_mutex only to guard against concurrent
alloc_contig_range calls.

Change-Id: I642ac04995f47f863c94a9dc1c787be9b1ac9ba3
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
parent 970afde7
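
Distilled from the hunks below, the patch switches the allocation path to a two-lock discipline: bitmap bits are reserved and cleared only under the new per-region lock, while the global cma_mutex is held only around the migration itself. The following userspace sketch (pthreads, a fake byte-per-slot bitmap, a stub do_migrate()) is illustrative only and is not kernel code; every identifier in it is hypothetical, and only the locking pattern mirrors the patch.

/*
 * Userspace sketch of the locking scheme introduced by this patch.
 * region.lock plays the role of cma->lock (protects the bitmap only);
 * migrate_lock plays the role of cma_mutex (serializes the slow
 * migration step). All names and the stub do_migrate() are hypothetical.
 */
#include <pthread.h>
#include <string.h>

#define REGION_BITS 64

struct region {
	pthread_mutex_t lock;               /* protects bitmap only */
	unsigned char bitmap[REGION_BITS];  /* one byte per "page"  */
};

static pthread_mutex_t migrate_lock = PTHREAD_MUTEX_INITIALIZER;

void region_init(struct region *r)
{
	pthread_mutex_init(&r->lock, NULL);
	memset(r->bitmap, 0, sizeof(r->bitmap));
}

/* Find a free run of 'count' slots and mark it used; caller holds r->lock. */
static int find_and_set(struct region *r, int count)
{
	int start, i;

	for (start = 0; start + count <= REGION_BITS; start++) {
		for (i = 0; i < count; i++)
			if (r->bitmap[start + i])
				break;
		if (i == count) {
			memset(&r->bitmap[start], 1, count);
			return start;
		}
	}
	return -1;
}

/* Stand-in for alloc_contig_range(); pretend it always succeeds. */
static int do_migrate(int start, int count)
{
	return 0;
}

int region_alloc(struct region *r, int count)
{
	int start, ret;

	/* Reserve slots under the per-region lock, then drop it. */
	pthread_mutex_lock(&r->lock);
	start = find_and_set(r, count);
	pthread_mutex_unlock(&r->lock);
	if (start < 0)
		return -1;

	/* Only the slow migration step takes the global mutex. */
	pthread_mutex_lock(&migrate_lock);
	ret = do_migrate(start, count);
	pthread_mutex_unlock(&migrate_lock);

	if (ret) {
		/* Failure: retake the per-region lock just to unmark. */
		pthread_mutex_lock(&r->lock);
		memset(&r->bitmap[start], 0, count);
		pthread_mutex_unlock(&r->lock);
		return -1;
	}
	return start;
}

void region_free(struct region *r, int start, int count)
{
	/* Release never touches the global mutex at all. */
	pthread_mutex_lock(&r->lock);
	memset(&r->bitmap[start], 0, count);
	pthread_mutex_unlock(&r->lock);
}

The property the patch relies on is visible here: a failed migration backs out its reservation using only the per-region lock, and a concurrent region_free() never needs the global mutex, which is what removes the alloc/free interaction on cma_mutex described in the commit message.
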
@@ -46,6 +46,7 @@ struct cma {
 	unsigned long count;
 	unsigned long *bitmap;
 	bool in_system;
+	struct mutex lock;
 };
 
 static DEFINE_MUTEX(cma_mutex);
@@ -199,6 +200,7 @@ static __init struct cma *cma_create_area(unsigned long base_pfn,
 		if (ret)
 			goto error;
 	}
+	mutex_init(&cma->lock);
 
 	pr_debug("%s: returned %p\n", __func__, (void *)cma);
 	return cma;
@@ -502,6 +504,13 @@ phys_addr_t cma_get_base(struct device *dev)
 	return cma->base_pfn << PAGE_SHIFT;
 }
 
+static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
+{
+	mutex_lock(&cma->lock);
+	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+	mutex_unlock(&cma->lock);
+}
+
 /**
  * dma_alloc_from_contiguous() - allocate pages from contiguous area
  * @dev:   Pointer to device for which the allocation is performed.
@@ -535,23 +544,36 @@ unsigned long dma_alloc_from_contiguous(struct device *dev, int count,
 	mask = (1 << align) - 1;
 
-	mutex_lock(&cma_mutex);
 	for (;;) {
+		mutex_lock(&cma->lock);
 		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
 						    start, count, mask);
-		if (pageno >= cma->count)
+		if (pageno >= cma->count) {
+			mutex_unlock(&cma->lock);
 			break;
+		}
+		bitmap_set(cma->bitmap, pageno, count);
+		/*
+		 * It's safe to drop the lock here. We've marked this region for
+		 * our exclusive use. If the migration fails we will take the
+		 * lock again and unmark it.
+		 */
+		mutex_unlock(&cma->lock);
 
 		pfn = cma->base_pfn + pageno;
-		if (cma->in_system)
+		if (cma->in_system) {
+			mutex_lock(&cma_mutex);
 			ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+			mutex_unlock(&cma_mutex);
+		}
 		if (ret == 0) {
-			bitmap_set(cma->bitmap, pageno, count);
 			break;
 		} else if (ret != -EBUSY) {
+			clear_cma_bitmap(cma, pfn, count);
 			break;
 		}
+		clear_cma_bitmap(cma, pfn, count);
 		tries++;
 		trace_dma_alloc_contiguous_retry(tries);
@@ -561,7 +583,6 @@ unsigned long dma_alloc_from_contiguous(struct device *dev, int count,
 		start = pageno + mask + 1;
 	}
 
-	mutex_unlock(&cma_mutex);
 	pr_debug("%s(): returned %lx\n", __func__, pfn);
 	return pfn;
 }
@@ -591,11 +612,9 @@ bool dma_release_from_contiguous(struct device *dev, unsigned long pfn,
 	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
 
-	mutex_lock(&cma_mutex);
-	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
 	if (cma->in_system)
 		free_contig_range(pfn, count);
-	mutex_unlock(&cma_mutex);
+	clear_cma_bitmap(cma, pfn, count);
 
 	return true;
 }
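
For orientation, a driver-side caller might use these interfaces roughly as sketched below. The prototypes are truncated in the hunks above, so the assumption that dma_alloc_from_contiguous() takes (dev, page count, alignment order) and returns a pfn (0 on failure), and that dma_release_from_contiguous() takes (dev, pfn, count), is inferred rather than confirmed; my_dev_claim_buffer()/my_dev_release_buffer() and the linear-mapping assumption are likewise hypothetical.

/*
 * Hypothetical caller sketch; parameter lists are assumed from the
 * truncated prototypes above and from "return pfn" in the alloc path.
 */
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/mm.h>

static void *my_dev_claim_buffer(struct device *dev, int count,
				 unsigned long *out_pfn)
{
	unsigned long pfn;

	/* May retry internally; cma_mutex is now held only while
	 * alloc_contig_range() actually runs. */
	pfn = dma_alloc_from_contiguous(dev, count, 0);
	if (!pfn)
		return NULL;

	*out_pfn = pfn;
	/* Assumes the CMA region sits in the kernel linear mapping. */
	return page_address(pfn_to_page(pfn));
}

static void my_dev_release_buffer(struct device *dev, unsigned long pfn,
				  int count)
{
	/* After this patch, release only takes the per-region cma->lock. */
	dma_release_from_contiguous(dev, pfn, count);
}
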