Skip to content

Commit 7573977

Browse files
mszyprow authored and gregkh committed
mm: dmapool: use provided gfp flags for all dma_alloc_coherent() calls
commit 387870f upstream. dmapool always calls dma_alloc_coherent() with GFP_ATOMIC flag, regardless the flags provided by the caller. This causes excessive pruning of emergency memory pools without any good reason. Additionaly, on ARM architecture any driver which is using dmapools will sooner or later trigger the following error: "ERROR: 256 KiB atomic DMA coherent pool is too small! Please increase it with coherent_pool= kernel parameter!". Increasing the coherent pool size usually doesn't help much and only delays such error, because all GFP_ATOMIC DMA allocations are always served from the special, very limited memory pool. This patch changes the dmapool code to correctly use gfp flags provided by the dmapool caller. Reported-by: Soeren Moch <smoch@web.de> Reported-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com> Tested-by: Andrew Lunn <andrew@lunn.ch> Tested-by: Soeren Moch <smoch@web.de> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 9d2e921 commit 7573977

File tree

1 file changed

+7
-24
lines changed

1 file changed

+7
-24
lines changed

mm/dmapool.c

+7-24
Original file line number | Diff line number | Diff line change
@@ -50,7 +50,6 @@ struct dma_pool { /* the pool */
5050
size_t allocation;
5151
size_t boundary;
5252
char name[32];
53-
wait_queue_head_t waitq;
5453
struct list_head pools;
5554
};
5655

@@ -62,8 +61,6 @@ struct dma_page { /* cacheable header for 'allocation' bytes */
6261
unsigned int offset;
6362
};
6463

65-
#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
66-
6764
static DEFINE_MUTEX(pools_lock);
6865

6966
static ssize_t
@@ -172,7 +169,6 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
172169
retval->size = size;
173170
retval->boundary = boundary;
174171
retval->allocation = allocation;
175-
init_waitqueue_head(&retval->waitq);
176172

177173
if (dev) {
178174
int ret;
@@ -227,7 +223,6 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
227223
memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
228224
#endif
229225
pool_initialise_page(pool, page);
230-
list_add(&page->page_list, &pool->page_list);
231226
page->in_use = 0;
232227
page->offset = 0;
233228
} else {
@@ -315,30 +310,21 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
315310
might_sleep_if(mem_flags & __GFP_WAIT);
316311

317312
spin_lock_irqsave(&pool->lock, flags);
318-
restart:
319313
list_for_each_entry(page, &pool->page_list, page_list) {
320314
if (page->offset < pool->allocation)
321315
goto ready;
322316
}
323-
page = pool_alloc_page(pool, GFP_ATOMIC);
324-
if (!page) {
325-
if (mem_flags & __GFP_WAIT) {
326-
DECLARE_WAITQUEUE(wait, current);
327317

328-
__set_current_state(TASK_UNINTERRUPTIBLE);
329-
__add_wait_queue(&pool->waitq, &wait);
330-
spin_unlock_irqrestore(&pool->lock, flags);
318+
/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
319+
spin_unlock_irqrestore(&pool->lock, flags);
331320

332-
schedule_timeout(POOL_TIMEOUT_JIFFIES);
321+
page = pool_alloc_page(pool, mem_flags);
322+
if (!page)
323+
return NULL;
333324

334-
spin_lock_irqsave(&pool->lock, flags);
335-
__remove_wait_queue(&pool->waitq, &wait);
336-
goto restart;
337-
}
338-
retval = NULL;
339-
goto done;
340-
}
325+
spin_lock_irqsave(&pool->lock, flags);
341326

327+
list_add(&page->page_list, &pool->page_list);
342328
ready:
343329
page->in_use++;
344330
offset = page->offset;
@@ -348,7 +334,6 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
348334
#ifdef DMAPOOL_DEBUG
349335
memset(retval, POOL_POISON_ALLOCATED, pool->size);
350336
#endif
351-
done:
352337
spin_unlock_irqrestore(&pool->lock, flags);
353338
return retval;
354339
}
@@ -435,8 +420,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
435420
page->in_use--;
436421
*(int *)vaddr = page->offset;
437422
page->offset = offset;
438-
if (waitqueue_active(&pool->waitq))
439-
wake_up_locked(&pool->waitq);
440423
/*
441424
* Resist a temptation to do
442425
* if (!is_page_busy(page)) pool_free_page(pool, page);

0 commit comments

Comments (0)