/*
 * Copyright (C) 2004 - 2007 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-noncoherent.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>

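/*
 * arch_dma_alloc()/arch_dma_free() are the architecture hooks behind the
 * generic dma_alloc_coherent()/dma_free_coherent() paths: SH caches are
 * not DMA-coherent, so "coherent" memory is handed to callers through an
 * uncached alias of an ordinary page allocation.
 */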
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	void *ret, *ret_nocache;
	int order = get_order(size);

	gfp |= __GFP_ZERO;

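	/*
	 * Grab zeroed pages from the page allocator; this is the cached
	 * kernel mapping, from which an uncached alias is built below.
	 */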
	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	arch_sync_dma_for_device(dev, virt_to_phys(ret), size,
			DMA_BIDIRECTIONAL);

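	/*
	 * Create the uncached alias the caller will actually use; CPU
	 * accesses through it bypass the data cache, which is what makes
	 * the buffer appear coherent to the device.
	 */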
	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

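	/*
	 * Split the higher-order allocation into order-0 pages so that
	 * arch_dma_free() can hand them back one page at a time.
	 */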
	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);

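	/*
	 * Report the device-visible bus address: the CPU physical address
	 * adjusted by the device's DMA offset, if it has one.
	 */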
	*dma_handle = virt_to_phys(ret);
	if (!WARN_ON(!dev))
		*dma_handle -= PFN_PHYS(dev->dma_pfn_offset);

	return ret_nocache;
}

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	int order = get_order(size);
	unsigned long pfn = (dma_handle >> PAGE_SHIFT);
	int k;

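	/* Undo the DMA offset applied at allocation time, if any. */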
	if (!WARN_ON(!dev))
		pfn += dev->dma_pfn_offset;

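	/* The allocation was split into order-0 pages; free them singly. */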
	for (k = 0; k < (1 << order); k++)
		__free_pages(pfn_to_page(pfn + k), 0);

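	/* Tear down the uncached alias set up by ioremap_nocache(). */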
	iounmap(vaddr);
}

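/*
 * Called on the streaming DMA paths (dma_map_*()) to make CPU writes
 * visible to the device and/or discard stale cache lines before the
 * device writes to memory.
 */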
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
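	/*
	 * sh_cacheop_vaddr() returns the address the cache instructions
	 * must operate on (the cached alias in 29-bit mode).
	 */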
	void *addr = sh_cacheop_vaddr(phys_to_virt(paddr));

	switch (dir) {
	case DMA_FROM_DEVICE:	/* invalidate only */
		__flush_invalidate_region(addr, size);
		break;
	case DMA_TO_DEVICE:	/* writeback only */
		__flush_wback_region(addr, size);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		__flush_purge_region(addr, size);
		break;
	default:
		BUG();
	}
}
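
/*
 * Illustrative sketch only (not part of this file): drivers never call
 * the arch_dma_* hooks above directly.  They go through the generic DMA
 * API, which dispatches here on non-coherent SH parts.  "mydev" is a
 * hypothetical struct device pointer.
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(mydev, PAGE_SIZE, &bus, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *
 *	... program the device with "bus", access the buffer via "cpu" ...
 *
 *	dma_free_coherent(mydev, PAGE_SIZE, cpu, bus);
 */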