// SPDX-License-Identifier: GPL-2.0-only
/*
 * Microblaze support for cache consistent memory.
 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2010 PetaLogix
 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-noncoherent.h>
#include <asm/cpuinfo.h>
#include <asm/cacheflush.h>

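/*
 * Flush the data cache over a freshly allocated DMA buffer so the CPU and a
 * non-coherent device start from a consistent view; the generic DMA layer
 * calls this before handing the buffer out.
 */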
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	phys_addr_t paddr = page_to_phys(page);

	flush_dcache_range(paddr, paddr + size);
}

#ifndef CONFIG_MMU
/*
 * Consistent memory allocators. Used for DMA devices that want to share
 * uncached memory with the processor core. My crufty no-MMU approach is
 * simple: in the HW platform we can optionally mirror the DDR up above the
 * processor cacheable region, so memory accessed through this mirror is
 * not cached. It's allocated from the same pool as normal memory, but the
 * handle we return is shifted up into the uncached region. This will no doubt
 * cause big problems if memory allocated here is not also freed properly. -- JW
 *
 * I have to use the dcache values because I can't rely on the RAM size:
 */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
#else
#define UNCACHED_SHADOW_MASK 0
#endif /* CONFIG_XILINX_UNCACHED_SHADOW */

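/*
 * Map a cached kernel pointer to its alias in the uncached shadow region.
 *
 * Worked example (hypothetical addresses, assuming a power-of-two DDR size
 * with the uncached mirror directly above it): if cpuinfo.dcache_base is
 * 0x80000000 and cpuinfo.dcache_high is 0x8fffffff, UNCACHED_SHADOW_MASK
 * works out to 0x10000000, so a pointer such as 0x80123000 becomes
 * 0x90123000 once the mask is ORed in. The check below warns if the
 * result still lands inside the cached range.
 */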
void *uncached_kernel_address(void *ptr)
{
	unsigned long addr = (unsigned long)ptr;

	addr |= UNCACHED_SHADOW_MASK;
	if (addr > cpuinfo.dcache_base && addr < cpuinfo.dcache_high)
		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
	return (void *)addr;
}

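/*
 * Inverse of uncached_kernel_address(): clear the shadow-mask bits to
 * recover the cached address the allocator originally handed out.
 */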
void *cached_kernel_address(void *ptr)
{
	unsigned long addr = (unsigned long)ptr;

	return (void *)(addr & ~UNCACHED_SHADOW_MASK);
}
#endif /* CONFIG_MMU */