Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/arch/csky/mm/Makefile b/arch/csky/mm/Makefile
index efb7eba..6e7696e 100644
--- a/arch/csky/mm/Makefile
+++ b/arch/csky/mm/Makefile
@@ -16,3 +16,4 @@
obj-y += tlb.o
obj-y += asid.o
obj-y += context.o
+obj-$(CONFIG_HAVE_TCM) += tcm.o
diff --git a/arch/csky/mm/cachev1.c b/arch/csky/mm/cachev1.c
index 494ec91..5a5a980 100644
--- a/arch/csky/mm/cachev1.c
+++ b/arch/csky/mm/cachev1.c
@@ -94,6 +94,11 @@
cache_op_all(INS_CACHE|CACHE_INV, 0);
}
+void local_icache_inv_all(void *priv)
+{
+ cache_op_all(INS_CACHE|CACHE_INV, 0);
+}
+
void dcache_wb_range(unsigned long start, unsigned long end)
{
cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0);
diff --git a/arch/csky/mm/cachev2.c b/arch/csky/mm/cachev2.c
index b61be65..7a9664a 100644
--- a/arch/csky/mm/cachev2.c
+++ b/arch/csky/mm/cachev2.c
@@ -3,15 +3,24 @@
#include <linux/spinlock.h>
#include <linux/smp.h>
+#include <linux/mm.h>
#include <asm/cache.h>
#include <asm/barrier.h>
-inline void dcache_wb_line(unsigned long start)
+/* for L1-cache */
+#define INS_CACHE (1 << 0)
+#define DATA_CACHE (1 << 1)
+#define CACHE_INV (1 << 4)
+#define CACHE_CLR (1 << 5)
+#define CACHE_OMS (1 << 6)
+
+void local_icache_inv_all(void *priv)
{
- asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
+ mtcr("cr17", INS_CACHE|CACHE_INV);
sync_is();
}
+#ifdef CONFIG_CPU_HAS_ICACHE_INS
void icache_inv_range(unsigned long start, unsigned long end)
{
unsigned long i = start & ~(L1_CACHE_BYTES - 1);
@@ -20,10 +29,50 @@
asm volatile("icache.iva %0\n"::"r"(i):"memory");
sync_is();
}
+#else
+struct cache_range {
+ unsigned long start;
+ unsigned long end;
+};
-void icache_inv_all(void)
+static DEFINE_SPINLOCK(cache_lock);
+
+static inline void cache_op_line(unsigned long i, unsigned int val)
{
- asm volatile("icache.ialls\n":::"memory");
+ mtcr("cr22", i);
+ mtcr("cr17", val);
+}
+
+void local_icache_inv_range(void *priv)
+{
+ struct cache_range *param = priv;
+ unsigned long i = param->start & ~(L1_CACHE_BYTES - 1);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache_lock, flags);
+
+ for (; i < param->end; i += L1_CACHE_BYTES)
+ cache_op_line(i, INS_CACHE | CACHE_INV | CACHE_OMS);
+
+ spin_unlock_irqrestore(&cache_lock, flags);
+
+ sync_is();
+}
+
+void icache_inv_range(unsigned long start, unsigned long end)
+{
+ struct cache_range param = { start, end };
+
+ if (irqs_disabled())
+ local_icache_inv_range(&param);
+ else
+ on_each_cpu(local_icache_inv_range, &param, 1);
+}
+#endif
+
+inline void dcache_wb_line(unsigned long start)
+{
+ asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
sync_is();
}
@@ -36,27 +85,10 @@
sync_is();
}
-void dcache_inv_range(unsigned long start, unsigned long end)
-{
- unsigned long i = start & ~(L1_CACHE_BYTES - 1);
-
- for (; i < end; i += L1_CACHE_BYTES)
- asm volatile("dcache.civa %0\n"::"r"(i):"memory");
- sync_is();
-}
-
void cache_wbinv_range(unsigned long start, unsigned long end)
{
- unsigned long i = start & ~(L1_CACHE_BYTES - 1);
-
- for (; i < end; i += L1_CACHE_BYTES)
- asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
- sync_is();
-
- i = start & ~(L1_CACHE_BYTES - 1);
- for (; i < end; i += L1_CACHE_BYTES)
- asm volatile("icache.iva %0\n"::"r"(i):"memory");
- sync_is();
+ dcache_wb_range(start, end);
+ icache_inv_range(start, end);
}
EXPORT_SYMBOL(cache_wbinv_range);
diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c
index 06e85b5..c3a775a 100644
--- a/arch/csky/mm/dma-mapping.c
+++ b/arch/csky/mm/dma-mapping.c
@@ -2,9 +2,7 @@
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/cache.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma-contiguous.h>
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
@@ -58,8 +56,8 @@
cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
@@ -74,8 +72,8 @@
}
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index 562c7f7..081b178 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -18,6 +18,7 @@
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
+#include <linux/kprobes.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
@@ -53,6 +54,9 @@
int fault;
unsigned long address = mmu_meh & PAGE_MASK;
+ if (kprobe_page_fault(regs, tsk->thread.trap_no))
+ return;
+
si_code = SEGV_MAPERR;
#ifndef CONFIG_CPU_HAS_TLBI
@@ -74,7 +78,7 @@
* Do _not_ use "tsk" here. We might be inside
* an interrupt in the middle of a task switch..
*/
- int offset = __pgd_offset(address);
+ int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
@@ -116,7 +120,7 @@
if (in_atomic() || !mm)
goto bad_area_nosemaphore;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -137,7 +141,7 @@
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
- if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+ if (unlikely(!vma_is_accessible(vma)))
goto bad_area;
}
@@ -146,7 +150,8 @@
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0);
+ fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0,
+ regs);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -156,17 +161,7 @@
goto bad_area;
BUG();
}
- if (fault & VM_FAULT_MAJOR) {
- tsk->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
- address);
- } else {
- tsk->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
- address);
- }
-
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -174,18 +169,18 @@
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
- tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
+ tsk->thread.trap_no = trap_no(regs);
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
return;
}
no_context:
- tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
+ tsk->thread.trap_no = trap_no(regs);
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
@@ -198,10 +193,10 @@
bust_spinlocks(1);
pr_alert("Unable to handle kernel paging request at virtual "
"address 0x%08lx, pc: 0x%08lx\n", address, regs->pc);
- die_if_kernel("Oops", regs, write);
+ die(regs, "Oops");
out_of_memory:
- tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
+ tsk->thread.trap_no = trap_no(regs);
/*
* We ran out of memory, call the OOM killer, and return the userspace
@@ -211,9 +206,9 @@
return;
do_sigbus:
- tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
+ tsk->thread.trap_no = trap_no(regs);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c
index 3317b77..89c1080 100644
--- a/arch/csky/mm/highmem.c
+++ b/arch/csky/mm/highmem.c
@@ -13,59 +13,37 @@
unsigned long highstart_pfn, highend_pfn;
-void *kmap(struct page *page)
+void kmap_flush_tlb(unsigned long addr)
{
- void *addr;
-
- might_sleep();
- if (!PageHighMem(page))
- return page_address(page);
- addr = kmap_high(page);
- flush_tlb_one((unsigned long)addr);
-
- return addr;
+ flush_tlb_one(addr);
}
-EXPORT_SYMBOL(kmap);
+EXPORT_SYMBOL(kmap_flush_tlb);
-void kunmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-EXPORT_SYMBOL(kunmap);
-
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
unsigned long vaddr;
int idx, type;
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
- set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
flush_tlb_one((unsigned long)vaddr);
return (void *)vaddr;
}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int idx;
if (vaddr < FIXADDR_START)
- goto out;
+ return;
#ifdef CONFIG_DEBUG_HIGHMEM
idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx();
@@ -78,11 +56,8 @@
(void) idx; /* to kill a warning */
#endif
kmap_atomic_idx_pop();
-out:
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
/*
* This is the same as kmap_atomic() but can map memory that doesn't
@@ -104,98 +79,29 @@
return (void *) vaddr;
}
-struct page *kmap_atomic_to_page(void *ptr)
-{
- unsigned long idx, vaddr = (unsigned long)ptr;
- pte_t *pte;
-
- if (vaddr < FIXADDR_START)
- return virt_to_page(ptr);
-
- idx = virt_to_fix(vaddr);
- pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
- return pte_page(*pte);
-}
-
-static void __init fixrange_init(unsigned long start, unsigned long end,
- pgd_t *pgd_base)
-{
-#ifdef CONFIG_HIGHMEM
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- int i, j, k;
- unsigned long vaddr;
-
- vaddr = start;
- i = __pgd_offset(vaddr);
- j = __pud_offset(vaddr);
- k = __pmd_offset(vaddr);
- pgd = pgd_base + i;
-
- for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
- pud = (pud_t *)pgd;
- for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
- pmd = (pmd_t *)pud;
- for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
- if (pmd_none(*pmd)) {
- pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
- if (!pte)
- panic("%s: Failed to allocate %lu bytes align=%lx\n",
- __func__, PAGE_SIZE,
- PAGE_SIZE);
-
- set_pmd(pmd, __pmd(__pa(pte)));
- BUG_ON(pte != pte_offset_kernel(pmd, 0));
- }
- vaddr += PMD_SIZE;
- }
- k = 0;
- }
- j = 0;
- }
-#endif
-}
-
-void __init fixaddr_kmap_pages_init(void)
+static void __init kmap_pages_init(void)
{
unsigned long vaddr;
- pgd_t *pgd_base;
-#ifdef CONFIG_HIGHMEM
pgd_t *pgd;
pmd_t *pmd;
pud_t *pud;
pte_t *pte;
-#endif
- pgd_base = swapper_pg_dir;
- /*
- * Fixed mappings:
- */
- vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- fixrange_init(vaddr, 0, pgd_base);
-
-#ifdef CONFIG_HIGHMEM
- /*
- * Permanent kmaps:
- */
vaddr = PKMAP_BASE;
- fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+ fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);
- pgd = swapper_pg_dir + __pgd_offset(vaddr);
+ pgd = swapper_pg_dir + pgd_index(vaddr);
pud = (pud_t *)pgd;
pmd = pmd_offset(pud, vaddr);
pte = pte_offset_kernel(pmd, vaddr);
pkmap_page_table = pte;
-#endif
}
void __init kmap_init(void)
{
unsigned long vaddr;
- fixaddr_kmap_pages_init();
+ kmap_pages_init();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index 00e9627..af62712 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -19,11 +19,11 @@
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
+#include <linux/initrd.h>
#include <asm/setup.h>
#include <asm/cachectl.h>
#include <asm/dma.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
@@ -36,6 +36,45 @@
__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
+#ifdef CONFIG_BLK_DEV_INITRD
+static void __init setup_initrd(void)
+{
+ unsigned long size;
+
+ if (initrd_start >= initrd_end) {
+ pr_err("initrd not found or empty");
+ goto disable;
+ }
+
+ if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
+ pr_err("initrd extends beyond end of memory");
+ goto disable;
+ }
+
+ size = initrd_end - initrd_start;
+
+ if (memblock_is_region_reserved(__pa(initrd_start), size)) {
+ pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region",
+ __pa(initrd_start), size);
+ goto disable;
+ }
+
+ memblock_reserve(__pa(initrd_start), size);
+
+ pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
+ (void *)(initrd_start), size);
+
+ initrd_below_start_ok = 1;
+
+ return;
+
+disable:
+ initrd_start = initrd_end = 0;
+
+ pr_err(" - disabling initrd\n");
+}
+#endif
+
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
@@ -47,6 +86,10 @@
#endif
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+#ifdef CONFIG_BLK_DEV_INITRD
+ setup_initrd();
+#endif
+
memblock_free_all();
#ifdef CONFIG_HIGHMEM
@@ -102,3 +145,50 @@
/* Setup page mask to 4k */
write_mmu_pagemask(0);
}
+
+void __init fixrange_init(unsigned long start, unsigned long end,
+ pgd_t *pgd_base)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ int i, j, k;
+ unsigned long vaddr;
+
+ vaddr = start;
+ i = pgd_index(vaddr);
+ j = pud_index(vaddr);
+ k = pmd_index(vaddr);
+ pgd = pgd_base + i;
+
+ for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+ pud = (pud_t *)pgd;
+ for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+ pmd = (pmd_t *)pud;
+ for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+ if (pmd_none(*pmd)) {
+ pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE,
+ PAGE_SIZE);
+
+ set_pmd(pmd, __pmd(__pa(pte)));
+ BUG_ON(pte != pte_offset_kernel(pmd, 0));
+ }
+ vaddr += PMD_SIZE;
+ }
+ k = 0;
+ }
+ j = 0;
+ }
+}
+
+void __init fixaddr_init(void)
+{
+ unsigned long vaddr;
+
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+ fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
+}
diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c
index e13cd34..70c8268 100644
--- a/arch/csky/mm/ioremap.c
+++ b/arch/csky/mm/ioremap.c
@@ -3,60 +3,8 @@
#include <linux/export.h>
#include <linux/mm.h>
-#include <linux/vmalloc.h>
#include <linux/io.h>
-#include <asm/pgtable.h>
-
-static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
- pgprot_t prot, void *caller)
-{
- phys_addr_t last_addr;
- unsigned long offset, vaddr;
- struct vm_struct *area;
-
- last_addr = addr + size - 1;
- if (!size || last_addr < addr)
- return NULL;
-
- offset = addr & (~PAGE_MASK);
- addr &= PAGE_MASK;
- size = PAGE_ALIGN(size + offset);
-
- area = get_vm_area_caller(size, VM_IOREMAP, caller);
- if (!area)
- return NULL;
-
- vaddr = (unsigned long)area->addr;
-
- if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
- free_vm_area(area);
- return NULL;
- }
-
- return (void __iomem *)(vaddr + offset);
-}
-
-void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
-{
- return __ioremap_caller(phys_addr, size, prot,
- __builtin_return_address(0));
-}
-EXPORT_SYMBOL(__ioremap);
-
-void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
-{
- return __ioremap_caller(phys_addr, size, PAGE_KERNEL,
- __builtin_return_address(0));
-}
-EXPORT_SYMBOL(ioremap_cache);
-
-void iounmap(void __iomem *addr)
-{
- vunmap((void *)((unsigned long)addr & PAGE_MASK));
-}
-EXPORT_SYMBOL(iounmap);
-
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
diff --git a/arch/csky/mm/syscache.c b/arch/csky/mm/syscache.c
index c4645e4..cd847ad 100644
--- a/arch/csky/mm/syscache.c
+++ b/arch/csky/mm/syscache.c
@@ -3,7 +3,7 @@
#include <linux/syscalls.h>
#include <asm/page.h>
-#include <asm/cache.h>
+#include <asm/cacheflush.h>
#include <asm/cachectl.h>
SYSCALL_DEFINE3(cacheflush,
@@ -12,17 +12,17 @@
int, cache)
{
switch (cache) {
- case ICACHE:
- icache_inv_range((unsigned long)addr,
- (unsigned long)addr + bytes);
- break;
+ case BCACHE:
case DCACHE:
dcache_wb_range((unsigned long)addr,
(unsigned long)addr + bytes);
- break;
- case BCACHE:
- cache_wbinv_range((unsigned long)addr,
- (unsigned long)addr + bytes);
+ if (cache != BCACHE)
+ break;
+ fallthrough;
+ case ICACHE:
+ flush_icache_mm_range(current->mm,
+ (unsigned long)addr,
+ (unsigned long)addr + bytes);
break;
default:
return -EINVAL;
diff --git a/arch/csky/mm/tcm.c b/arch/csky/mm/tcm.c
new file mode 100644
index 0000000..ddeb363
--- /dev/null
+++ b/arch/csky/mm/tcm.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/highmem.h>
+#include <linux/genalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/fixmap.h>
+
+#if (CONFIG_ITCM_RAM_BASE == 0xffffffff)
+#error "You should define ITCM_RAM_BASE"
+#endif
+
+#ifdef CONFIG_HAVE_DTCM
+#if (CONFIG_DTCM_RAM_BASE == 0xffffffff)
+#error "You should define DTCM_RAM_BASE"
+#endif
+
+#if (CONFIG_DTCM_RAM_BASE == CONFIG_ITCM_RAM_BASE)
+#error "You should define correct DTCM_RAM_BASE"
+#endif
+#endif
+
+extern char __tcm_start, __tcm_end, __dtcm_start;
+
+static struct gen_pool *tcm_pool;
+
+static void __init tcm_mapping_init(void)
+{
+ pte_t *tcm_pte;
+ unsigned long vaddr, paddr;
+ int i;
+
+ paddr = CONFIG_ITCM_RAM_BASE;
+
+ if (pfn_valid(PFN_DOWN(CONFIG_ITCM_RAM_BASE)))
+ goto panic;
+
+#ifndef CONFIG_HAVE_DTCM
+ for (i = 0; i < TCM_NR_PAGES; i++) {
+#else
+ for (i = 0; i < CONFIG_ITCM_NR_PAGES; i++) {
+#endif
+ vaddr = __fix_to_virt(FIX_TCM - i);
+
+ tcm_pte =
+ pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
+
+ set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL));
+
+ flush_tlb_one(vaddr);
+
+ paddr = paddr + PAGE_SIZE;
+ }
+
+#ifdef CONFIG_HAVE_DTCM
+ if (pfn_valid(PFN_DOWN(CONFIG_DTCM_RAM_BASE)))
+ goto panic;
+
+ paddr = CONFIG_DTCM_RAM_BASE;
+
+ for (i = 0; i < CONFIG_DTCM_NR_PAGES; i++) {
+ vaddr = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES - i);
+
+ tcm_pte =
+ pte_offset_kernel((pmd_t *) pgd_offset_k(vaddr), vaddr);
+
+ set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL));
+
+ flush_tlb_one(vaddr);
+
+ paddr = paddr + PAGE_SIZE;
+ }
+#endif
+
+#ifndef CONFIG_HAVE_DTCM
+ memcpy((void *)__fix_to_virt(FIX_TCM),
+ &__tcm_start, &__tcm_end - &__tcm_start);
+
+ pr_info("%s: mapping tcm va:0x%08lx to pa:0x%08x\n",
+ __func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE);
+
+ pr_info("%s: __tcm_start va:0x%08lx size:%d\n",
+ __func__, (unsigned long)&__tcm_start, &__tcm_end - &__tcm_start);
+#else
+ memcpy((void *)__fix_to_virt(FIX_TCM),
+ &__tcm_start, &__dtcm_start - &__tcm_start);
+
+ pr_info("%s: mapping itcm va:0x%08lx to pa:0x%08x\n",
+ __func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE);
+
+ pr_info("%s: __itcm_start va:0x%08lx size:%d\n",
+ __func__, (unsigned long)&__tcm_start, &__dtcm_start - &__tcm_start);
+
+ memcpy((void *)__fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES),
+ &__dtcm_start, &__tcm_end - &__dtcm_start);
+
+ pr_info("%s: mapping dtcm va:0x%08lx to pa:0x%08x\n",
+ __func__, __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES),
+ CONFIG_DTCM_RAM_BASE);
+
+ pr_info("%s: __dtcm_start va:0x%08lx size:%d\n",
+ __func__, (unsigned long)&__dtcm_start, &__tcm_end - &__dtcm_start);
+
+#endif
+ return;
+panic:
+ panic("TCM init error");
+}
+
+void *tcm_alloc(size_t len)
+{
+ unsigned long vaddr;
+
+ if (!tcm_pool)
+ return NULL;
+
+ vaddr = gen_pool_alloc(tcm_pool, len);
+ if (!vaddr)
+ return NULL;
+
+ return (void *) vaddr;
+}
+EXPORT_SYMBOL(tcm_alloc);
+
+void tcm_free(void *addr, size_t len)
+{
+ gen_pool_free(tcm_pool, (unsigned long) addr, len);
+}
+EXPORT_SYMBOL(tcm_free);
+
+static int __init tcm_setup_pool(void)
+{
+#ifndef CONFIG_HAVE_DTCM
+ u32 pool_size = (u32) (TCM_NR_PAGES * PAGE_SIZE)
+ - (u32) (&__tcm_end - &__tcm_start);
+
+ u32 tcm_pool_start = __fix_to_virt(FIX_TCM)
+ + (u32) (&__tcm_end - &__tcm_start);
+#else
+ u32 pool_size = (u32) (CONFIG_DTCM_NR_PAGES * PAGE_SIZE)
+ - (u32) (&__tcm_end - &__dtcm_start);
+
+ u32 tcm_pool_start = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES)
+ + (u32) (&__tcm_end - &__dtcm_start);
+#endif
+ int ret;
+
+ tcm_pool = gen_pool_create(2, -1);
+
+ ret = gen_pool_add(tcm_pool, tcm_pool_start, pool_size, -1);
+ if (ret) {
+ pr_err("%s: gen_pool add failed!\n", __func__);
+ return ret;
+ }
+
+ pr_info("%s: Added %d bytes @ 0x%08x to memory pool\n",
+ __func__, pool_size, tcm_pool_start);
+
+ return 0;
+}
+
+static int __init tcm_init(void)
+{
+ tcm_mapping_init();
+
+ tcm_setup_pool();
+
+ return 0;
+}
+arch_initcall(tcm_init);
diff --git a/arch/csky/mm/tlb.c b/arch/csky/mm/tlb.c
index eb3ba6c..ed15123 100644
--- a/arch/csky/mm/tlb.c
+++ b/arch/csky/mm/tlb.c
@@ -7,7 +7,6 @@
#include <linux/sched.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
/*