/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H

/* Keep includes the same across arches.  */
#include <linux/mm.h>

/*
 * The cache does not need to be flushed when TLB entries change because
 * the cache is addressed by physical memory, not virtual memory.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)

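/*
 * PG_arch_1 is the arch-private page flag; here it records that the
 * D-cache view of a folio is clean.  flush_dcache_folio() clears the bit,
 * and the actual write-back is deferred, typically until the folio is next
 * mapped into user space (the arch's update_mmu_cache() path).
 */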
#define PG_dcache_clean		PG_arch_1

static inline void flush_dcache_folio(struct folio *folio)
{
	if (test_bit(PG_dcache_clean, &folio->flags))
		clear_bit(PG_dcache_clean, &folio->flags);
}
#define flush_dcache_folio flush_dcache_folio

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}
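/*
 * Sketch of the intended calling pattern (illustrative only, not part of
 * this header): after the kernel writes into a page that user space may
 * map, e.g.
 *
 *	void *va = kmap_local_page(page);
 *	memcpy(va, buf, len);
 *	kunmap_local(va);
 *	flush_dcache_page(page);
 *
 * clearing PG_dcache_clean defers the real maintenance until the page is
 * mapped to user space again.
 */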

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

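/*
 * Used after the kernel modifies executable code (module loading, kprobes,
 * ...).  cache_wbinv_range() is expected to write the D-cache back and
 * invalidate the I-cache over the range so the new instructions are
 * fetched correctly.
 */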
#define flush_icache_range(start, end)		cache_wbinv_range(start, end)

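/*
 * Ranged I-cache maintenance for user mappings: flush_icache_mm_range()
 * makes instruction fetches coherent for the given mm over [start, end).
 * flush_icache_deferred() is intended to let a CPU postpone I-cache
 * invalidation until it actually switches to the mm, avoiding broadcast
 * flushes.
 */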
void flush_icache_mm_range(struct mm_struct *mm,
			unsigned long start, unsigned long end);
void flush_icache_deferred(struct mm_struct *mm);

#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vmap_early(start, end)	do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

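/*
 * copy_to_user_page() is called when the kernel writes into a user page
 * through a kernel alias (ptrace, uprobes, ...).  A plain memcpy() is
 * enough for data, but for an executable VMA the written D-cache lines
 * must be cleaned and the I-cache made coherent before user space can
 * execute the new code.
 */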
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
	if (vma->vm_flags & VM_EXEC) { \
		dcache_wb_range((unsigned long)dst, \
				(unsigned long)dst + len); \
		flush_icache_mm_range(current->mm, \
				(unsigned long)dst, \
				(unsigned long)dst + len); \
	} \
} while (0)
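/*
 * Reading through the kernel alias hits the same physical cache lines on
 * this physically addressed cache, so no maintenance is needed beyond the
 * copy itself.
 */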
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif /* __ABI_CSKY_CACHEFLUSH_H */