/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * invalidate data or instruction cache:
 *
 * __invalidate_icache_all()
 * __invalidate_icache_page(adr)
 * __invalidate_dcache_page(adr)
 * __invalidate_icache_range(from,size)
 * __invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 * __flush_dcache_page(adr)
 *
 * flush and invalidate data cache:
 *
 * __flush_invalidate_dcache_all()
 * __flush_invalidate_dcache_page(adr)
 * __flush_invalidate_dcache_range(from,size)
 *
 * specials for cache aliasing:
 *
 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_icache_page_alias(vaddr,paddr)
 */
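
/*
 * Illustrative sketch only, not part of this header: a hypothetical
 * driver that fills a buffer and hands it to a device, then later reads
 * the device's reply from the same memory, might use the range primitive
 * like this, where "buf", "len" and start_device_dma() are assumed names:
 *
 *	__flush_invalidate_dcache_range((unsigned long) buf, len);
 *	start_device_dma(buf, len);
 *
 * The flush writes dirty lines back so the device sees the CPU's data;
 * the invalidate drops those lines so later CPU reads are not served
 * stale cache contents.
 */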
extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);

#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
static inline void __flush_dcache_page(unsigned long va)
{
}
static inline void __flush_dcache_range(unsigned long va, unsigned long sz)
{
}
# define __flush_invalidate_dcache_all()	__invalidate_dcache_all()
# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p, s)	__invalidate_dcache_range(p, s)
#endif
#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
							unsigned long phys) { }
static inline void __invalidate_dcache_page_alias(unsigned long virt,
						  unsigned long phys) { }
#endif

#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
						  unsigned long phys) { }
#endif
/*
 * We have physically tagged caches, so ordinarily there is nothing to do
 * here, unless we have cache aliasing.
 *
 * Pages can get remapped. Because this might change the 'color' of that page,
 * we have to flush the cache before the PTE is changed.
 * (see also Documentation/cachetlb.txt)
 */
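
/*
 * For illustration (an editorial sketch, not code from this file): with
 * DCACHE_WAY_SIZE > PAGE_SIZE, the "color" of a mapping is the part of
 * the virtual address above the page offset that still selects a cache
 * set, roughly:
 *
 *	unsigned long color = (vaddr & (DCACHE_WAY_SIZE - 1)) >> PAGE_SHIFT;
 *
 * Two virtual mappings of the same physical page with different colors
 * land in different cache lines, so stale data can survive a remap
 * unless the old mapping is flushed first.
 */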
#if defined(CONFIG_MMU) && \
	((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP))

#ifdef CONFIG_SMP
void flush_cache_all(void);
void flush_cache_range(struct vm_area_struct*, ulong, ulong);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_cache_page(struct vm_area_struct*,
		      unsigned long, unsigned long);
#else
#define flush_cache_all local_flush_cache_all
#define flush_cache_range local_flush_cache_range
#define flush_icache_range local_flush_icache_range
#define flush_cache_page local_flush_cache_page
#endif

#define local_flush_cache_all()					\
	do {							\
		__flush_invalidate_dcache_all();		\
		__invalidate_icache_all();			\
	} while (0)

#define flush_cache_mm(mm)		flush_cache_all()
#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

#define flush_cache_vmap(start,end)	flush_cache_all()
#define flush_cache_vunmap(start,end)	flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page*);

void local_flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
void local_flush_cache_page(struct vm_area_struct *vma,
		unsigned long address, unsigned long pfn);

#else

#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)

#define flush_cache_vmap(start,end)		do { } while (0)
#define flush_cache_vunmap(start,end)		do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)			do { } while (0)

#define flush_icache_range local_flush_icache_range
#define flush_cache_page(vma, addr, pfn)	do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)

#endif
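
/*
 * Sketch of a typical caller, simplified from what generic mm code does
 * (see Documentation/cachetlb.txt); the middle line stands in for the
 * page table update this header is not concerned with:
 *
 *	flush_cache_range(vma, start, end);
 *	... change the page tables for [start, end) ...
 *	flush_tlb_range(vma, start, end);
 */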
/* Ensure consistency between data and instruction cache. */
#define local_flush_icache_range(start, end)				\
	do {								\
		__flush_dcache_range(start, (end) - (start));		\
		__invalidate_icache_range(start, (end) - (start));	\
	} while (0)
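
/*
 * A minimal sketch, assuming a caller that writes instructions at run
 * time (code_buf, insns and insns_len are hypothetical names): the new
 * code must be pushed out of the dcache and the icache invalidated
 * before it is executed, which is exactly what this macro does:
 *
 *	memcpy(code_buf, insns, insns_len);
 *	flush_icache_range((unsigned long) code_buf,
 *			   (unsigned long) code_buf + insns_len);
 */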

/* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page)		do { } while (0)

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		__flush_dcache_range((unsigned long) dst, len);		\
		__invalidate_icache_range((unsigned long) dst, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif
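
/*
 * Hedged example of the intended caller (a simplified shape of the
 * generic access_process_vm()/ptrace path; "maddr" is an assumed kernel
 * mapping of "page"): copy into the target task's page through the
 * kernel alias, keeping caches coherent, then mark the page dirty:
 *
 *	copy_to_user_page(vma, page, addr, maddr, buf, len);
 *	set_page_dirty_lock(page);
 */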

#endif /* _XTENSA_CACHEFLUSH_H */