/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU. Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel <[email protected]>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
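
/*
 * Invalidate every entry in the auto-refill ways of the instruction TLB.
 * The entry index passed to the invalidate primitive encodes the way in
 * its low bits and the entry number at PAGE_SHIFT and above; a single
 * isync at the end makes all of the invalidations take effect.
 */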
static inline void __flush_itlb_all(void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);

			invalidate_itlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}
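
/* Same walk as above, for the auto-refill ways of the data TLB. */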
static inline void __flush_dtlb_all(void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);

			invalidate_dtlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}
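
/* Drop every auto-refill ITLB and DTLB entry on this CPU. */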
void local_flush_tlb_all(void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus,
 * invalidating all previous tlb entries. If mm is someone else's user mapping,
 * we invalidate the context, thus, when that user mapping is swapped in,
 * a new context will be assigned to it.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (mm == current->active_mm) {
		unsigned long flags;

		local_irq_save(flags);
		mm->context.asid[cpu] = NO_CONTEXT;
		activate_context(mm, cpu);
		local_irq_restore(flags);
	} else {
		mm->context.asid[cpu] = NO_CONTEXT;
		mm->context.cpu = -1;
	}
}
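
/*
 * Number of entries in the auto-refill ways of each TLB; _TLB_ENTRIES is
 * the larger of the two and bounds how many pages a ranged flush may
 * invalidate individually before a full flush becomes cheaper.
 */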
#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)

#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif
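
/*
 * Flush a range of user addresses. If the range spans no more pages than
 * the TLB has entries, temporarily point the RASID register at the mm's
 * ASID and invalidate the pages one by one (ITLB and DTLB for executable
 * mappings, DTLB only otherwise). For anything larger it is cheaper to
 * invalidate the whole context via local_flush_tlb_mm().
 */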
void local_flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

#if 0
	printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
			(unsigned long)mm->context.asid[cpu], start, end);
#endif
	local_irq_save(flags);

	if (end - start + (PAGE_SIZE - 1) <= _TLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();

		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC)
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		else
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}

		set_rasid_register(oldpid);
	} else {
		local_flush_tlb_mm(mm);
	}
	local_irq_restore(flags);
}
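
/*
 * Flush a single page. The RASID register is switched to the mm's ASID
 * for the duration so that the invalidations hit that context; the ITLB
 * entry is dropped only for executable mappings, the DTLB entry always.
 */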
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	local_irq_save(flags);

	oldpid = get_rasid_register();
	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}
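
/*
 * Flush a range of kernel addresses. Per-page invalidation only applies
 * to the TLB-mapped kernel region between TASK_SIZE and PAGE_OFFSET, and
 * only when the range is smaller than the TLB; everything else falls back
 * to a full flush.
 */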
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
	    end - start < _TLB_ENTRIES << PAGE_SHIFT) {
		start &= PAGE_MASK;
		while (start < end) {
			invalidate_itlb_mapping(start);
			invalidate_dtlb_mapping(start);
			start += PAGE_SIZE;
		}
	} else {
		local_flush_tlb_all();
	}
}

#ifdef CONFIG_DEBUG_TLB_SANITY
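
/*
 * Walk the current task's page table and return the raw PTE value for
 * vaddr, or 0 if there is no mapping at any level.
 */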
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pteval;

	if (!mm)
		mm = task->active_mm;
	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	pmd = pmd_offset(pgd, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	pteval = pte_val(*pte);
	pte_unmap(pte);	/* balance the pte_offset_map() above */
	return pteval;
}
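
/* Severity levels reported by check_tlb_entry(). */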
enum {
	TLB_SUSPICIOUS = 1,
	TLB_INSANE = 2,
};

static void tlb_insane(void)
{
	BUG_ON(1);
}

static void tlb_suspicious(void)
{
	WARN_ON(1);
}

/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or that
 * the PTE is marked as non-present. A non-present PTE whose page has a
 * non-zero refcount and a zero mapcount is normal for a batched TLB flush
 * operation. A zero refcount means that the page was freed prematurely.
 * A non-zero mapcount is unusual, but does not necessarily mean an error,
 * and is therefore marked as suspicious.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
				dtlb ? 'D' : 'I', w, e, vpn,
				kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	if (tlb_asid == mm_asid) {
		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
			read_itlb_translation(tlbidx);
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
					dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);

				pr_err("page refcount: %d, mapcount: %d\n",
						page_count(p),
						page_mapcount(p));
				if (!page_count(p))
					rc |= TLB_INSANE;
				else if (page_mapcount(p))
					rc |= TLB_SUSPICIOUS;
			} else {
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}
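
/*
 * Run check_tlb_entry() over every auto-refill DTLB and ITLB entry with
 * interrupts disabled; BUG on insane entries, WARN on suspicious ones.
 */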
void check_tlb_sanity(void)
{
	unsigned long flags;
	unsigned w, e;
	int bug = 0;

	local_irq_save(flags);
	for (w = 0; w < DTLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, true);
	for (w = 0; w < ITLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, false);
	if (bug & TLB_INSANE)
		tlb_insane();
	if (bug & TLB_SUSPICIOUS)
		tlb_suspicious();
	local_irq_restore(flags);
}

#endif /* CONFIG_DEBUG_TLB_SANITY */