/* arch/xtensa/include/asm/mmu_context.h */
  1. /*
  2. * Switch an MMU context.
  3. *
  4. * This file is subject to the terms and conditions of the GNU General Public
  5. * License. See the file "COPYING" in the main directory of this archive
  6. * for more details.
  7. *
  8. * Copyright (C) 2001 - 2013 Tensilica Inc.
  9. */
#ifndef _XTENSA_MMU_CONTEXT_H
#define _XTENSA_MMU_CONTEXT_H

#ifndef CONFIG_MMU
#include <asm/nommu_context.h>
#else

#include <linux/stringify.h>
#include <linux/sched.h>
#include <asm/vectors.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
#include <asm-generic/percpu.h>

#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif

/*
 * Per-CPU ASID generation counter: low ASID_MASK bits are the most
 * recently handed-out ASID, upper bits count wrap-around generations
 * (see get_new_mmu_context()).
 */
DECLARE_PER_CPU(unsigned long, asid_cache);
#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)

/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context. We use the reserved values in the
 * ASID_INSERT macro below.
 *
 * 0 invalid
 * 1 kernel
 * 2 reserved
 * 3 reserved
 * 4...255 available
 */

#define NO_CONTEXT	0
#define ASID_USER_FIRST	4
#define ASID_MASK	((1 << XCHAL_MMU_ASID_BITS) - 1)

/*
 * Build a RASID register image holding one ASID per protection ring:
 * byte 0 = kernel ASID (1), byte 1 = the user ASID (x), bytes 2/3 =
 * the reserved ASIDs 2 and 3.
 */
#define ASID_INSERT(x)	(0x03020001 | (((x) & ASID_MASK) << 8))

void init_mmu(void);
/*
 * Write @val into the RASID special register (the per-ring ASID set,
 * usually built with ASID_INSERT()).  The isync ensures the new ASIDs
 * are in effect before any subsequent memory access.
 */
static inline void set_rasid_register (unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, rasid\n\t"
			      " isync\n" : : "a" (val));
}
  49. static inline unsigned long get_rasid_register (void)
  50. {
  51. unsigned long tmp;
  52. __asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
  53. return tmp;
  54. }
/*
 * Hand out a fresh ASID to @mm on @cpu by bumping the per-CPU counter.
 * The counter's low ASID_MASK bits are the ASID proper; the bits above
 * them act as a generation number so stale ASIDs from an earlier cycle
 * can be recognized in get_mmu_context().  When the low bits wrap to
 * zero, the whole TLB is flushed and the reserved values 0..3 are
 * skipped before the new cycle begins.
 */
static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long asid = cpu_asid_cache(cpu);

	if ((++asid & ASID_MASK) == 0) {
		/*
		 * Start new asid cycle; continue counting with next
		 * incarnation bits; skipping over 0, 1, 2, 3.
		 */
		local_flush_tlb_all();
		asid += ASID_USER_FIRST;
	}
	cpu_asid_cache(cpu) = asid;
	mm->context.asid[cpu] = asid;
	mm->context.cpu = cpu;
}
  70. static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
  71. {
  72. /*
  73. * Check if our ASID is of an older version and thus invalid.
  74. */
  75. if (mm) {
  76. unsigned long asid = mm->context.asid[cpu];
  77. if (asid == NO_CONTEXT ||
  78. ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
  79. get_new_mmu_context(mm, cpu);
  80. }
  81. }
/*
 * Make @mm the live address space on @cpu: refresh its ASID if stale,
 * program the RASID register with it, and invalidate the cached page
 * directory entry so translations use @mm's tables.
 */
static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
	get_mmu_context(mm, cpu);
	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
	invalidate_page_directory();
}
  88. /*
  89. * Initialize the context related info for a new mm_struct
  90. * instance. Valid cpu values are 0..(NR_CPUS-1), so initializing
  91. * to -1 says the process has never run on any core.
  92. */
  93. static inline int init_new_context(struct task_struct *tsk,
  94. struct mm_struct *mm)
  95. {
  96. int cpu;
  97. for_each_possible_cpu(cpu) {
  98. mm->context.asid[cpu] = NO_CONTEXT;
  99. }
  100. mm->context.cpu = -1;
  101. return 0;
  102. }
  103. static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  104. struct task_struct *tsk)
  105. {
  106. unsigned int cpu = smp_processor_id();
  107. int migrated = next->context.cpu != cpu;
  108. /* Flush the icache if we migrated to a new core. */
  109. if (migrated) {
  110. __invalidate_icache_all();
  111. next->context.cpu = cpu;
  112. }
  113. if (migrated || prev != next)
  114. activate_context(next, cpu);
  115. }
/* Activating an mm is a full switch with no associated task. */
#define activate_mm(prev, next)	switch_mm((prev), (next), NULL)
/* Nothing to do when an mm is detached from a task. */
#define deactivate_mm(tsk, mm)	do { } while (0)
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	/* Drop the cached page-directory entry; ASIDs need no cleanup. */
	invalidate_page_directory();
}
/* Entering lazy-TLB mode needs no action on this architecture. */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */
}

#endif /* CONFIG_MMU */
#endif /* _XTENSA_MMU_CONTEXT_H */