spitfire.h

/* spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
 *
 * Copyright (C) 1996 David S. Miller ([email protected])
 */
#ifndef _SPARC64_SPITFIRE_H
#define _SPARC64_SPITFIRE_H

#ifdef CONFIG_SPARC64

#include <asm/asi.h>

/* The following register addresses are accessible via ASI_DMMU
 * and ASI_IMMU; that is, there is a distinct and unique copy of
 * each of these registers for each TLB.
 */
#define TSB_TAG_TARGET		0x0000000000000000 /* All chips */
#define TLB_SFSR		0x0000000000000018 /* All chips */
#define TSB_REG			0x0000000000000028 /* All chips */
#define TLB_TAG_ACCESS		0x0000000000000030 /* All chips */
#define VIRT_WATCHPOINT		0x0000000000000038 /* All chips */
#define PHYS_WATCHPOINT		0x0000000000000040 /* All chips */
#define TSB_EXTENSION_P		0x0000000000000048 /* Ultra-III and later */
#define TSB_EXTENSION_S		0x0000000000000050 /* Ultra-III and later, D-TLB only */
#define TSB_EXTENSION_N		0x0000000000000058 /* Ultra-III and later */
#define TLB_TAG_ACCESS_EXT	0x0000000000000060 /* Ultra-III+ and later */

/* These registers only exist as one entity, and are accessed
 * via ASI_DMMU only.
 */
#define PRIMARY_CONTEXT		0x0000000000000008
#define SECONDARY_CONTEXT	0x0000000000000010
#define DMMU_SFAR		0x0000000000000020
#define VIRT_WATCHPOINT		0x0000000000000038
#define PHYS_WATCHPOINT		0x0000000000000040

#define SPITFIRE_HIGHEST_LOCKED_TLBENT	(64 - 1)
#define CHEETAH_HIGHEST_LOCKED_TLBENT	(16 - 1)

#define L1DCACHE_SIZE 0x4000
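
/* sun4v (hypervisor) chip implementation codes; sun4v_chip_type (declared
 * below) holds one of these values, presumably filled in at boot-time cpu
 * probe.
 */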
#define SUN4V_CHIP_INVALID	0x00
#define SUN4V_CHIP_NIAGARA1	0x01
#define SUN4V_CHIP_NIAGARA2	0x02
#define SUN4V_CHIP_NIAGARA3	0x03
#define SUN4V_CHIP_NIAGARA4	0x04
#define SUN4V_CHIP_NIAGARA5	0x05
#define SUN4V_CHIP_SPARC_M6	0x06
#define SUN4V_CHIP_SPARC_M7	0x07
#define SUN4V_CHIP_SPARC64X	0x8a
#define SUN4V_CHIP_SPARC_SN	0x8b
#define SUN4V_CHIP_UNKNOWN	0xff

#ifndef __ASSEMBLY__

enum ultra_tlb_layout {
	spitfire = 0,
	cheetah = 1,
	cheetah_plus = 2,
	hypervisor = 3,
};

extern enum ultra_tlb_layout tlb_type;

extern int sun4v_chip_type;

extern int cheetah_pcache_forced_on;

void cheetah_enable_pcache(void);
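
/* Index of the highest TLB entry that may hold a locked translation on the
 * current cpu type (Spitfire has 64 entries; Cheetah's lockable TLB has 16).
 */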
#define sparc64_highest_locked_tlbent()	\
	(tlb_type == spitfire ? \
	 SPITFIRE_HIGHEST_LOCKED_TLBENT : \
	 CHEETAH_HIGHEST_LOCKED_TLBENT)

extern int num_kernel_image_mappings;

/* The data cache is write through, so this just invalidates the
 * specified line.
 */
static inline void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
}

/* The instruction cache lines are flushed with this, but note that
 * this does not flush the pipeline.  It is possible for a line to
 * get flushed but for stale instructions to still be in the pipeline;
 * a flush instruction (to any address) is sufficient to handle
 * this issue after the line is invalidated.
 */
static inline void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
}
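
/* Diagnostic read/write access to the Spitfire TLB entries.  The diagnostic
 * ASI virtual address is simply the entry index times 8 (entry << 3),
 * effectively one 8-byte data/tag register per TLB entry.
 */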
static inline unsigned long spitfire_get_dtlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));

	/* Clear TTE diag bits. */
	data &= ~0x0003fe0000000000UL;

	return data;
}

static inline unsigned long spitfire_get_dtlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));
	return tag;
}

static inline void spitfire_put_dtlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data), "r" (entry << 3),
			       "i" (ASI_DTLB_DATA_ACCESS));
}

static inline unsigned long spitfire_get_itlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));

	/* Clear TTE diag bits. */
	data &= ~0x0003fe0000000000UL;

	return data;
}

static inline unsigned long spitfire_get_itlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));
	return tag;
}

static inline void spitfire_put_itlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data), "r" (entry << 3),
			       "i" (ASI_ITLB_DATA_ACCESS));
}
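
/* Demap a single page translation from the nucleus context (D-TLB and I-TLB
 * variants below).  The 0x20 OR'ed into the demap address appears to select
 * the nucleus context in the demap-operation VA encoding (context field in
 * bits 5:4).
 */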
static inline void spitfire_flush_dtlb_nucleus_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
}

static inline void spitfire_flush_itlb_nucleus_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
}

/* Cheetah has "all non-locked" tlb flushes. */
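/* The magic demap address 0x80 presumably sets the "demap all" operation
 * type in the demap VA encoding, which only Cheetah implements.
 */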
static inline void cheetah_flush_dtlb_all(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x80), "i" (ASI_DMMU_DEMAP));
}

static inline void cheetah_flush_itlb_all(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x80), "i" (ASI_IMMU_DEMAP));
}

/* Cheetah has a 4-tlb layout so direct access is a bit different.
 * The first two TLBs are fully associative, hold 16 entries, and are
 * used only for locked and >8K sized translations.  One exists for
 * data accesses and one for instruction accesses.
 *
 * The third TLB is for data accesses to 8K non-locked translations, is
 * 2-way associative, and holds 512 entries.  The fourth TLB is for
 * instruction accesses to 8K non-locked translations, is 2-way
 * associative, and holds 128 entries.
 *
 * Cheetah has some bug where bogus data can be returned from
 * ASI_{D,I}TLB_DATA_ACCESS loads; doing the load twice fixes
 * the problem for me. -DaveM
 */
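
/* For the direct-access operations below, the diagnostic ASI virtual address
 * appears to encode the TLB selector in bits 17:16 and the entry index times
 * 8 in the low bits, hence the (tlb << 16) | (entry << 3) expressions.
 */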
static inline unsigned long cheetah_get_ldtlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));

	return data;
}

static inline unsigned long cheetah_get_litlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));

	return data;
}

static inline unsigned long cheetah_get_ldtlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_DTLB_TAG_READ));

	return tag;
}

static inline unsigned long cheetah_get_litlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_TAG_READ));

	return tag;
}

static inline void cheetah_put_ldtlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));
}

static inline void cheetah_put_litlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));
}
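
/* The remaining D-TLB accessors take the TLB selector explicitly; the fixed
 * selector 2 used by the I-TLB accessors below presumably picks the large
 * 2-way associative TLB described in the comment above.
 */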
static inline unsigned long cheetah_get_dtlb_data(int entry, int tlb)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((tlb << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));

	return data;
}

static inline unsigned long cheetah_get_dtlb_tag(int entry, int tlb)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_TAG_READ));
	return tag;
}

static inline void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((tlb << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));
}

static inline unsigned long cheetah_get_itlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((2 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));

	return data;
}

static inline unsigned long cheetah_get_itlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ));
	return tag;
}

static inline void cheetah_put_itlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data), "r" ((2 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));
}

#endif /* !(__ASSEMBLY__) */

#endif /* CONFIG_SPARC64 */

#endif /* !(_SPARC64_SPITFIRE_H) */