/* arch/sparc/include/asm/switch_to_32.h */
#ifndef __SPARC_SWITCH_TO_H
#define __SPARC_SWITCH_TO_H

#include <asm/smp.h>

/* Per-CPU pointers to the thread_info of the task currently running
 * on each CPU; updated inside the switch_to() asm below.
 */
extern struct thread_info *current_set[NR_CPUS];

/*
 * Flush windows so that the VM switch which follows
 * would not pull the stack from under us.
 *
 * SWITCH_ENTER and SWITCH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
 * XXX WTF is the above comment? Found in late teen 2.4.x.
 */
#ifdef CONFIG_SMP
/*
 * SMP: save the outgoing task's FPU state eagerly at switch time.
 * If 'prv' used the FPU (TIF_USEDFPU set), enable FP in the live PSR,
 * dump FP regs/FSR/FP queue into prv's thread struct, then clear the
 * flag and drop PSR_EF in prv's saved user PSR so its next FP access
 * traps and can reload the state (lazy restore — confirm against the
 * fp_disabled trap handler).
 */
#define SWITCH_ENTER(prv) \
	do { \
	if (test_tsk_thread_flag(prv, TIF_USEDFPU)) { \
		put_psr(get_psr() | PSR_EF); \
		fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \
		       &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \
		clear_tsk_thread_flag(prv, TIF_USEDFPU); \
		(prv)->thread.kregs->psr &= ~PSR_EF; \
	} \
	} while(0)
/* On SMP the state was saved above, so no lazy-FPU step is needed. */
#define SWITCH_DO_LAZY_FPU(next) /* */
#else
/* UP keeps FPU state live in the hardware — nothing to do on entry. */
#define SWITCH_ENTER(prv) /* */
/*
 * UP: lazy FPU.  If the incoming task is not the last FPU user,
 * clear PSR_EF in its saved user PSR so its next FP instruction
 * traps; the trap handler can then swap FPU contents at that point.
 */
#define SWITCH_DO_LAZY_FPU(nxt) \
	do { \
	if (last_task_used_math != (nxt)) \
		(nxt)->thread.kregs->psr&=~PSR_EF; \
	} while(0)
#endif
/*
 * Spill all register windows to the current stack before the switch:
 * nest seven 'save's (each allocating a minimal 0x40-byte frame,
 * forcing older windows out via window-overflow traps) and unwind with
 * seven 'restore's.  The global label flush_patch_switch marks this as
 * a runtime patch site — NOTE(review): presumably patched at boot for
 * CPUs with a different number of windows; confirm against the window
 * patch code.
 */
#define prepare_arch_switch(next) do { \
	__asm__ __volatile__( \
	".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp\n\t" \
	"restore; restore; restore; restore; restore; restore; restore"); \
} while(0)
/* Much care has gone into this code, do not touch it.
 *
 * We need to loadup regs l0/l1 for the newly forked child
 * case because the trap return path relies on those registers
 * holding certain values, gcc is told that they are clobbered.
 * Gcc needs registers for 3 values in and 1 value out, so we
 * clobber every non-fixed-usage register besides l2/l3/o4/o5. -DaveM
 *
 * Hey Dave, that do not touch sign is too much of an incentive
 * - Anton & Pete
 */
/*
 * switch_to(prev, next, last) — the low-level sparc32 context switch.
 * Asm operand map:
 *   %0 = last (output: prev's task_struct, reloaded after the switch)
 *   %1 = &current_set[hard_smp_processor_id()]
 *   %2 = task_thread_info(next)
 *   %3 = TI_KPSR  (offset of saved kernel PSR in thread_info;
 *                  kpsr/kwim are an adjacent pair — std/ldd below)
 *   %4 = TI_KSP   (offset of saved kernel SP; sp/%o7 stored as a pair)
 *   %5 = TI_TASK  (offset of the task_struct pointer in thread_info)
 * %g6 is the current thread_info pointer throughout the kernel.
 */
#define switch_to(prev, next, last) do { \
	SWITCH_ENTER(prev); \
	SWITCH_DO_LAZY_FPU(next); \
	/* Mark this CPU active in next's mm before running on its stack. */ \
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next->active_mm)); \
	__asm__ __volatile__( \
	/* Build the return address so the final jmpl lands at "here". */ \
	"sethi %%hi(here - 0x8), %%o7\n\t" \
	"mov %%g6, %%g3\n\t" /* keep prev's thread_info for the delay slot */ \
	"or %%o7, %%lo(here - 0x8), %%o7\n\t" \
	"rd %%psr, %%g4\n\t" \
	"std %%sp, [%%g6 + %4]\n\t" /* save prev's %sp/%o7 pair at TI_KSP */ \
	"rd %%wim, %%g5\n\t" \
	/* wr xors its operands: flip PSR_ET (0x20) to mask traps while */ \
	/* WIM and %sp are inconsistent — confirm 0x20 == PSR_ET. */ \
	"wr %%g4, 0x20, %%psr\n\t" \
	"nop\n\t" \
	"std %%g4, [%%g6 + %3]\n\t" /* save prev's psr/wim pair at TI_KPSR */ \
	"ldd [%2 + %3], %%g4\n\t" /* load next's kpsr -> %g4, kwim -> %g5 */ \
	"mov %2, %%g6\n\t" /* current thread_info = next's */ \
	".globl patchme_store_new_current\n" \
	"patchme_store_new_current:\n\t" \
	"st %2, [%1]\n\t" /* current_set[cpu] = next (runtime patch site) */ \
	"wr %%g4, 0x20, %%psr\n\t" /* install next's PSR, traps still off */ \
	"nop\n\t" \
	"nop\n\t" \
	"nop\n\t" /* LEON needs all 3 nops: load to %sp depends on CWP. */ \
	"ldd [%%g6 + %4], %%sp\n\t" /* restore next's %sp/%o7 pair */ \
	"wr %%g5, 0x0, %%wim\n\t" /* restore next's window-invalid mask */ \
	"ldd [%%sp + 0x00], %%l0\n\t" /* l0/l1 for the forked-child path */ \
	"ldd [%%sp + 0x38], %%i6\n\t" /* restore frame pointer pair i6/i7 */ \
	"wr %%g4, 0x0, %%psr\n\t" /* final PSR write re-enables traps */ \
	"nop\n\t" \
	"nop\n\t" \
	"jmpl %%o7 + 0x8, %%g0\n\t" /* jump to "here" on next's stack... */ \
	" ld [%%g3 + %5], %0\n\t" /* ...delay slot: last = prev's task */ \
	"here:\n" \
	: "=&r" (last) \
	: "r" (&(current_set[hard_smp_processor_id()])), \
	  "r" (task_thread_info(next)), \
	  "i" (TI_KPSR), \
	  "i" (TI_KSP), \
	  "i" (TI_TASK) \
	: "g1", "g2", "g3", "g4", "g5", "g7", \
	  "l0", "l1", "l3", "l4", "l5", "l6", "l7", \
	  "i0", "i1", "i2", "i3", "i4", "i5", \
	  "o0", "o1", "o2", "o3", "o7"); \
} while(0)
/* Dump the live FPU state (FP registers, FSR, FP queue and its depth)
 * into the caller-supplied buffers; implemented in arch asm.
 */
void fpsave(unsigned long *fpregs, unsigned long *fsr,
	    void *fpqueue, unsigned long *fpqdepth);

/* Flush the current task's register windows out to its user stack.
 * NOTE(review): contract inferred from the name — definition not
 * visible in this header; confirm at the implementation site.
 */
void synchronize_user_stack(void);

#endif /* __SPARC_SWITCH_TO_H */