  1. /*
  2. * Userspace implementations of gettimeofday() and friends.
  3. *
  4. * Copyright (C) 2017 Cavium, Inc.
  5. * Copyright (C) 2015 Mentor Graphics Corporation
  6. * Copyright (C) 2012 ARM Limited
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  19. *
  20. * Author: Will Deacon <[email protected]>
 * Rewritten from aarch64 version into C by: Andrew Pinski <[email protected]>
  22. * Reworked and rebased over arm version by: Mark Salyzyn <[email protected]>
  23. */
  24. #include <asm/barrier.h>
  25. #include <linux/compiler.h> /* for notrace */
  26. #include <linux/math64.h> /* for __iter_div_u64_rem() */
  27. #include <uapi/linux/time.h> /* for struct timespec */
  28. #include "compiler.h"
  29. #include "datapage.h"
  30. #ifdef ARCH_PROVIDES_TIMER
  31. DEFINE_FALLBACK(gettimeofday, struct timeval *, tv, struct timezone *, tz)
  32. #endif
  33. DEFINE_FALLBACK(clock_gettime, clockid_t, clock, struct timespec *, ts)
  34. DEFINE_FALLBACK(clock_getres, clockid_t, clock, struct timespec *, ts)
  35. #ifdef USE_SYSCALL
  36. #if defined(__LP64__)
  37. # define USE_SYSCALL_MASK (USE_SYSCALL | USE_SYSCALL_64)
  38. #else
  39. # define USE_SYSCALL_MASK (USE_SYSCALL | USE_SYSCALL_32)
  40. #endif
  41. #else
  42. # define USE_SYSCALL_MASK ((uint32_t)-1)
  43. #endif
  44. static notrace u32 vdso_read_begin(const struct vdso_data *vd)
  45. {
  46. u32 seq;
  47. do {
  48. seq = READ_ONCE(vd->tb_seq_count);
  49. if ((seq & 1) == 0)
  50. break;
  51. cpu_relax();
  52. } while (true);
  53. smp_rmb(); /* Pairs with second smp_wmb in update_vsyscall */
  54. return seq;
  55. }
  56. static notrace int vdso_read_retry(const struct vdso_data *vd, u32 start)
  57. {
  58. u32 seq;
  59. smp_rmb(); /* Pairs with first smp_wmb in update_vsyscall */
  60. seq = READ_ONCE(vd->tb_seq_count);
  61. return seq != start;
  62. }
  63. static notrace int do_realtime_coarse(const struct vdso_data *vd,
  64. struct timespec *ts)
  65. {
  66. u32 seq;
  67. do {
  68. seq = vdso_read_begin(vd);
  69. ts->tv_sec = vd->xtime_coarse_sec;
  70. ts->tv_nsec = vd->xtime_coarse_nsec;
  71. } while (vdso_read_retry(vd, seq));
  72. return 0;
  73. }
  74. static notrace int do_monotonic_coarse(const struct vdso_data *vd,
  75. struct timespec *ts)
  76. {
  77. struct timespec tomono;
  78. u32 seq;
  79. u64 nsec;
  80. do {
  81. seq = vdso_read_begin(vd);
  82. ts->tv_sec = vd->xtime_coarse_sec;
  83. ts->tv_nsec = vd->xtime_coarse_nsec;
  84. tomono.tv_sec = vd->wtm_clock_sec;
  85. tomono.tv_nsec = vd->wtm_clock_nsec;
  86. } while (vdso_read_retry(vd, seq));
  87. ts->tv_sec += tomono.tv_sec;
  88. /* open coding timespec_add_ns */
  89. ts->tv_sec += __iter_div_u64_rem(ts->tv_nsec + tomono.tv_nsec,
  90. NSEC_PER_SEC, &nsec);
  91. ts->tv_nsec = nsec;
  92. return 0;
  93. }
  94. #ifdef ARCH_PROVIDES_TIMER
  95. /*
  96. * Returns the clock delta, in nanoseconds left-shifted by the clock
  97. * shift.
  98. */
  99. static notrace u64 get_clock_shifted_nsec(const u64 cycle_last,
  100. const u32 mult,
  101. const u64 mask)
  102. {
  103. u64 res;
  104. /* Read the virtual counter. */
  105. res = arch_vdso_read_counter();
  106. res = res - cycle_last;
  107. res &= mask;
  108. return res * mult;
  109. }
/*
 * CLOCK_REALTIME: snapshot the timekeeper state under the seqcount and
 * extend it by the counter cycles accumulated since the last update.
 * Returns 0 on success, -1 when the kernel requires the real syscall.
 */
static notrace int do_realtime(const struct vdso_data *vd, struct timespec *ts)
{
	u32 seq, mult, shift;
	u64 nsec, cycle_last;
#ifdef ARCH_CLOCK_FIXED_MASK
	/* Counter width fixed at build time; skip the per-call load. */
	static const u64 mask = ARCH_CLOCK_FIXED_MASK;
#else
	u64 mask;
#endif
	vdso_xtime_clock_sec_t sec;

	do {
		seq = vdso_read_begin(vd);

		/* Checked inside the loop so a switch to syscall mode is seen. */
		if (vd->use_syscall & USE_SYSCALL_MASK)
			return -1;

		cycle_last = vd->cs_cycle_last;
		mult = vd->cs_mono_mult;
		shift = vd->cs_shift;
#ifndef ARCH_CLOCK_FIXED_MASK
		mask = vd->cs_mask;
#endif
		sec = vd->xtime_clock_sec;
		nsec = vd->xtime_clock_snsec; /* still left-shifted by 'shift' */
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the shifted-nanosecond delta since cycle_last, then unshift. */
	nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
	nsec >>= shift;
	/* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;
	return 0;
}
/*
 * CLOCK_MONOTONIC: like do_realtime() but with the kernel's wtm_clock_*
 * offset folded in, all captured within one seqcount section.
 * Returns 0 on success, -1 when the kernel requires the real syscall.
 */
static notrace int do_monotonic(const struct vdso_data *vd, struct timespec *ts)
{
	u32 seq, mult, shift;
	u64 nsec, cycle_last;
#ifdef ARCH_CLOCK_FIXED_MASK
	/* Counter width fixed at build time; skip the per-call load. */
	static const u64 mask = ARCH_CLOCK_FIXED_MASK;
#else
	u64 mask;
#endif
	vdso_wtm_clock_nsec_t wtm_nsec;
	__kernel_time_t sec;

	do {
		seq = vdso_read_begin(vd);

		/* Checked inside the loop so a switch to syscall mode is seen. */
		if (vd->use_syscall & USE_SYSCALL_MASK)
			return -1;

		cycle_last = vd->cs_cycle_last;
		mult = vd->cs_mono_mult;
		shift = vd->cs_shift;
#ifndef ARCH_CLOCK_FIXED_MASK
		mask = vd->cs_mask;
#endif
		sec = vd->xtime_clock_sec;
		nsec = vd->xtime_clock_snsec; /* still left-shifted by 'shift' */
		/* Offset applied after unshifting, so keep the parts separate. */
		sec += vd->wtm_clock_sec;
		wtm_nsec = vd->wtm_clock_nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
	nsec >>= shift;
	nsec += wtm_nsec;
	/* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;
	return 0;
}
/*
 * CLOCK_MONOTONIC_RAW: hardware-rate monotonic time — uses the raw
 * multiplier (cs_raw_mult, no NTP frequency adjustment) and the raw_time_*
 * base instead of the adjusted ones.
 * Returns 0 on success, -1 when the kernel requires the real syscall.
 */
static notrace int do_monotonic_raw(const struct vdso_data *vd,
				    struct timespec *ts)
{
	u32 seq, mult, shift;
	u64 nsec, cycle_last;
#ifdef ARCH_CLOCK_FIXED_MASK
	/* Counter width fixed at build time; skip the per-call load. */
	static const u64 mask = ARCH_CLOCK_FIXED_MASK;
#else
	u64 mask;
#endif
	vdso_raw_time_sec_t sec;

	do {
		seq = vdso_read_begin(vd);

		/* Checked inside the loop so a switch to syscall mode is seen. */
		if (vd->use_syscall & USE_SYSCALL_MASK)
			return -1;

		cycle_last = vd->cs_cycle_last;
		mult = vd->cs_raw_mult; /* raw, NTP-unadjusted multiplier */
		shift = vd->cs_shift;
#ifndef ARCH_CLOCK_FIXED_MASK
		mask = vd->cs_mask;
#endif
		sec = vd->raw_time_sec;
		nsec = vd->raw_time_nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
	nsec >>= shift;
	/* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;
	return 0;
}
/*
 * CLOCK_BOOTTIME: monotonic time including suspended time — computed as
 * do_monotonic() plus the btm_* boot-time offsets from the data page.
 * Returns 0 on success, -1 when the kernel requires the real syscall.
 */
static notrace int do_boottime(const struct vdso_data *vd, struct timespec *ts)
{
	u32 seq, mult, shift;
	u64 nsec, cycle_last;
	vdso_wtm_clock_nsec_t wtm_nsec;
#ifdef ARCH_CLOCK_FIXED_MASK
	/* Counter width fixed at build time; skip the per-call load. */
	static const u64 mask = ARCH_CLOCK_FIXED_MASK;
#else
	u64 mask;
#endif
	__kernel_time_t sec;

	do {
		seq = vdso_read_begin(vd);

		/* Checked inside the loop so a switch to syscall mode is seen. */
		if (vd->use_syscall & USE_SYSCALL_MASK)
			return -1;

		cycle_last = vd->cs_cycle_last;
		mult = vd->cs_mono_mult;
		shift = vd->cs_shift;
#ifndef ARCH_CLOCK_FIXED_MASK
		mask = vd->cs_mask;
#endif
		sec = vd->xtime_clock_sec;
		nsec = vd->xtime_clock_snsec; /* still left-shifted by 'shift' */
		/* Fold in both the monotonic and the boot-time offsets. */
		sec += vd->wtm_clock_sec + vd->btm_sec;
		wtm_nsec = vd->wtm_clock_nsec + vd->btm_nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
	nsec >>= shift;
	nsec += wtm_nsec;
	/* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;
	return 0;
}
  239. #endif /* ARCH_PROVIDES_TIMER */
/*
 * vDSO entry for clock_gettime(): serves the coarse clocks purely from
 * the data page and, when ARCH_PROVIDES_TIMER, the precise clocks from
 * the userspace-readable counter. Any other clock — or a do_*() helper
 * reporting -1 — falls back to the real system call.
 */
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	const struct vdso_data *vd = __get_datapage();

#ifdef USE_SYSCALL
	/* Kernel has globally disabled the vDSO fast path. */
	if (vd->use_syscall & USE_SYSCALL_MASK) {
		goto fallback;
	}
#endif
	switch (clock) {
	case CLOCK_REALTIME_COARSE:
		do_realtime_coarse(vd, ts);
		break;
	case CLOCK_MONOTONIC_COARSE:
		do_monotonic_coarse(vd, ts);
		break;
#ifdef ARCH_PROVIDES_TIMER
	case CLOCK_REALTIME:
		if (do_realtime(vd, ts))
			goto fallback;
		break;
	case CLOCK_MONOTONIC:
		if (do_monotonic(vd, ts))
			goto fallback;
		break;
	case CLOCK_MONOTONIC_RAW:
		if (do_monotonic_raw(vd, ts))
			goto fallback;
		break;
	case CLOCK_BOOTTIME:
		if (do_boottime(vd, ts))
			goto fallback;
		break;
#endif
	default:
		goto fallback;
	}
	return 0;
fallback:
	return clock_gettime_fallback(clock, ts);
}
  280. #ifdef ARCH_PROVIDES_TIMER
  281. notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
  282. {
  283. const struct vdso_data *vd = __get_datapage();
  284. if (likely(tv != NULL)) {
  285. struct timespec ts;
  286. if (do_realtime(vd, &ts))
  287. return gettimeofday_fallback(tv, tz);
  288. tv->tv_sec = ts.tv_sec;
  289. tv->tv_usec = ts.tv_nsec / 1000;
  290. }
  291. if (unlikely(tz != NULL)) {
  292. tz->tz_minuteswest = vd->tz_minuteswest;
  293. tz->tz_dsttime = vd->tz_dsttime;
  294. }
  295. return 0;
  296. }
  297. #endif
  298. int __vdso_clock_getres(clockid_t clock, struct timespec *res)
  299. {
  300. long nsec;
  301. #ifdef USE_SYSCALL
  302. const struct vdso_data *vd = __get_datapage();
  303. if (vd->use_syscall & USE_SYSCALL_MASK) {
  304. return clock_getres_fallback(clock, res);
  305. }
  306. #endif
  307. switch (clock) {
  308. case CLOCK_REALTIME_COARSE:
  309. case CLOCK_MONOTONIC_COARSE:
  310. nsec = LOW_RES_NSEC;
  311. break;
  312. #ifdef ARCH_PROVIDES_TIMER
  313. case CLOCK_REALTIME:
  314. case CLOCK_MONOTONIC:
  315. case CLOCK_MONOTONIC_RAW:
  316. case CLOCK_BOOTTIME:
  317. nsec = MONOTONIC_RES_NSEC;
  318. break;
  319. #endif
  320. default:
  321. return clock_getres_fallback(clock, res);
  322. }
  323. if (likely(res != NULL)) {
  324. res->tv_sec = 0;
  325. res->tv_nsec = nsec;
  326. }
  327. return 0;
  328. }
/*
 * vDSO entry for time(): coarse (tick-granularity) seconds are adequate,
 * so a single READ_ONCE of the data page suffices — no seqcount loop, as
 * a torn read cannot happen on one naturally-aligned field.
 */
notrace time_t __vdso_time(time_t *t)
{
	const struct vdso_data *vd = __get_datapage();
#ifdef USE_SYSCALL
	time_t result;

	if (vd->use_syscall & USE_SYSCALL_MASK) {
		/* Facsimile of syscall implementation (faster by a few ns) */
		struct timeval tv;
		int ret = gettimeofday_fallback(&tv, NULL);

		if (ret < 0)
			return ret;
		result = tv.tv_sec;
	} else {
		result = READ_ONCE(vd->xtime_coarse_sec);
	}
#else
	time_t result = READ_ONCE(vd->xtime_coarse_sec);
#endif
	/* time() also stores through its argument when non-NULL. */
	if (t)
		*t = result;
	return result;
}