msm_remote_spinlock.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602
  1. /* Copyright (c) 2008-2009, 2011-2016 The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. */
  13. #include <linux/err.h>
  14. #include <linux/kernel.h>
  15. #include <linux/string.h>
  16. #include <linux/delay.h>
  17. #include <linux/of.h>
  18. #include <linux/of_address.h>
  19. #include <linux/msm_remote_spinlock.h>
  20. #include <linux/slab.h>
  21. #include <soc/qcom/smem.h>
/**
 * The local processor (APPS) is PID 0, but because 0 is reserved for an empty
 * lock, the value PID + 1 is used as the APPS token when writing to the lock.
 */
#define SPINLOCK_TOKEN_APPS 1

/* Nonzero once initialize_ops() selects the SFPB hardware-mutex backend. */
static int is_hw_lock_type;
/* Serializes the lazy one-time population of current_ops. */
static DEFINE_MUTEX(ops_init_lock);

/**
 * struct spinlock_ops - backend operations for one remote-spinlock flavor.
 * @lock/@unlock/@trylock: basic lock operations on the raw lock word.
 * @release: force-release a lock held by a (crashed) remote processor.
 * @owner: report the PID currently holding the lock.
 * @lock_rlock_id/@unlock_rlock: lock with an arbitrary token instead of the
 *	APPS token (SFPB backend only).
 * @get_hw_spinlocks_element: debug accessor for the recorded owner token
 *	(SFPB backend only; NULL for the LDREX backend).
 */
struct spinlock_ops {
	void (*lock)(raw_remote_spinlock_t *lock);
	void (*unlock)(raw_remote_spinlock_t *lock);
	int (*trylock)(raw_remote_spinlock_t *lock);
	int (*release)(raw_remote_spinlock_t *lock, uint32_t pid);
	int (*owner)(raw_remote_spinlock_t *lock);
	void (*lock_rlock_id)(raw_remote_spinlock_t *lock, uint32_t tid);
	void (*unlock_rlock)(raw_remote_spinlock_t *lock);
	int (*get_hw_spinlocks_element)(raw_remote_spinlock_t *lock);
};

/* Backend selected at first _remote_spin_lock_init(); all-NULL until then. */
static struct spinlock_ops current_ops;

static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock);
/* ldrex implementation ----------------------------------------------------- */
/* Device-tree compatible string selecting the LDREX (shared-word) backend. */
static char *ldrex_compatible_string = "qcom,ipc-spinlock-ldrex";
  43. #ifdef CONFIG_ARM
/*
 * Spin until the shared lock word reads 0 (free), then atomically claim it
 * with the APPS token via an LDREX/STREX exclusive-monitor loop.
 */
static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
{
	unsigned long tmp;

	/*
	 * 1: load-exclusive the lock word; if it is 0, try to store the
	 * APPS token; retry from 1 if the word was busy or the exclusive
	 * store was lost.
	 */
	__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]\n"
" teqeq %0, #0\n"
" bne 1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (SPINLOCK_TOKEN_APPS)
	: "cc");

	/*
	 * Ensure the ordering of read/write operations to ensure the
	 * proper ownership of the lock during the lock/unlock operations
	 */
	smp_mb();
}
/*
 * Single LDREX/STREX attempt to take the lock.
 * Returns 1 on success (lock now held with the APPS token), 0 if the lock
 * was busy or the exclusive store was lost.
 */
static int __raw_remote_ex_spin_trylock(raw_remote_spinlock_t *lock)
{
	unsigned long tmp;

	/* tmp ends up 0 only if the word was free AND the store succeeded */
	__asm__ __volatile__(
" ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]\n"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (SPINLOCK_TOKEN_APPS)
	: "cc");

	if (tmp == 0) {
		/*
		 * Ensure the ordering of read/write operations to ensure the
		 * proper ownership of the lock during the lock/unlock
		 * operations
		 */
		smp_mb();
		return 1;
	}
	return 0;
}
/*
 * Release the LDREX-based lock. Logs an error if the word does not hold the
 * APPS token, but still clears it (legacy force-release behavior).
 */
static void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)
{
	int lock_owner;

	/*
	 * Ensure the ordering of read/write operations to ensure the
	 * proper ownership of the lock during the lock/unlock operations
	 */
	smp_mb();

	lock_owner = readl_relaxed(&lock->lock);
	if (lock_owner != SPINLOCK_TOKEN_APPS) {
		pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
				__func__, lock_owner);
	}

	/* plain store of 0 marks the lock free */
	__asm__ __volatile__(
" str %1, [%0]\n"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");
}
#else
/*
 * Non-ARM build stubs: the LDREX backend is unavailable, so lock/unlock are
 * no-ops and trylock always reports failure.
 */
static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
{
}

static int __raw_remote_ex_spin_trylock(raw_remote_spinlock_t *lock)
{
	return 0;
}

static void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)
{
}
#endif /* CONFIG_ARM */
/* end ldrex implementation ------------------------------------------------- */
/* sfpb implementation ------------------------------------------------------ */
/* Number of hardware mutexes, from the "qcom,num-locks" DT property. */
static uint32_t lock_count;
/* Physical base and byte size of the SFPB mutex register block. */
static phys_addr_t reg_base;
static uint32_t reg_size;
static uint32_t lock_offset; /* offset into the hardware block before lock 0 */
static uint32_t lock_size;
/* ioremap()ed virtual base; doubles as the "already initialized" flag. */
static void *hw_mutex_reg_base;
/* Serializes the one-time mapping done in remote_spinlock_init_address_hw(). */
static DEFINE_MUTEX(hw_map_init_lock);
/* Last owner token observed per lock, recorded by lock/trylock. */
static int *hw_spinlocks;
/* Device-tree compatible string selecting the SFPB hardware-mutex backend. */
static char *sfpb_compatible_string = "qcom,ipc-spinlock-sfpb";
  125. static int init_hw_mutex(struct device_node *node)
  126. {
  127. struct resource r;
  128. int rc;
  129. rc = of_address_to_resource(node, 0, &r);
  130. if (rc)
  131. BUG();
  132. rc = of_property_read_u32(node, "qcom,num-locks", &lock_count);
  133. if (rc)
  134. BUG();
  135. reg_base = r.start;
  136. reg_size = (uint32_t)(resource_size(&r));
  137. lock_offset = 0;
  138. lock_size = reg_size / lock_count;
  139. return 0;
  140. }
  141. static void find_and_init_hw_mutex(void)
  142. {
  143. struct device_node *node;
  144. node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
  145. BUG_ON(node == NULL);
  146. init_hw_mutex(node);
  147. hw_mutex_reg_base = ioremap(reg_base, reg_size);
  148. BUG_ON(hw_mutex_reg_base == NULL);
  149. hw_spinlocks = kcalloc(lock_count, sizeof(int), GFP_KERNEL);
  150. BUG_ON(hw_spinlocks == NULL);
  151. }
/*
 * Resolve lock @id to its virtual register address in *@lock.
 * Returns 0 on success or -EINVAL if @id is out of range.
 */
static int remote_spinlock_init_address_hw(int id, _remote_spinlock_t *lock)
{
	/*
	 * Optimistic locking. Init only needs to be done once by the first
	 * caller. After that, serializing inits between different callers
	 * is unnecessary. The second check after the lock ensures init
	 * wasn't previously completed by someone else before the lock could
	 * be grabbed.
	 */
	if (!hw_mutex_reg_base) {
		mutex_lock(&hw_map_init_lock);
		if (!hw_mutex_reg_base)
			find_and_init_hw_mutex();
		mutex_unlock(&hw_map_init_lock);
	}

	/* id converts to unsigned here, so negative ids also fail the check */
	if (id >= lock_count)
		return -EINVAL;

	*lock = hw_mutex_reg_base + lock_offset + id * lock_size;
	return 0;
}
/*
 * Map a hardware-mutex register address back to its lock index.
 * BUGs if the address lies outside the mapped SFPB lock region.
 */
static unsigned int remote_spinlock_get_lock_id(raw_remote_spinlock_t *lock)
{
	unsigned int id;

	BUG_ON((uintptr_t)lock < (uintptr_t)hw_mutex_reg_base);
	BUG_ON(((uintptr_t)lock - (uintptr_t)hw_mutex_reg_base) < lock_offset);
	id = (unsigned int)((uintptr_t)lock - (uintptr_t)hw_mutex_reg_base -
			lock_offset) / lock_size;
	BUG_ON(id >= lock_count);
	return id;
}
/*
 * Acquire an SFPB hardware mutex: write the APPS token and spin until a
 * read-back confirms ownership (the hardware keeps the first writer's token
 * while the lock is held).
 */
static void __raw_remote_sfpb_spin_lock(raw_remote_spinlock_t *lock)
{
	int owner;
	unsigned int id = remote_spinlock_get_lock_id(lock);

	/*
	 * Wait for other local processor task to release spinlock if it
	 * already has the remote spinlock locked. This can only happen in
	 * test cases since the local spinlock will prevent this when using the
	 * public APIs.
	 */
	while (readl_relaxed(lock) == SPINLOCK_TOKEN_APPS)
		;

	/* acquire remote spinlock */
	do {
		writel_relaxed(SPINLOCK_TOKEN_APPS, lock);
		/*
		 * Ensure the ordering of read/write operations to ensure the
		 * proper ownership of the lock during the lock/unlock
		 * operations
		 */
		smp_mb();
		owner = readl_relaxed(lock);
		hw_spinlocks[id] = owner;	/* record observed owner for debug */
	} while (owner != SPINLOCK_TOKEN_APPS);
}
/*
 * One-shot attempt to take an SFPB hardware mutex.
 * Returns 1 if the read-back shows APPS now owns the lock, 0 otherwise.
 */
static int __raw_remote_sfpb_spin_trylock(raw_remote_spinlock_t *lock)
{
	int owner;
	unsigned int id = remote_spinlock_get_lock_id(lock);

	/*
	 * If the local processor owns the spinlock, return failure. This can
	 * only happen in test cases since the local spinlock will prevent this
	 * when using the public APIs.
	 */
	if (readl_relaxed(lock) == SPINLOCK_TOKEN_APPS)
		return 0;

	writel_relaxed(SPINLOCK_TOKEN_APPS, lock);
	/*
	 * Ensure the ordering of read/write operations to ensure the
	 * proper ownership of the lock during the lock/unlock operations
	 */
	smp_mb();
	owner = readl_relaxed(lock);
	hw_spinlocks[id] = owner;	/* record observed owner for debug */
	return owner == SPINLOCK_TOKEN_APPS;
}
/*
 * Release an SFPB hardware mutex. Logs an error if APPS is not the current
 * owner, but still clears the lock (legacy force-release behavior).
 */
static void __raw_remote_sfpb_spin_unlock(raw_remote_spinlock_t *lock)
{
	int lock_owner;

	lock_owner = readl_relaxed(lock);
	if (lock_owner != SPINLOCK_TOKEN_APPS) {
		pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
				__func__, lock_owner);
	}

	writel_relaxed(0, lock);	/* 0 == unlocked */
	/*
	 * Ensure the ordering of read/write operations to ensure the
	 * proper ownership of the lock during the lock/unlock operations
	 */
	smp_mb();
}
/*
 * Acquire an SFPB mutex with an arbitrary token @tid instead of the APPS
 * token. tid == 0 would be indistinguishable from "unlocked", so it is
 * rejected with a BUG.
 */
static void __raw_remote_sfpb_spin_lock_rlock_id(raw_remote_spinlock_t *lock,
		uint32_t tid)
{
	if (unlikely(!tid)) {
		pr_err("%s: unsupported rlock tid=0\n", __func__);
		BUG();
	}

	do {
		writel_relaxed(tid, lock);
		/*
		 * Ensure the ordering of read/write operations to ensure the
		 * proper ownership of the lock during the lock/unlock
		 * operations
		 */
		smp_mb();
	} while (readl_relaxed(lock) != tid);
}
/* Release an SFPB mutex taken via __raw_remote_sfpb_spin_lock_rlock_id(). */
static void __raw_remote_sfpb_spin_unlock_rlock(raw_remote_spinlock_t *lock)
{
	writel_relaxed(0, lock);
	/*
	 * Ensure the ordering of read/write operations to ensure the
	 * proper ownership of the lock during the lock/unlock operations
	 */
	smp_mb();
}
/* Return the owner token last recorded for this lock by lock/trylock. */
static int __raw_remote_sfpb_get_hw_spinlocks_element(
		raw_remote_spinlock_t *lock)
{
	return hw_spinlocks[remote_spinlock_get_lock_id(lock)];
}
  274. /* end sfpb implementation -------------------------------------------------- */
  275. /* common spinlock API ------------------------------------------------------ */
/**
 * Release spinlock if it is owned by @pid.
 *
 * This is only to be used for situations where the processor owning
 * the spinlock has crashed and the spinlock must be released.
 *
 * @lock: lock structure
 * @pid: processor ID of processor to release
 * @returns: 0 if the lock was released; 1 if @pid did not hold it
 */
static int __raw_remote_gen_spin_release(raw_remote_spinlock_t *lock,
		uint32_t pid)
{
	int ret = 1;

	/*
	 * Since 0 is reserved for an empty lock and the PIDs start at 0, the
	 * value PID + 1 is written to the lock.
	 */
	if (readl_relaxed(&lock->lock) == (pid + 1)) {
		writel_relaxed(0, &lock->lock);
		/*
		 * Ensure the ordering of read/write operations to ensure the
		 * proper ownership of the lock during the lock/unlock
		 * operations
		 */
		wmb();
		ret = 0;
	}
	return ret;
}
/**
 * Return owner of the spinlock.
 *
 * @lock: pointer to lock structure
 * @returns: >= 0 owned PID; < 0 for error case (-ENODEV if unlocked)
 *
 * Used for testing. PID's are assumed to be 31 bits or less.
 */
static int __raw_remote_gen_spin_owner(raw_remote_spinlock_t *lock)
{
	int owner;

	/*
	 * Ensure the ordering of read/write operations to ensure the
	 * proper ownership of the lock during the lock/unlock operations
	 */
	rmb();

	owner = readl_relaxed(&lock->lock);
	if (owner)
		return owner - 1;	/* stored value is PID + 1 */
	else
		return -ENODEV;
}
  327. static int dt_node_is_valid(const struct device_node *node)
  328. {
  329. const char *status;
  330. int statlen;
  331. status = of_get_property(node, "status", &statlen);
  332. if (status == NULL)
  333. return 1;
  334. if (statlen > 0) {
  335. if (!strcmp(status, "okay") || !strcmp(status, "ok"))
  336. return 1;
  337. }
  338. return 0;
  339. }
  340. static void initialize_ops(void)
  341. {
  342. struct device_node *node;
  343. /*
  344. * of_find_compatible_node() returns a valid pointer even if
  345. * the status property is "disabled", so the validity needs
  346. * to be checked
  347. */
  348. node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
  349. if (node && dt_node_is_valid(node)) {
  350. current_ops.lock = __raw_remote_sfpb_spin_lock;
  351. current_ops.unlock = __raw_remote_sfpb_spin_unlock;
  352. current_ops.trylock = __raw_remote_sfpb_spin_trylock;
  353. current_ops.release = __raw_remote_gen_spin_release;
  354. current_ops.owner = __raw_remote_gen_spin_owner;
  355. current_ops.lock_rlock_id =
  356. __raw_remote_sfpb_spin_lock_rlock_id;
  357. current_ops.unlock_rlock = __raw_remote_sfpb_spin_unlock_rlock;
  358. current_ops.get_hw_spinlocks_element =
  359. __raw_remote_sfpb_get_hw_spinlocks_element;
  360. is_hw_lock_type = 1;
  361. return;
  362. }
  363. node = of_find_compatible_node(NULL, NULL, ldrex_compatible_string);
  364. if (node && dt_node_is_valid(node)) {
  365. current_ops.lock = __raw_remote_ex_spin_lock;
  366. current_ops.unlock = __raw_remote_ex_spin_unlock;
  367. current_ops.trylock = __raw_remote_ex_spin_trylock;
  368. current_ops.release = __raw_remote_gen_spin_release;
  369. current_ops.owner = __raw_remote_gen_spin_owner;
  370. is_hw_lock_type = 0;
  371. return;
  372. }
  373. current_ops.lock = __raw_remote_ex_spin_lock;
  374. current_ops.unlock = __raw_remote_ex_spin_unlock;
  375. current_ops.trylock = __raw_remote_ex_spin_trylock;
  376. current_ops.release = __raw_remote_gen_spin_release;
  377. current_ops.owner = __raw_remote_gen_spin_owner;
  378. is_hw_lock_type = 0;
  379. pr_warn("Falling back to LDREX remote spinlock implementation");
  380. }
  381. /**
  382. * Release all spinlocks owned by @pid.
  383. *
  384. * This is only to be used for situations where the processor owning
  385. * spinlocks has crashed and the spinlocks must be released.
  386. *
  387. * @pid - processor ID of processor to release
  388. */
  389. static void remote_spin_release_all_locks(uint32_t pid, int count)
  390. {
  391. int n;
  392. _remote_spinlock_t lock;
  393. if (pid >= REMOTE_SPINLOCK_NUM_PID) {
  394. pr_err("%s: Unsupported PID %d\n", __func__, pid);
  395. return;
  396. }
  397. for (n = 0; n < count; ++n) {
  398. if (remote_spinlock_init_address(n, &lock) == 0)
  399. _remote_spin_release(&lock, pid);
  400. }
  401. }
/* Release every lock held by @pid; lock_count reflects the active backend. */
void _remote_spin_release_all(uint32_t pid)
{
	remote_spin_release_all_locks(pid, lock_count);
}
#define SMEM_SPINLOCK_COUNT 8
#define SMEM_SPINLOCK_ARRAY_SIZE (SMEM_SPINLOCK_COUNT * sizeof(uint32_t))

/*
 * Resolve lock @id to its address inside the SMEM spinlock array item.
 * Returns 0 on success, -EINVAL for an out-of-range id, or -ENXIO if the
 * SMEM item is not (yet) available.
 */
static int remote_spinlock_init_address_smem(int id, _remote_spinlock_t *lock)
{
	_remote_spinlock_t spinlock_start;

	if (id >= SMEM_SPINLOCK_COUNT)
		return -EINVAL;

	spinlock_start = smem_find(SMEM_SPINLOCK_ARRAY,
				   SMEM_SPINLOCK_ARRAY_SIZE,
				   0,
				   SMEM_ANY_HOST_FLAG);
	if (spinlock_start == NULL)
		return -ENXIO;

	/* NOTE(review): assumes _remote_spinlock_t pointer arithmetic steps
	 * exactly one 32-bit lock per id — confirm against the typedef. */
	*lock = spinlock_start + id;

	/* the SMEM backend exposes a fixed number of locks */
	lock_count = SMEM_SPINLOCK_COUNT;
	return 0;
}
  423. static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock)
  424. {
  425. if (is_hw_lock_type)
  426. return remote_spinlock_init_address_hw(id, lock);
  427. else
  428. return remote_spinlock_init_address_smem(id, lock);
  429. }
  430. int _remote_spin_lock_init(remote_spinlock_id_t id, _remote_spinlock_t *lock)
  431. {
  432. BUG_ON(id == NULL);
  433. /*
  434. * Optimistic locking. Init only needs to be done once by the first
  435. * caller. After that, serializing inits between different callers
  436. * is unnecessary. The second check after the lock ensures init
  437. * wasn't previously completed by someone else before the lock could
  438. * be grabbed.
  439. */
  440. if (!current_ops.lock) {
  441. mutex_lock(&ops_init_lock);
  442. if (!current_ops.lock)
  443. initialize_ops();
  444. mutex_unlock(&ops_init_lock);
  445. }
  446. if (id[0] == 'S' && id[1] == ':') {
  447. /* Single-digit lock ID follows "S:" */
  448. BUG_ON(id[3] != '\0');
  449. return remote_spinlock_init_address((((uint8_t)id[2])-'0'),
  450. lock);
  451. } else {
  452. return -EINVAL;
  453. }
  454. }
  455. /*
  456. * lock comes in as a pointer to a pointer to the lock location, so it must
  457. * be dereferenced and casted to the right type for the actual lock
  458. * implementation functions
  459. */
  460. void _remote_spin_lock(_remote_spinlock_t *lock)
  461. {
  462. if (unlikely(!current_ops.lock))
  463. BUG();
  464. current_ops.lock((raw_remote_spinlock_t *)(*lock));
  465. }
  466. EXPORT_SYMBOL(_remote_spin_lock);
  467. void _remote_spin_unlock(_remote_spinlock_t *lock)
  468. {
  469. if (unlikely(!current_ops.unlock))
  470. BUG();
  471. current_ops.unlock((raw_remote_spinlock_t *)(*lock));
  472. }
  473. EXPORT_SYMBOL(_remote_spin_unlock);
  474. int _remote_spin_trylock(_remote_spinlock_t *lock)
  475. {
  476. if (unlikely(!current_ops.trylock))
  477. BUG();
  478. return current_ops.trylock((raw_remote_spinlock_t *)(*lock));
  479. }
  480. EXPORT_SYMBOL(_remote_spin_trylock);
  481. int _remote_spin_release(_remote_spinlock_t *lock, uint32_t pid)
  482. {
  483. if (unlikely(!current_ops.release))
  484. BUG();
  485. return current_ops.release((raw_remote_spinlock_t *)(*lock), pid);
  486. }
  487. EXPORT_SYMBOL(_remote_spin_release);
  488. int _remote_spin_owner(_remote_spinlock_t *lock)
  489. {
  490. if (unlikely(!current_ops.owner))
  491. BUG();
  492. return current_ops.owner((raw_remote_spinlock_t *)(*lock));
  493. }
  494. EXPORT_SYMBOL(_remote_spin_owner);
  495. void _remote_spin_lock_rlock_id(_remote_spinlock_t *lock, uint32_t tid)
  496. {
  497. if (unlikely(!current_ops.lock_rlock_id))
  498. BUG();
  499. current_ops.lock_rlock_id((raw_remote_spinlock_t *)(*lock), tid);
  500. }
  501. EXPORT_SYMBOL(_remote_spin_lock_rlock_id);
  502. void _remote_spin_unlock_rlock(_remote_spinlock_t *lock)
  503. {
  504. if (unlikely(!current_ops.unlock_rlock))
  505. BUG();
  506. current_ops.unlock_rlock((raw_remote_spinlock_t *)(*lock));
  507. }
  508. EXPORT_SYMBOL(_remote_spin_unlock_rlock);
  509. int _remote_spin_get_hw_spinlocks_element(_remote_spinlock_t *lock)
  510. {
  511. return current_ops.get_hw_spinlocks_element(
  512. (raw_remote_spinlock_t *)(*lock));
  513. }
  514. EXPORT_SYMBOL(_remote_spin_get_hw_spinlocks_element);
  515. /* end common spinlock API -------------------------------------------------- */