core.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655
  1. /*
  2. * Generic OPP Interface
  3. *
  4. * Copyright (C) 2009-2010 Texas Instruments Incorporated.
  5. * Nishanth Menon
  6. * Romit Dasgupta
  7. * Kevin Hilman
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14. #include <linux/clk.h>
  15. #include <linux/errno.h>
  16. #include <linux/err.h>
  17. #include <linux/slab.h>
  18. #include <linux/device.h>
  19. #include <linux/export.h>
  20. #include <linux/regulator/consumer.h>
  21. #include "opp.h"
/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);

/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);

/*
 * Assert (via lockdep) that the caller is in a context that makes list
 * traversal safe: either inside an RCU read-side critical section (readers)
 * or holding opp_table_lock (writers).
 */
#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&opp_table_lock),		\
			 "Missing rcu_read_lock() or "			\
			 "opp_table_lock protection");			\
} while (0)
  37. static struct opp_device *_find_opp_dev(const struct device *dev,
  38. struct opp_table *opp_table)
  39. {
  40. struct opp_device *opp_dev;
  41. list_for_each_entry(opp_dev, &opp_table->dev_list, node)
  42. if (opp_dev->dev == dev)
  43. return opp_dev;
  44. return NULL;
  45. }
  46. /**
  47. * _find_opp_table() - find opp_table struct using device pointer
  48. * @dev: device pointer used to lookup OPP table
  49. *
  50. * Search OPP table for one containing matching device. Does a RCU reader
  51. * operation to grab the pointer needed.
  52. *
  53. * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
  54. * -EINVAL based on type of error.
  55. *
  56. * Locking: For readers, this function must be called under rcu_read_lock().
  57. * opp_table is a RCU protected pointer, which means that opp_table is valid
  58. * as long as we are under RCU lock.
  59. *
  60. * For Writers, this function must be called with opp_table_lock held.
  61. */
  62. struct opp_table *_find_opp_table(struct device *dev)
  63. {
  64. struct opp_table *opp_table;
  65. opp_rcu_lockdep_assert();
  66. if (IS_ERR_OR_NULL(dev)) {
  67. pr_err("%s: Invalid parameters\n", __func__);
  68. return ERR_PTR(-EINVAL);
  69. }
  70. list_for_each_entry_rcu(opp_table, &opp_tables, node)
  71. if (_find_opp_dev(dev, opp_table))
  72. return opp_table;
  73. return ERR_PTR(-ENODEV);
  74. }
  75. /**
  76. * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
  77. * @opp: opp for which voltage has to be returned for
  78. *
  79. * Return: voltage in micro volt corresponding to the opp, else
  80. * return 0
  81. *
  82. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  83. * protected pointer. This means that opp which could have been fetched by
  84. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  85. * under RCU lock. The pointer returned by the opp_find_freq family must be
  86. * used in the same section as the usage of this function with the pointer
  87. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  88. * pointer.
  89. */
  90. unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
  91. {
  92. struct dev_pm_opp *tmp_opp;
  93. unsigned long v = 0;
  94. opp_rcu_lockdep_assert();
  95. tmp_opp = rcu_dereference(opp);
  96. if (IS_ERR_OR_NULL(tmp_opp))
  97. pr_err("%s: Invalid parameters\n", __func__);
  98. else
  99. v = tmp_opp->u_volt;
  100. return v;
  101. }
  102. EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
  103. /**
  104. * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
  105. * @opp: opp for which frequency has to be returned for
  106. *
  107. * Return: frequency in hertz corresponding to the opp, else
  108. * return 0
  109. *
  110. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  111. * protected pointer. This means that opp which could have been fetched by
  112. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  113. * under RCU lock. The pointer returned by the opp_find_freq family must be
  114. * used in the same section as the usage of this function with the pointer
  115. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  116. * pointer.
  117. */
  118. unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
  119. {
  120. struct dev_pm_opp *tmp_opp;
  121. unsigned long f = 0;
  122. opp_rcu_lockdep_assert();
  123. tmp_opp = rcu_dereference(opp);
  124. if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
  125. pr_err("%s: Invalid parameters\n", __func__);
  126. else
  127. f = tmp_opp->rate;
  128. return f;
  129. }
  130. EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
  131. /**
  132. * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
  133. * @opp: opp for which turbo mode is being verified
  134. *
  135. * Turbo OPPs are not for normal use, and can be enabled (under certain
  136. * conditions) for short duration of times to finish high throughput work
  137. * quickly. Running on them for longer times may overheat the chip.
  138. *
  139. * Return: true if opp is turbo opp, else false.
  140. *
  141. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  142. * protected pointer. This means that opp which could have been fetched by
  143. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  144. * under RCU lock. The pointer returned by the opp_find_freq family must be
  145. * used in the same section as the usage of this function with the pointer
  146. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  147. * pointer.
  148. */
  149. bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
  150. {
  151. struct dev_pm_opp *tmp_opp;
  152. opp_rcu_lockdep_assert();
  153. tmp_opp = rcu_dereference(opp);
  154. if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
  155. pr_err("%s: Invalid parameters\n", __func__);
  156. return false;
  157. }
  158. return tmp_opp->turbo;
  159. }
  160. EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
  161. /**
  162. * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
  163. * @dev: device for which we do this operation
  164. *
  165. * Return: This function returns the max clock latency in nanoseconds.
  166. *
  167. * Locking: This function takes rcu_read_lock().
  168. */
  169. unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
  170. {
  171. struct opp_table *opp_table;
  172. unsigned long clock_latency_ns;
  173. rcu_read_lock();
  174. opp_table = _find_opp_table(dev);
  175. if (IS_ERR(opp_table))
  176. clock_latency_ns = 0;
  177. else
  178. clock_latency_ns = opp_table->clock_latency_ns_max;
  179. rcu_read_unlock();
  180. return clock_latency_ns;
  181. }
  182. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
  183. /**
  184. * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
  185. * @dev: device for which we do this operation
  186. *
  187. * Return: This function returns the max voltage latency in nanoseconds.
  188. *
  189. * Locking: This function takes rcu_read_lock().
  190. */
  191. unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
  192. {
  193. struct opp_table *opp_table;
  194. struct dev_pm_opp *opp;
  195. struct regulator *reg;
  196. unsigned long latency_ns = 0;
  197. unsigned long min_uV = ~0, max_uV = 0;
  198. int ret;
  199. rcu_read_lock();
  200. opp_table = _find_opp_table(dev);
  201. if (IS_ERR(opp_table)) {
  202. rcu_read_unlock();
  203. return 0;
  204. }
  205. reg = opp_table->regulator;
  206. if (IS_ERR(reg)) {
  207. /* Regulator may not be required for device */
  208. rcu_read_unlock();
  209. return 0;
  210. }
  211. list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
  212. if (!opp->available)
  213. continue;
  214. if (opp->u_volt_min < min_uV)
  215. min_uV = opp->u_volt_min;
  216. if (opp->u_volt_max > max_uV)
  217. max_uV = opp->u_volt_max;
  218. }
  219. rcu_read_unlock();
  220. /*
  221. * The caller needs to ensure that opp_table (and hence the regulator)
  222. * isn't freed, while we are executing this routine.
  223. */
  224. ret = regulator_set_voltage_time(reg, min_uV, max_uV);
  225. if (ret > 0)
  226. latency_ns = ret * 1000;
  227. return latency_ns;
  228. }
  229. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
  230. /**
  231. * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
  232. * nanoseconds
  233. * @dev: device for which we do this operation
  234. *
  235. * Return: This function returns the max transition latency, in nanoseconds, to
  236. * switch from one OPP to other.
  237. *
  238. * Locking: This function takes rcu_read_lock().
  239. */
  240. unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
  241. {
  242. return dev_pm_opp_get_max_volt_latency(dev) +
  243. dev_pm_opp_get_max_clock_latency(dev);
  244. }
  245. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
  246. /**
  247. * dev_pm_opp_get_suspend_opp() - Get suspend opp
  248. * @dev: device for which we do this operation
  249. *
  250. * Return: This function returns pointer to the suspend opp if it is
  251. * defined and available, otherwise it returns NULL.
  252. *
  253. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  254. * protected pointer. The reason for the same is that the opp pointer which is
  255. * returned will remain valid for use with opp_get_{voltage, freq} only while
  256. * under the locked area. The pointer returned must be used prior to unlocking
  257. * with rcu_read_unlock() to maintain the integrity of the pointer.
  258. */
  259. struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
  260. {
  261. struct opp_table *opp_table;
  262. opp_rcu_lockdep_assert();
  263. opp_table = _find_opp_table(dev);
  264. if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
  265. !opp_table->suspend_opp->available)
  266. return NULL;
  267. return opp_table->suspend_opp;
  268. }
  269. EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
  270. /**
  271. * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
  272. * @dev: device for which we do this operation
  273. *
  274. * Return: This function returns the number of available opps if there are any,
  275. * else returns 0 if none or the corresponding error value.
  276. *
  277. * Locking: This function takes rcu_read_lock().
  278. */
  279. int dev_pm_opp_get_opp_count(struct device *dev)
  280. {
  281. struct opp_table *opp_table;
  282. struct dev_pm_opp *temp_opp;
  283. int count = 0;
  284. rcu_read_lock();
  285. opp_table = _find_opp_table(dev);
  286. if (IS_ERR(opp_table)) {
  287. count = PTR_ERR(opp_table);
  288. dev_dbg(dev, "%s: OPP table not found (%d)\n",
  289. __func__, count);
  290. goto out_unlock;
  291. }
  292. list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
  293. if (temp_opp->available)
  294. count++;
  295. }
  296. out_unlock:
  297. rcu_read_unlock();
  298. return count;
  299. }
  300. EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
  301. /**
  302. * dev_pm_opp_find_freq_exact() - search for an exact frequency
  303. * @dev: device for which we do this operation
  304. * @freq: frequency to search for
  305. * @available: true/false - match for available opp
  306. *
  307. * Return: Searches for exact match in the opp table and returns pointer to the
  308. * matching opp if found, else returns ERR_PTR in case of error and should
  309. * be handled using IS_ERR. Error return values can be:
  310. * EINVAL: for bad pointer
  311. * ERANGE: no match found for search
  312. * ENODEV: if device not found in list of registered devices
  313. *
  314. * Note: available is a modifier for the search. if available=true, then the
  315. * match is for exact matching frequency and is available in the stored OPP
  316. * table. if false, the match is for exact frequency which is not available.
  317. *
  318. * This provides a mechanism to enable an opp which is not available currently
  319. * or the opposite as well.
  320. *
  321. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  322. * protected pointer. The reason for the same is that the opp pointer which is
  323. * returned will remain valid for use with opp_get_{voltage, freq} only while
  324. * under the locked area. The pointer returned must be used prior to unlocking
  325. * with rcu_read_unlock() to maintain the integrity of the pointer.
  326. */
  327. struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
  328. unsigned long freq,
  329. bool available)
  330. {
  331. struct opp_table *opp_table;
  332. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  333. opp_rcu_lockdep_assert();
  334. opp_table = _find_opp_table(dev);
  335. if (IS_ERR(opp_table)) {
  336. int r = PTR_ERR(opp_table);
  337. dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
  338. return ERR_PTR(r);
  339. }
  340. list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
  341. if (temp_opp->available == available &&
  342. temp_opp->rate == freq) {
  343. opp = temp_opp;
  344. break;
  345. }
  346. }
  347. return opp;
  348. }
  349. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
  350. static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
  351. unsigned long *freq)
  352. {
  353. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  354. list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
  355. if (temp_opp->available && temp_opp->rate >= *freq) {
  356. opp = temp_opp;
  357. *freq = opp->rate;
  358. break;
  359. }
  360. }
  361. return opp;
  362. }
  363. /**
  364. * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
  365. * @dev: device for which we do this operation
  366. * @freq: Start frequency
  367. *
  368. * Search for the matching ceil *available* OPP from a starting freq
  369. * for a device.
  370. *
  371. * Return: matching *opp and refreshes *freq accordingly, else returns
  372. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  373. * values can be:
  374. * EINVAL: for bad pointer
  375. * ERANGE: no match found for search
  376. * ENODEV: if device not found in list of registered devices
  377. *
  378. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  379. * protected pointer. The reason for the same is that the opp pointer which is
  380. * returned will remain valid for use with opp_get_{voltage, freq} only while
  381. * under the locked area. The pointer returned must be used prior to unlocking
  382. * with rcu_read_unlock() to maintain the integrity of the pointer.
  383. */
  384. struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
  385. unsigned long *freq)
  386. {
  387. struct opp_table *opp_table;
  388. opp_rcu_lockdep_assert();
  389. if (!dev || !freq) {
  390. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  391. return ERR_PTR(-EINVAL);
  392. }
  393. opp_table = _find_opp_table(dev);
  394. if (IS_ERR(opp_table))
  395. return ERR_CAST(opp_table);
  396. return _find_freq_ceil(opp_table, freq);
  397. }
  398. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
  399. /**
  400. * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
  401. * @dev: device for which we do this operation
  402. * @freq: Start frequency
  403. *
  404. * Search for the matching floor *available* OPP from a starting freq
  405. * for a device.
  406. *
  407. * Return: matching *opp and refreshes *freq accordingly, else returns
  408. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  409. * values can be:
  410. * EINVAL: for bad pointer
  411. * ERANGE: no match found for search
  412. * ENODEV: if device not found in list of registered devices
  413. *
  414. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  415. * protected pointer. The reason for the same is that the opp pointer which is
  416. * returned will remain valid for use with opp_get_{voltage, freq} only while
  417. * under the locked area. The pointer returned must be used prior to unlocking
  418. * with rcu_read_unlock() to maintain the integrity of the pointer.
  419. */
  420. struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
  421. unsigned long *freq)
  422. {
  423. struct opp_table *opp_table;
  424. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  425. opp_rcu_lockdep_assert();
  426. if (!dev || !freq) {
  427. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  428. return ERR_PTR(-EINVAL);
  429. }
  430. opp_table = _find_opp_table(dev);
  431. if (IS_ERR(opp_table))
  432. return ERR_CAST(opp_table);
  433. list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
  434. if (temp_opp->available) {
  435. /* go to the next node, before choosing prev */
  436. if (temp_opp->rate > *freq)
  437. break;
  438. else
  439. opp = temp_opp;
  440. }
  441. }
  442. if (!IS_ERR(opp))
  443. *freq = opp->rate;
  444. return opp;
  445. }
  446. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
  447. /*
  448. * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
  449. * while clk returned here is used.
  450. */
  451. static struct clk *_get_opp_clk(struct device *dev)
  452. {
  453. struct opp_table *opp_table;
  454. struct clk *clk;
  455. rcu_read_lock();
  456. opp_table = _find_opp_table(dev);
  457. if (IS_ERR(opp_table)) {
  458. dev_err(dev, "%s: device opp doesn't exist\n", __func__);
  459. clk = ERR_CAST(opp_table);
  460. goto unlock;
  461. }
  462. clk = opp_table->clk;
  463. if (IS_ERR(clk))
  464. dev_err(dev, "%s: No clock available for the device\n",
  465. __func__);
  466. unlock:
  467. rcu_read_unlock();
  468. return clk;
  469. }
  470. static int _set_opp_voltage(struct device *dev, struct regulator *reg,
  471. unsigned long u_volt, unsigned long u_volt_min,
  472. unsigned long u_volt_max)
  473. {
  474. int ret;
  475. /* Regulator not available for device */
  476. if (IS_ERR(reg)) {
  477. dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
  478. PTR_ERR(reg));
  479. return 0;
  480. }
  481. dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
  482. u_volt, u_volt_max);
  483. ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
  484. u_volt_max);
  485. if (ret)
  486. dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
  487. __func__, u_volt_min, u_volt, u_volt_max, ret);
  488. return ret;
  489. }
/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev: device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies and clock source to the levels specified
 * by the OPP corresponding to the target_freq. When scaling up, voltage is
 * raised before the clock; when scaling down, the clock is lowered first.
 * On failure the previously-programmed frequency and voltage are restored.
 *
 * Return: 0 on success, -EINVAL for a zero target frequency, or the error
 * code from clk/regulator/OPP-lookup operations.
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *old_opp, *opp;
	struct regulator *reg;
	struct clk *clk;
	unsigned long freq, old_freq;
	unsigned long u_volt, u_volt_min, u_volt_max;
	unsigned long old_u_volt, old_u_volt_min, old_u_volt_max;
	int ret;

	if (unlikely(!target_freq)) {
		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
			target_freq);
		return -EINVAL;
	}

	clk = _get_opp_clk(dev);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Fall back to the raw target if the clk framework can't round it */
	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		return 0;
	}

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		rcu_read_unlock();
		return PTR_ERR(opp_table);
	}

	/*
	 * Failing to find the current OPP is not fatal: we can still switch,
	 * we just won't be able to restore the old voltage on error.
	 */
	old_opp = _find_freq_ceil(opp_table, &old_freq);
	if (IS_ERR(old_opp)) {
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

	opp = _find_freq_ceil(opp_table, &freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		rcu_read_unlock();
		return ret;
	}

	/*
	 * Snapshot all voltage values while still under RCU; old_u_volt == 0
	 * later doubles as "no old voltage known" in the rollback path.
	 */
	if (IS_ERR(old_opp)) {
		old_u_volt = 0;
	} else {
		old_u_volt = old_opp->u_volt;
		old_u_volt_min = old_opp->u_volt_min;
		old_u_volt_max = old_opp->u_volt_max;
	}

	u_volt = opp->u_volt;
	u_volt_min = opp->u_volt_min;
	u_volt_max = opp->u_volt_max;

	reg = opp_table->regulator;

	rcu_read_unlock();

	/* Scaling up? Scale voltage before frequency */
	if (freq >= old_freq) {
		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
				       u_volt_max);
		if (ret)
			goto restore_voltage;
	}

	/* Change frequency */

	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
		__func__, old_freq, freq);

	ret = clk_set_rate(clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
		goto restore_voltage;
	}

	/* Scaling down? Scale voltage after frequency */
	if (freq < old_freq) {
		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
				       u_volt_max);
		if (ret)
			goto restore_freq;
	}

	return 0;

restore_freq:
	if (clk_set_rate(clk, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_voltage:
	/* This shouldn't harm even if the voltages weren't updated earlier */
	if (old_u_volt) {
		_set_opp_voltage(dev, reg, old_u_volt, old_u_volt_min,
				 old_u_volt_max);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
/* OPP-dev Helpers */

/*
 * SRCU callback for _remove_opp_dev(): frees the opp_device after readers
 * of the device list have drained.
 */
static void _kfree_opp_dev_rcu(struct rcu_head *head)
{
	struct opp_device *opp_dev;

	opp_dev = container_of(head, struct opp_device, rcu_head);
	kfree_rcu(opp_dev, rcu_head);
}
/*
 * Unlink @opp_dev from @opp_table's device list and defer freeing it until
 * after an SRCU grace period, keeping concurrent RCU readers of the list
 * safe. Also tears down the device's debugfs entries.
 */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del_rcu(&opp_dev->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
		  _kfree_opp_dev_rcu);
}
  612. struct opp_device *_add_opp_dev(const struct device *dev,
  613. struct opp_table *opp_table)
  614. {
  615. struct opp_device *opp_dev;
  616. int ret;
  617. opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
  618. if (!opp_dev)
  619. return NULL;
  620. /* Initialize opp-dev */
  621. opp_dev->dev = dev;
  622. list_add_rcu(&opp_dev->node, &opp_table->dev_list);
  623. /* Create debugfs entries for the opp_table */
  624. ret = opp_debug_register(opp_dev, opp_table);
  625. if (ret)
  626. dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
  627. __func__, ret);
  628. return opp_dev;
  629. }
/**
 * _add_opp_table() - Find OPP table or allocate a new one
 * @dev: device for which we do this operation
 *
 * It tries to find an existing table first, if it couldn't find one, it
 * allocates a new OPP table and returns that. The new table is fully
 * initialized before being published to the global opp_tables list, so
 * RCU readers never observe a partially constructed table.
 *
 * Return: valid opp_table pointer if success, else NULL.
 */
static struct opp_table *_add_opp_table(struct device *dev)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (!IS_ERR(opp_table))
		return opp_table;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device is needed to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return NULL;

	INIT_LIST_HEAD(&opp_table->dev_list);

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		kfree(opp_table);
		return NULL;
	}

	_of_init_opp_table(opp_table, dev);

	/* Set regulator to a non-NULL error value */
	opp_table->regulator = ERR_PTR(-ENXIO);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		/* -EPROBE_DEFER is expected to be retried later: stay quiet */
		if (ret != -EPROBE_DEFER)
			dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
				ret);
	}

	srcu_init_notifier_head(&opp_table->srcu_head);
	INIT_LIST_HEAD(&opp_table->opp_list);

	/* Secure the device table modification */
	list_add_rcu(&opp_table->node, &opp_tables);
	return opp_table;
}
/**
 * _kfree_device_rcu() - Free opp_table RCU handler
 * @head: RCU head embedded in the opp_table being freed
 *
 * SRCU callback for _remove_opp_table(): runs after the grace period that
 * follows the table's removal from the global opp_tables list.
 */
static void _kfree_device_rcu(struct rcu_head *head)
{
	struct opp_table *opp_table = container_of(head, struct opp_table,
						   rcu_head);

	kfree_rcu(opp_table, rcu_head);
}
/**
 * _remove_opp_table() - Removes a OPP table
 * @opp_table: OPP table to be removed.
 *
 * Removes/frees OPP table if it doesn't contain any OPPs. The table is kept
 * alive while any OPPs, supported_hw/prop_name settings or a regulator
 * reference still point at it.
 */
static void _remove_opp_table(struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	/* Still has OPPs: the table is in use */
	if (!list_empty(&opp_table->opp_list))
		return;

	/* A platform set supported_hw / prop_name: keep the table around */
	if (opp_table->supported_hw)
		return;

	if (opp_table->prop_name)
		return;

	/* A valid regulator pointer means a user still holds the table */
	if (!IS_ERR(opp_table->regulator))
		return;

	/* Release clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
				   node);

	_remove_opp_dev(opp_dev, opp_table);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&opp_table->dev_list));

	/* Unpublish first, then free after the SRCU grace period */
	list_del_rcu(&opp_table->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
		  _kfree_device_rcu);
}
/**
 * _kfree_opp_rcu() - Free OPP RCU handler
 * @head: RCU head embedded in the dev_pm_opp being freed
 *
 * SRCU callback queued via call_srcu() by _opp_remove() and
 * _opp_set_availability(). As in _kfree_device_rcu(), the memory is
 * handed to kfree_rcu() so the actual kfree() waits for one more RCU
 * grace period.
 */
static void _kfree_opp_rcu(struct rcu_head *head)
{
	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);

	kfree_rcu(opp, rcu_head);
}
/**
 * _opp_remove() - Remove an OPP from a table definition
 * @opp_table: points back to the opp_table struct this opp belongs to
 * @opp: pointer to the OPP to remove
 * @notify: OPP_EVENT_REMOVE notification should be sent or not
 *
 * This function removes an opp definition from the opp table.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * It is assumed that the caller holds required mutex for an RCU updater
 * strategy.
 */
void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp,
		 bool notify)
{
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	if (notify)
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_REMOVE, opp);
	opp_debug_remove_one(opp);
	/* Unlink first; the actual free waits for SRCU readers to finish */
	list_del_rcu(&opp->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* The table itself is freed too if this was its last OPP/resource */
	_remove_opp_table(opp_table);
}
  753. /**
  754. * dev_pm_opp_remove() - Remove an OPP from OPP table
  755. * @dev: device for which we do this operation
  756. * @freq: OPP to remove with matching 'freq'
  757. *
  758. * This function removes an opp from the opp table.
  759. *
  760. * Locking: The internal opp_table and opp structures are RCU protected.
  761. * Hence this function internally uses RCU updater strategy with mutex locks
  762. * to keep the integrity of the internal data structures. Callers should ensure
  763. * that this function is *NOT* called under RCU protection or in contexts where
  764. * mutex cannot be locked.
  765. */
  766. void dev_pm_opp_remove(struct device *dev, unsigned long freq)
  767. {
  768. struct dev_pm_opp *opp;
  769. struct opp_table *opp_table;
  770. bool found = false;
  771. /* Hold our table modification lock here */
  772. mutex_lock(&opp_table_lock);
  773. opp_table = _find_opp_table(dev);
  774. if (IS_ERR(opp_table))
  775. goto unlock;
  776. list_for_each_entry(opp, &opp_table->opp_list, node) {
  777. if (opp->rate == freq) {
  778. found = true;
  779. break;
  780. }
  781. }
  782. if (!found) {
  783. dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
  784. __func__, freq);
  785. goto unlock;
  786. }
  787. _opp_remove(opp_table, opp, true);
  788. unlock:
  789. mutex_unlock(&opp_table_lock);
  790. }
  791. EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
  792. struct dev_pm_opp *_allocate_opp(struct device *dev,
  793. struct opp_table **opp_table)
  794. {
  795. struct dev_pm_opp *opp;
  796. /* allocate new OPP node */
  797. opp = kzalloc(sizeof(*opp), GFP_KERNEL);
  798. if (!opp)
  799. return NULL;
  800. INIT_LIST_HEAD(&opp->node);
  801. *opp_table = _add_opp_table(dev);
  802. if (!*opp_table) {
  803. kfree(opp);
  804. return NULL;
  805. }
  806. return opp;
  807. }
  808. static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
  809. struct opp_table *opp_table)
  810. {
  811. struct regulator *reg = opp_table->regulator;
  812. if (!IS_ERR(reg) &&
  813. !regulator_is_supported_voltage(reg, opp->u_volt_min,
  814. opp->u_volt_max)) {
  815. pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
  816. __func__, opp->u_volt_min, opp->u_volt_max);
  817. return false;
  818. }
  819. return true;
  820. }
/**
 * _opp_add() - Insert an already-populated OPP into a device's OPP table
 * @dev: device the table belongs to (used only for diagnostics)
 * @new_opp: fully initialized OPP to insert
 * @opp_table: table to insert into
 *
 * Keeps @opp_table->opp_list sorted by increasing frequency. Caller is
 * expected to hold the table-modification mutex (RCU updater side).
 *
 * Return: 0 on success, 0 for an exact duplicate of an available OPP
 * (same freq and volt), -EEXIST for a conflicting duplicate.
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	struct list_head *head = &opp_table->opp_list;
	int ret;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			/* Remember the last node with a smaller rate */
			head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			break;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->u_volt, opp->available,
			 new_opp->rate, new_opp->u_volt, new_opp->available);

		/* Identical, available duplicate is treated as a no-op */
		return opp->available && new_opp->u_volt == opp->u_volt ?
			0 : -EEXIST;
	}

	new_opp->opp_table = opp_table;
	/* Publish after 'head', keeping the list sorted for RCU readers */
	list_add_rcu(&new_opp->node, head);

	ret = opp_debug_create_one(new_opp, opp_table);
	if (ret)
		/* Debugfs failure is reported but not fatal */
		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
			__func__, ret);

	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		/* Keep the node in the list, but mark it unusable */
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

	return 0;
}
/**
 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 * @dynamic: Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
		bool dynamic)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp;
	unsigned long tol;
	int ret;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	new_opp = _allocate_opp(dev, &opp_table);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* populate the opp table */
	new_opp->rate = freq;
	/* voltage_tolerance_v1 is a percentage applied around u_volt */
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
	new_opp->u_volt = u_volt;
	new_opp->u_volt_min = u_volt - tol;
	new_opp->u_volt_max = u_volt + tol;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret)
		goto free_opp;

	mutex_unlock(&opp_table_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 * NOTE(review): the chain is called after dropping opp_table_lock —
	 * presumably so callbacks may take the lock themselves; confirm.
	 */
	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	/* _opp_add() already warned; tear down without an extra REMOVE event */
	_opp_remove(opp_table, new_opp, false);
unlock:
	mutex_unlock(&opp_table_lock);
	return ret;
}
  927. /**
  928. * dev_pm_opp_set_supported_hw() - Set supported platforms
  929. * @dev: Device for which supported-hw has to be set.
  930. * @versions: Array of hierarchy of versions to match.
  931. * @count: Number of elements in the array.
  932. *
  933. * This is required only for the V2 bindings, and it enables a platform to
  934. * specify the hierarchy of versions it supports. OPP layer will then enable
  935. * OPPs, which are available for those versions, based on its 'opp-supported-hw'
  936. * property.
  937. *
  938. * Locking: The internal opp_table and opp structures are RCU protected.
  939. * Hence this function internally uses RCU updater strategy with mutex locks
  940. * to keep the integrity of the internal data structures. Callers should ensure
  941. * that this function is *NOT* called under RCU protection or in contexts where
  942. * mutex cannot be locked.
  943. */
  944. int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
  945. unsigned int count)
  946. {
  947. struct opp_table *opp_table;
  948. int ret = 0;
  949. /* Hold our table modification lock here */
  950. mutex_lock(&opp_table_lock);
  951. opp_table = _add_opp_table(dev);
  952. if (!opp_table) {
  953. ret = -ENOMEM;
  954. goto unlock;
  955. }
  956. /* Make sure there are no concurrent readers while updating opp_table */
  957. WARN_ON(!list_empty(&opp_table->opp_list));
  958. /* Do we already have a version hierarchy associated with opp_table? */
  959. if (opp_table->supported_hw) {
  960. dev_err(dev, "%s: Already have supported hardware list\n",
  961. __func__);
  962. ret = -EBUSY;
  963. goto err;
  964. }
  965. opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
  966. GFP_KERNEL);
  967. if (!opp_table->supported_hw) {
  968. ret = -ENOMEM;
  969. goto err;
  970. }
  971. opp_table->supported_hw_count = count;
  972. mutex_unlock(&opp_table_lock);
  973. return 0;
  974. err:
  975. _remove_opp_table(opp_table);
  976. unlock:
  977. mutex_unlock(&opp_table_lock);
  978. return ret;
  979. }
  980. EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
  981. /**
  982. * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
  983. * @dev: Device for which supported-hw has to be put.
  984. *
  985. * This is required only for the V2 bindings, and is called for a matching
  986. * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
  987. * will not be freed.
  988. *
  989. * Locking: The internal opp_table and opp structures are RCU protected.
  990. * Hence this function internally uses RCU updater strategy with mutex locks
  991. * to keep the integrity of the internal data structures. Callers should ensure
  992. * that this function is *NOT* called under RCU protection or in contexts where
  993. * mutex cannot be locked.
  994. */
  995. void dev_pm_opp_put_supported_hw(struct device *dev)
  996. {
  997. struct opp_table *opp_table;
  998. /* Hold our table modification lock here */
  999. mutex_lock(&opp_table_lock);
  1000. /* Check for existing table for 'dev' first */
  1001. opp_table = _find_opp_table(dev);
  1002. if (IS_ERR(opp_table)) {
  1003. dev_err(dev, "Failed to find opp_table: %ld\n",
  1004. PTR_ERR(opp_table));
  1005. goto unlock;
  1006. }
  1007. /* Make sure there are no concurrent readers while updating opp_table */
  1008. WARN_ON(!list_empty(&opp_table->opp_list));
  1009. if (!opp_table->supported_hw) {
  1010. dev_err(dev, "%s: Doesn't have supported hardware list\n",
  1011. __func__);
  1012. goto unlock;
  1013. }
  1014. kfree(opp_table->supported_hw);
  1015. opp_table->supported_hw = NULL;
  1016. opp_table->supported_hw_count = 0;
  1017. /* Try freeing opp_table if this was the last blocking resource */
  1018. _remove_opp_table(opp_table);
  1019. unlock:
  1020. mutex_unlock(&opp_table_lock);
  1021. }
  1022. EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
  1023. /**
  1024. * dev_pm_opp_set_prop_name() - Set prop-extn name
  1025. * @dev: Device for which the prop-name has to be set.
  1026. * @name: name to postfix to properties.
  1027. *
  1028. * This is required only for the V2 bindings, and it enables a platform to
  1029. * specify the extn to be used for certain property names. The properties to
  1030. * which the extension will apply are opp-microvolt and opp-microamp. OPP core
  1031. * should postfix the property name with -<name> while looking for them.
  1032. *
  1033. * Locking: The internal opp_table and opp structures are RCU protected.
  1034. * Hence this function internally uses RCU updater strategy with mutex locks
  1035. * to keep the integrity of the internal data structures. Callers should ensure
  1036. * that this function is *NOT* called under RCU protection or in contexts where
  1037. * mutex cannot be locked.
  1038. */
  1039. int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
  1040. {
  1041. struct opp_table *opp_table;
  1042. int ret = 0;
  1043. /* Hold our table modification lock here */
  1044. mutex_lock(&opp_table_lock);
  1045. opp_table = _add_opp_table(dev);
  1046. if (!opp_table) {
  1047. ret = -ENOMEM;
  1048. goto unlock;
  1049. }
  1050. /* Make sure there are no concurrent readers while updating opp_table */
  1051. WARN_ON(!list_empty(&opp_table->opp_list));
  1052. /* Do we already have a prop-name associated with opp_table? */
  1053. if (opp_table->prop_name) {
  1054. dev_err(dev, "%s: Already have prop-name %s\n", __func__,
  1055. opp_table->prop_name);
  1056. ret = -EBUSY;
  1057. goto err;
  1058. }
  1059. opp_table->prop_name = kstrdup(name, GFP_KERNEL);
  1060. if (!opp_table->prop_name) {
  1061. ret = -ENOMEM;
  1062. goto err;
  1063. }
  1064. mutex_unlock(&opp_table_lock);
  1065. return 0;
  1066. err:
  1067. _remove_opp_table(opp_table);
  1068. unlock:
  1069. mutex_unlock(&opp_table_lock);
  1070. return ret;
  1071. }
  1072. EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
  1073. /**
  1074. * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
  1075. * @dev: Device for which the prop-name has to be put.
  1076. *
  1077. * This is required only for the V2 bindings, and is called for a matching
  1078. * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
  1079. * will not be freed.
  1080. *
  1081. * Locking: The internal opp_table and opp structures are RCU protected.
  1082. * Hence this function internally uses RCU updater strategy with mutex locks
  1083. * to keep the integrity of the internal data structures. Callers should ensure
  1084. * that this function is *NOT* called under RCU protection or in contexts where
  1085. * mutex cannot be locked.
  1086. */
  1087. void dev_pm_opp_put_prop_name(struct device *dev)
  1088. {
  1089. struct opp_table *opp_table;
  1090. /* Hold our table modification lock here */
  1091. mutex_lock(&opp_table_lock);
  1092. /* Check for existing table for 'dev' first */
  1093. opp_table = _find_opp_table(dev);
  1094. if (IS_ERR(opp_table)) {
  1095. dev_err(dev, "Failed to find opp_table: %ld\n",
  1096. PTR_ERR(opp_table));
  1097. goto unlock;
  1098. }
  1099. /* Make sure there are no concurrent readers while updating opp_table */
  1100. WARN_ON(!list_empty(&opp_table->opp_list));
  1101. if (!opp_table->prop_name) {
  1102. dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
  1103. goto unlock;
  1104. }
  1105. kfree(opp_table->prop_name);
  1106. opp_table->prop_name = NULL;
  1107. /* Try freeing opp_table if this was the last blocking resource */
  1108. _remove_opp_table(opp_table);
  1109. unlock:
  1110. mutex_unlock(&opp_table_lock);
  1111. }
  1112. EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
/**
 * dev_pm_opp_set_regulator() - Set regulator name for the device
 * @dev: Device for which regulator name is being set.
 * @name: Name of the regulator.
 *
 * In order to support OPP switching, OPP layer needs to know the name of the
 * device's regulator, as the core would be required to switch voltages as well.
 *
 * This must be called before any OPPs are initialized for the device.
 *
 * Return: the opp_table pointer on success, ERR_PTR(-errno) on failure.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
struct opp_table *dev_pm_opp_set_regulator(struct device *dev, const char *name)
{
	struct opp_table *opp_table;
	struct regulator *reg;
	int ret;

	mutex_lock(&opp_table_lock);

	/* Find or create the table for this device */
	opp_table = _add_opp_table(dev);
	if (!opp_table) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Already have a regulator set */
	if (WARN_ON(!IS_ERR(opp_table->regulator))) {
		ret = -EBUSY;
		goto err;
	}
	/* Allocate the regulator */
	reg = regulator_get_optional(dev, name);
	if (IS_ERR(reg)) {
		ret = PTR_ERR(reg);
		/* Probe deferral is routine; don't log an error for it */
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "%s: no regulator (%s) found: %d\n",
				__func__, name, ret);
		goto err;
	}

	opp_table->regulator = reg;

	mutex_unlock(&opp_table_lock);
	return opp_table;

err:
	/* Drop the table again if we were the only reason it existed */
	_remove_opp_table(opp_table);
unlock:
	mutex_unlock(&opp_table_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
  1169. /**
  1170. * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
  1171. * @opp_table: OPP table returned from dev_pm_opp_set_regulator().
  1172. *
  1173. * Locking: The internal opp_table and opp structures are RCU protected.
  1174. * Hence this function internally uses RCU updater strategy with mutex locks
  1175. * to keep the integrity of the internal data structures. Callers should ensure
  1176. * that this function is *NOT* called under RCU protection or in contexts where
  1177. * mutex cannot be locked.
  1178. */
  1179. void dev_pm_opp_put_regulator(struct opp_table *opp_table)
  1180. {
  1181. mutex_lock(&opp_table_lock);
  1182. if (IS_ERR(opp_table->regulator)) {
  1183. pr_err("%s: Doesn't have regulator set\n", __func__);
  1184. goto unlock;
  1185. }
  1186. /* Make sure there are no concurrent readers while updating opp_table */
  1187. WARN_ON(!list_empty(&opp_table->opp_list));
  1188. regulator_put(opp_table->regulator);
  1189. opp_table->regulator = ERR_PTR(-ENXIO);
  1190. /* Try freeing opp_table if this was the last blocking resource */
  1191. _remove_opp_table(opp_table);
  1192. unlock:
  1193. mutex_unlock(&opp_table_lock);
  1194. }
  1195. EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
/**
 * dev_pm_opp_add() - Add an OPP table from a table definitions
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	/* dynamic=true: the OPP can later be removed via dev_pm_opp_remove() */
	return _opp_add_v1(dev, freq, u_volt, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev: device for which we do this operation
 * @freq: OPP frequency to modify availability
 * @availability_req: availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* Allocate up front so the locked section cannot fail on OOM */
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp)
		return -ENOMEM;

	mutex_lock(&opp_table_lock);

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;

	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	/*
	 * RCU-style update: publish the modified copy in place of the old
	 * node, then defer freeing the old node until SRCU readers are done.
	 */
	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&opp_table_lock);
	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_ENABLE, new_opp);
	else
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_DISABLE, new_opp);

	return 0;

unlock:
	mutex_unlock(&opp_table_lock);
	/* Pre-allocated copy was not used on this path */
	kfree(new_opp);
	return r;
}
/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used for users an OPP available
 * after being temporarily made unavailable with dev_pm_opp_disable.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	/* Shared enable/disable logic lives in _opp_set_availability() */
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	/* Shared enable/disable logic lives in _opp_set_availability() */
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
  1344. /**
  1345. * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
  1346. * @dev: device pointer used to lookup OPP table.
  1347. *
  1348. * Return: pointer to notifier head if found, otherwise -ENODEV or
  1349. * -EINVAL based on type of error casted as pointer. value must be checked
  1350. * with IS_ERR to determine valid pointer or error result.
  1351. *
  1352. * Locking: This function must be called under rcu_read_lock(). opp_table is a
  1353. * RCU protected pointer. The reason for the same is that the opp pointer which
  1354. * is returned will remain valid for use with opp_get_{voltage, freq} only while
  1355. * under the locked area. The pointer returned must be used prior to unlocking
  1356. * with rcu_read_unlock() to maintain the integrity of the pointer.
  1357. */
  1358. struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
  1359. {
  1360. struct opp_table *opp_table = _find_opp_table(dev);
  1361. if (IS_ERR(opp_table))
  1362. return ERR_CAST(opp_table); /* matching type */
  1363. return &opp_table->srcu_head;
  1364. }
  1365. EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
  1366. /*
  1367. * Free OPPs either created using static entries present in DT or even the
  1368. * dynamically added entries based on remove_all param.
  1369. */
  1370. void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
  1371. {
  1372. struct opp_table *opp_table;
  1373. struct dev_pm_opp *opp, *tmp;
  1374. /* Hold our table modification lock here */
  1375. mutex_lock(&opp_table_lock);
  1376. /* Check for existing table for 'dev' */
  1377. opp_table = _find_opp_table(dev);
  1378. if (IS_ERR(opp_table)) {
  1379. int error = PTR_ERR(opp_table);
  1380. if (error != -ENODEV)
  1381. WARN(1, "%s: opp_table: %d\n",
  1382. IS_ERR_OR_NULL(dev) ?
  1383. "Invalid device" : dev_name(dev),
  1384. error);
  1385. goto unlock;
  1386. }
  1387. /* Find if opp_table manages a single device */
  1388. if (list_is_singular(&opp_table->dev_list)) {
  1389. /* Free static OPPs */
  1390. list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
  1391. if (remove_all || !opp->dynamic)
  1392. _opp_remove(opp_table, opp, true);
  1393. }
  1394. } else {
  1395. _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
  1396. }
  1397. unlock:
  1398. mutex_unlock(&opp_table_lock);
  1399. }
/**
 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
 * @dev: device pointer used to lookup OPP table.
 *
 * Free both OPPs created using static entries present in DT and the
 * dynamically added entries.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_remove_table(struct device *dev)
{
	/* remove_all = true: dynamic OPPs are dropped along with static ones */
	_dev_pm_opp_remove_table(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);