  1. /*
  2. * Generic OPP helper interface for CPU device
  3. *
  4. * Copyright (C) 2009-2014 Texas Instruments Incorporated.
  5. * Nishanth Menon
  6. * Romit Dasgupta
  7. * Kevin Hilman
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14. #include <linux/cpu.h>
  15. #include <linux/cpufreq.h>
  16. #include <linux/err.h>
  17. #include <linux/errno.h>
  18. #include <linux/export.h>
  19. #include <linux/slab.h>
  20. #include "opp.h"
  21. #ifdef CONFIG_CPU_FREQ
  22. /**
  23. * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
  24. * @dev: device for which we do this operation
  25. * @table: Cpufreq table returned back to caller
  26. *
  27. * Generate a cpufreq table for a provided device- this assumes that the
  28. * opp table is already initialized and ready for usage.
  29. *
  30. * This function allocates required memory for the cpufreq table. It is
  31. * expected that the caller does the required maintenance such as freeing
  32. * the table as required.
  33. *
  34. * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
  35. * if no memory available for the operation (table is not populated), returns 0
  36. * if successful and table is populated.
  37. *
  38. * WARNING: It is important for the callers to ensure refreshing their copy of
  39. * the table if any of the mentioned functions have been invoked in the interim.
  40. *
  41. * Locking: The internal opp_table and opp structures are RCU protected.
  42. * Since we just use the regular accessor functions to access the internal data
  43. * structures, we use RCU read lock inside this function. As a result, users of
  44. * this function DONOT need to use explicit locks for invoking.
  45. */
  46. int dev_pm_opp_init_cpufreq_table(struct device *dev,
  47. struct cpufreq_frequency_table **table)
  48. {
  49. struct dev_pm_opp *opp;
  50. struct cpufreq_frequency_table *freq_table = NULL;
  51. int i, max_opps, ret = 0;
  52. unsigned long rate;
  53. rcu_read_lock();
  54. max_opps = dev_pm_opp_get_opp_count(dev);
  55. if (max_opps <= 0) {
  56. ret = max_opps ? max_opps : -ENODATA;
  57. goto out;
  58. }
  59. freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
  60. if (!freq_table) {
  61. ret = -ENOMEM;
  62. goto out;
  63. }
  64. for (i = 0, rate = 0; i < max_opps; i++, rate++) {
  65. /* find next rate */
  66. opp = dev_pm_opp_find_freq_ceil(dev, &rate);
  67. if (IS_ERR(opp)) {
  68. ret = PTR_ERR(opp);
  69. goto out;
  70. }
  71. freq_table[i].driver_data = i;
  72. freq_table[i].frequency = rate / 1000;
  73. /* Is Boost/turbo opp ? */
  74. if (dev_pm_opp_is_turbo(opp))
  75. freq_table[i].flags = CPUFREQ_BOOST_FREQ;
  76. }
  77. freq_table[i].driver_data = i;
  78. freq_table[i].frequency = CPUFREQ_TABLE_END;
  79. *table = &freq_table[0];
  80. out:
  81. rcu_read_unlock();
  82. if (ret)
  83. kfree(freq_table);
  84. return ret;
  85. }
  86. EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
  87. /**
  88. * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
  89. * @dev: device for which we do this operation
  90. * @table: table to free
  91. *
  92. * Free up the table allocated by dev_pm_opp_init_cpufreq_table
  93. */
  94. void dev_pm_opp_free_cpufreq_table(struct device *dev,
  95. struct cpufreq_frequency_table **table)
  96. {
  97. if (!table)
  98. return;
  99. kfree(*table);
  100. *table = NULL;
  101. }
  102. EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
  103. #endif /* CONFIG_CPU_FREQ */
  104. void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
  105. {
  106. struct device *cpu_dev;
  107. int cpu;
  108. WARN_ON(cpumask_empty(cpumask));
  109. for_each_cpu(cpu, cpumask) {
  110. cpu_dev = get_cpu_device(cpu);
  111. if (!cpu_dev) {
  112. pr_err("%s: failed to get cpu%d device\n", __func__,
  113. cpu);
  114. continue;
  115. }
  116. if (of)
  117. dev_pm_opp_of_remove_table(cpu_dev);
  118. else
  119. dev_pm_opp_remove_table(cpu_dev);
  120. }
  121. }
  122. /**
  123. * dev_pm_opp_cpumask_remove_table() - Removes OPP table for @cpumask
  124. * @cpumask: cpumask for which OPP table needs to be removed
  125. *
  126. * This removes the OPP tables for CPUs present in the @cpumask.
  127. * This should be used to remove all the OPPs entries associated with
  128. * the cpus in @cpumask.
  129. *
  130. * Locking: The internal opp_table and opp structures are RCU protected.
  131. * Hence this function internally uses RCU updater strategy with mutex locks
  132. * to keep the integrity of the internal data structures. Callers should ensure
  133. * that this function is *NOT* called under RCU protection or in contexts where
  134. * mutex cannot be locked.
  135. */
  136. void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
  137. {
  138. _dev_pm_opp_cpumask_remove_table(cpumask, false);
  139. }
  140. EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);
  141. /**
  142. * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by few CPUs
  143. * @cpu_dev: CPU device for which we do this operation
  144. * @cpumask: cpumask of the CPUs which share the OPP table with @cpu_dev
  145. *
  146. * This marks OPP table of the @cpu_dev as shared by the CPUs present in
  147. * @cpumask.
  148. *
  149. * Returns -ENODEV if OPP table isn't already present.
  150. *
  151. * Locking: The internal opp_table and opp structures are RCU protected.
  152. * Hence this function internally uses RCU updater strategy with mutex locks
  153. * to keep the integrity of the internal data structures. Callers should ensure
  154. * that this function is *NOT* called under RCU protection or in contexts where
  155. * mutex cannot be locked.
  156. */
  157. int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
  158. const struct cpumask *cpumask)
  159. {
  160. struct opp_device *opp_dev;
  161. struct opp_table *opp_table;
  162. struct device *dev;
  163. int cpu, ret = 0;
  164. mutex_lock(&opp_table_lock);
  165. opp_table = _find_opp_table(cpu_dev);
  166. if (IS_ERR(opp_table)) {
  167. ret = PTR_ERR(opp_table);
  168. goto unlock;
  169. }
  170. for_each_cpu(cpu, cpumask) {
  171. if (cpu == cpu_dev->id)
  172. continue;
  173. dev = get_cpu_device(cpu);
  174. if (!dev) {
  175. dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
  176. __func__, cpu);
  177. continue;
  178. }
  179. opp_dev = _add_opp_dev(dev, opp_table);
  180. if (!opp_dev) {
  181. dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
  182. __func__, cpu);
  183. continue;
  184. }
  185. /* Mark opp-table as multiple CPUs are sharing it now */
  186. opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
  187. }
  188. unlock:
  189. mutex_unlock(&opp_table_lock);
  190. return ret;
  191. }
  192. EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
  193. /**
  194. * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev
  195. * @cpu_dev: CPU device for which we do this operation
  196. * @cpumask: cpumask to update with information of sharing CPUs
  197. *
  198. * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
  199. *
  200. * Returns -ENODEV if OPP table isn't already present and -EINVAL if the OPP
  201. * table's status is access-unknown.
  202. *
  203. * Locking: The internal opp_table and opp structures are RCU protected.
  204. * Hence this function internally uses RCU updater strategy with mutex locks
  205. * to keep the integrity of the internal data structures. Callers should ensure
  206. * that this function is *NOT* called under RCU protection or in contexts where
  207. * mutex cannot be locked.
  208. */
  209. int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
  210. {
  211. struct opp_device *opp_dev;
  212. struct opp_table *opp_table;
  213. int ret = 0;
  214. mutex_lock(&opp_table_lock);
  215. opp_table = _find_opp_table(cpu_dev);
  216. if (IS_ERR(opp_table)) {
  217. ret = PTR_ERR(opp_table);
  218. goto unlock;
  219. }
  220. if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
  221. ret = -EINVAL;
  222. goto unlock;
  223. }
  224. cpumask_clear(cpumask);
  225. if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
  226. list_for_each_entry(opp_dev, &opp_table->dev_list, node)
  227. cpumask_set_cpu(opp_dev->dev->id, cpumask);
  228. } else {
  229. cpumask_set_cpu(cpu_dev->id, cpumask);
  230. }
  231. unlock:
  232. mutex_unlock(&opp_table_lock);
  233. return ret;
  234. }
  235. EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus);