/*
 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
  13. #include <linux/init.h>
  14. #include <linux/notifier.h>
  15. #include <linux/cpu.h>
  16. #include <linux/moduleparam.h>
  17. #include <linux/cpumask.h>
  18. #include <linux/cpufreq.h>
  19. #include <linux/slab.h>
  20. #include <linux/sched.h>
  21. #include <linux/tick.h>
  22. #include <trace/events/power.h>
  23. #include <linux/sysfs.h>
  24. #include <linux/module.h>
  25. #include <linux/input.h>
  26. #include <linux/kthread.h>
/* To handle cpufreq min/max request */
struct cpu_status {
	unsigned int min;	/* userspace-voted floor for scaling_min (kHz, per pr_debug below) */
	unsigned int max;	/* userspace-voted ceiling for scaling_max (kHz) */
};
/* One min/max vote pair per present CPU; applied in perf_adjust_notify() */
static DEFINE_PER_CPU(struct cpu_status, cpu_stats);

/* State shared between the hotplug callback and the notifier kthread */
struct events {
	spinlock_t cpu_hotplug_lock;	/* protects cpu_hotplug */
	bool cpu_hotplug;		/* set by hotplug_notify(), consumed by kthread */
	bool init_success;		/* init_events_group() completed */
};
static struct events events_group;
static struct task_struct *events_notify_thread;
  40. /**************************sysfs start********************************/
  41. /*
  42. * Userspace sends cpu#:min_freq_value to vote for min_freq_value as the new
  43. * scaling_min. To withdraw its vote it needs to enter cpu#:0
  44. */
  45. static int set_cpu_min_freq(const char *buf, const struct kernel_param *kp)
  46. {
  47. int i, j, ntokens = 0;
  48. unsigned int val, cpu;
  49. const char *cp = buf;
  50. struct cpu_status *i_cpu_stats;
  51. struct cpufreq_policy policy;
  52. cpumask_var_t limit_mask;
  53. int ret;
  54. while ((cp = strpbrk(cp + 1, " :")))
  55. ntokens++;
  56. /* CPU:value pair */
  57. if (!(ntokens % 2))
  58. return -EINVAL;
  59. cp = buf;
  60. cpumask_clear(limit_mask);
  61. for (i = 0; i < ntokens; i += 2) {
  62. if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
  63. return -EINVAL;
  64. if (cpu > (num_present_cpus() - 1))
  65. return -EINVAL;
  66. i_cpu_stats = &per_cpu(cpu_stats, cpu);
  67. i_cpu_stats->min = val;
  68. cpumask_set_cpu(cpu, limit_mask);
  69. cp = strnchr(cp, strlen(cp), ' ');
  70. cp++;
  71. }
  72. /*
  73. * Since on synchronous systems policy is shared amongst multiple
  74. * CPUs only one CPU needs to be updated for the limit to be
  75. * reflected for the entire cluster. We can avoid updating the policy
  76. * of other CPUs in the cluster once it is done for at least one CPU
  77. * in the cluster
  78. */
  79. get_online_cpus();
  80. for_each_cpu(i, limit_mask) {
  81. i_cpu_stats = &per_cpu(cpu_stats, i);
  82. if (cpufreq_get_policy(&policy, i))
  83. continue;
  84. if (cpu_online(i) && (policy.min != i_cpu_stats->min)) {
  85. ret = cpufreq_update_policy(i);
  86. if (ret)
  87. continue;
  88. }
  89. for_each_cpu(j, policy.related_cpus)
  90. cpumask_clear_cpu(j, limit_mask);
  91. }
  92. put_online_cpus();
  93. return 0;
  94. }
  95. static int get_cpu_min_freq(char *buf, const struct kernel_param *kp)
  96. {
  97. int cnt = 0, cpu;
  98. for_each_present_cpu(cpu) {
  99. cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
  100. "%d:%u ", cpu, per_cpu(cpu_stats, cpu).min);
  101. }
  102. cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
  103. return cnt;
  104. }
/* Exposed as module parameter "cpu_min_freq" (rw for root, readable by all) */
static const struct kernel_param_ops param_ops_cpu_min_freq = {
	.set = set_cpu_min_freq,
	.get = get_cpu_min_freq,
};
module_param_cb(cpu_min_freq, &param_ops_cpu_min_freq, NULL, 0644);
  110. /*
  111. * Userspace sends cpu#:max_freq_value to vote for max_freq_value as the new
  112. * scaling_max. To withdraw its vote it needs to enter cpu#:UINT_MAX
  113. */
  114. static int set_cpu_max_freq(const char *buf, const struct kernel_param *kp)
  115. {
  116. int i, j, ntokens = 0;
  117. unsigned int val, cpu;
  118. const char *cp = buf;
  119. struct cpu_status *i_cpu_stats;
  120. struct cpufreq_policy policy;
  121. cpumask_var_t limit_mask;
  122. int ret;
  123. while ((cp = strpbrk(cp + 1, " :")))
  124. ntokens++;
  125. /* CPU:value pair */
  126. if (!(ntokens % 2))
  127. return -EINVAL;
  128. cp = buf;
  129. cpumask_clear(limit_mask);
  130. for (i = 0; i < ntokens; i += 2) {
  131. if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
  132. return -EINVAL;
  133. if (cpu > (num_present_cpus() - 1))
  134. return -EINVAL;
  135. i_cpu_stats = &per_cpu(cpu_stats, cpu);
  136. i_cpu_stats->max = val;
  137. cpumask_set_cpu(cpu, limit_mask);
  138. cp = strnchr(cp, strlen(cp), ' ');
  139. cp++;
  140. }
  141. get_online_cpus();
  142. for_each_cpu(i, limit_mask) {
  143. i_cpu_stats = &per_cpu(cpu_stats, i);
  144. if (cpufreq_get_policy(&policy, i))
  145. continue;
  146. if (cpu_online(i) && (policy.max != i_cpu_stats->max)) {
  147. ret = cpufreq_update_policy(i);
  148. if (ret)
  149. continue;
  150. }
  151. for_each_cpu(j, policy.related_cpus)
  152. cpumask_clear_cpu(j, limit_mask);
  153. }
  154. put_online_cpus();
  155. return 0;
  156. }
  157. static int get_cpu_max_freq(char *buf, const struct kernel_param *kp)
  158. {
  159. int cnt = 0, cpu;
  160. for_each_present_cpu(cpu) {
  161. cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
  162. "%d:%u ", cpu, per_cpu(cpu_stats, cpu).max);
  163. }
  164. cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
  165. return cnt;
  166. }
/* Exposed as module parameter "cpu_max_freq" (rw for root, readable by all) */
static const struct kernel_param_ops param_ops_cpu_max_freq = {
	.set = set_cpu_max_freq,
	.get = get_cpu_max_freq,
};
module_param_cb(cpu_max_freq, &param_ops_cpu_max_freq, NULL, 0644);
/* CPU Hotplug */
static struct kobject *events_kobj;

/*
 * Read handler for the "cpu_hotplug" attribute. The file carries no data:
 * userspace polls it and is woken via sysfs_notify() from the events
 * kthread, so a read just returns a newline.
 */
static ssize_t show_cpu_hotplug(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "\n");
}
static struct kobj_attribute cpu_hotplug_attr =
__ATTR(cpu_hotplug, 0444, show_cpu_hotplug, NULL);

static struct attribute *events_attrs[] = {
	&cpu_hotplug_attr.attr,
	NULL,	/* sentinel */
};
static struct attribute_group events_attr_group = {
	.attrs = events_attrs,
};
/*******************************sysfs ends************************************/
  189. static int perf_adjust_notify(struct notifier_block *nb, unsigned long val,
  190. void *data)
  191. {
  192. struct cpufreq_policy *policy = data;
  193. unsigned int cpu = policy->cpu;
  194. struct cpu_status *cpu_st = &per_cpu(cpu_stats, cpu);
  195. unsigned int min = cpu_st->min, max = cpu_st->max;
  196. if (val != CPUFREQ_ADJUST)
  197. return NOTIFY_OK;
  198. pr_debug("msm_perf: CPU%u policy before: %u:%u kHz\n", cpu,
  199. policy->min, policy->max);
  200. pr_debug("msm_perf: CPU%u seting min:max %u:%u kHz\n", cpu, min, max);
  201. cpufreq_verify_within_limits(policy, min, max);
  202. pr_debug("msm_perf: CPU%u policy after: %u:%u kHz\n", cpu,
  203. policy->min, policy->max);
  204. return NOTIFY_OK;
  205. }
  206. static struct notifier_block perf_cpufreq_nb = {
  207. .notifier_call = perf_adjust_notify,
  208. };
  209. static int hotplug_notify(unsigned int cpu)
  210. {
  211. unsigned long flags;
  212. if (events_group.init_success) {
  213. spin_lock_irqsave(&(events_group.cpu_hotplug_lock), flags);
  214. events_group.cpu_hotplug = true;
  215. spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock), flags);
  216. wake_up_process(events_notify_thread);
  217. }
  218. return 0;
  219. }
/*
 * Kthread body: forwards hotplug events to userspace via sysfs_notify().
 *
 * Ordering is delicate: the task state is set to TASK_INTERRUPTIBLE
 * *before* the flag is tested under the lock, so a wake-up that lands
 * between the test and schedule() is not lost (wake_up_process() resets
 * the state to runnable). The lock is dropped across schedule() and
 * re-taken on wake-up before the flag is consumed.
 */
static int events_notify_userspace(void *data)
{
	unsigned long flags;
	bool notify_change;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&(events_group.cpu_hotplug_lock), flags);

		if (!events_group.cpu_hotplug) {
			/* Nothing pending: sleep until hotplug_notify() wakes us */
			spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock),
			flags);
			schedule();
			if (kthread_should_stop())
				break;
			spin_lock_irqsave(&(events_group.cpu_hotplug_lock),
			flags);
		}

		set_current_state(TASK_RUNNING);
		/* Consume the pending event while still holding the lock */
		notify_change = events_group.cpu_hotplug;
		events_group.cpu_hotplug = false;
		spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock), flags);

		if (notify_change)
			sysfs_notify(events_kobj, NULL, "cpu_hotplug");
	}
	return 0;
}
  245. static int init_events_group(void)
  246. {
  247. int ret;
  248. struct kobject *module_kobj;
  249. module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
  250. if (!module_kobj) {
  251. pr_err("msm_perf: Couldn't find module kobject\n");
  252. return -ENOENT;
  253. }
  254. events_kobj = kobject_create_and_add("events", module_kobj);
  255. if (!events_kobj) {
  256. pr_err("msm_perf: Failed to add events_kobj\n");
  257. return -ENOMEM;
  258. }
  259. ret = sysfs_create_group(events_kobj, &events_attr_group);
  260. if (ret) {
  261. pr_err("msm_perf: Failed to create sysfs\n");
  262. return ret;
  263. }
  264. spin_lock_init(&(events_group.cpu_hotplug_lock));
  265. events_notify_thread = kthread_run(events_notify_userspace,
  266. NULL, "msm_perf:events_notify");
  267. if (IS_ERR(events_notify_thread))
  268. return PTR_ERR(events_notify_thread);
  269. events_group.init_success = true;
  270. return 0;
  271. }
  272. static int __init msm_performance_init(void)
  273. {
  274. unsigned int cpu;
  275. int rc;
  276. cpufreq_register_notifier(&perf_cpufreq_nb, CPUFREQ_POLICY_NOTIFIER);
  277. for_each_present_cpu(cpu)
  278. per_cpu(cpu_stats, cpu).max = UINT_MAX;
  279. rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE,
  280. "msm_performance_cpu_hotplug",
  281. hotplug_notify,
  282. NULL);
  283. init_events_group();
  284. return 0;
  285. }
  286. late_initcall(msm_performance_init);