- /*
-  * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
-  *
-  * This program is free software; you can redistribute it and/or modify
-  * it under the terms of the GNU General Public License version 2 and
-  * only version 2 as published by the Free Software Foundation.
-  *
-  * This program is distributed in the hope that it will be useful,
-  * but WITHOUT ANY WARRANTY; without even the implied warranty of
-  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-  * GNU General Public License for more details.
-  */
- #include <linux/init.h>
- #include <linux/notifier.h>
- #include <linux/cpu.h>
- #include <linux/moduleparam.h>
- #include <linux/cpumask.h>
- #include <linux/cpufreq.h>
- #include <linux/slab.h>
- #include <linux/sched.h>
- #include <linux/tick.h>
- #include <trace/events/power.h>
- #include <linux/sysfs.h>
- #include <linux/module.h>
- #include <linux/input.h>
- #include <linux/kthread.h>
- /* To handle cpufreq min/max request */
- struct cpu_status {
- 	unsigned int min;
- 	unsigned int max;
- };
- static DEFINE_PER_CPU(struct cpu_status, cpu_stats);
- struct events {
- 	spinlock_t cpu_hotplug_lock;
- 	bool cpu_hotplug;
- 	bool init_success;
- };
- static struct events events_group;
- static struct task_struct *events_notify_thread;
- /**************************sysfs start********************************/
- /*
-  * Userspace sends cpu#:min_freq_value to vote for min_freq_value as the new
-  * scaling_min. To withdraw its vote it needs to enter cpu#:0
-  */
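- /*
-  * Illustrative usage (not from the original source): assuming this file is
-  * built as the msm_performance module, the parameter is expected to appear
-  * at /sys/module/msm_performance/parameters/cpu_min_freq, e.g.:
-  *
-  *   echo "0:1497600 4:0" > /sys/module/msm_performance/parameters/cpu_min_freq
-  *
-  * would vote for a 1497600 kHz floor on CPU0 and withdraw the vote on CPU4
-  * (the frequency value is an arbitrary example). Reading the parameter
-  * returns the current per-CPU votes as "cpu:min " pairs.
-  */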
- static int set_cpu_min_freq(const char *buf, const struct kernel_param *kp)
- {
- 	int i, j, ntokens = 0;
- 	unsigned int val, cpu;
- 	const char *cp = buf;
- 	struct cpu_status *i_cpu_stats;
- 	struct cpufreq_policy policy;
- 	cpumask_var_t limit_mask;
- 	int ret;
- 	while ((cp = strpbrk(cp + 1, " :")))
- 		ntokens++;
- 	/* CPU:value pair */
- 	if (!(ntokens % 2))
- 		return -EINVAL;
- 	cp = buf;
- 	cpumask_clear(limit_mask);
- 	for (i = 0; i < ntokens; i += 2) {
- 		if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
- 			return -EINVAL;
- 		if (cpu > (num_present_cpus() - 1))
- 			return -EINVAL;
- 		i_cpu_stats = &per_cpu(cpu_stats, cpu);
- 		i_cpu_stats->min = val;
- 		cpumask_set_cpu(cpu, limit_mask);
- 		cp = strnchr(cp, strlen(cp), ' ');
- 		cp++;
- 	}
- 	/*
- 	 * Since on synchronous systems policy is shared amongst multiple
- 	 * CPUs only one CPU needs to be updated for the limit to be
- 	 * reflected for the entire cluster. We can avoid updating the policy
- 	 * of other CPUs in the cluster once it is done for at least one CPU
- 	 * in the cluster
- 	 */
- 	get_online_cpus();
- 	for_each_cpu(i, limit_mask) {
- 		i_cpu_stats = &per_cpu(cpu_stats, i);
- 		if (cpufreq_get_policy(&policy, i))
- 			continue;
- 		if (cpu_online(i) && (policy.min != i_cpu_stats->min)) {
- 			ret = cpufreq_update_policy(i);
- 			if (ret)
- 				continue;
- 		}
- 		for_each_cpu(j, policy.related_cpus)
- 			cpumask_clear_cpu(j, limit_mask);
- 	}
- 	put_online_cpus();
- 	return 0;
- }
- static int get_cpu_min_freq(char *buf, const struct kernel_param *kp)
- {
- 	int cnt = 0, cpu;
- 	for_each_present_cpu(cpu) {
- 		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
- 				"%d:%u ", cpu, per_cpu(cpu_stats, cpu).min);
- 	}
- 	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
- 	return cnt;
- }
- static const struct kernel_param_ops param_ops_cpu_min_freq = {
- 	.set = set_cpu_min_freq,
- 	.get = get_cpu_min_freq,
- };
- module_param_cb(cpu_min_freq, &param_ops_cpu_min_freq, NULL, 0644);
- /*
-  * Userspace sends cpu#:max_freq_value to vote for max_freq_value as the new
-  * scaling_max. To withdraw its vote it needs to enter cpu#:UINT_MAX
-  */
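- /*
-  * Illustrative usage (not from the original source): assuming the module is
-  * named msm_performance, this parameter should appear at
-  * /sys/module/msm_performance/parameters/cpu_max_freq, e.g.:
-  *
-  *   echo "0:1800000 4:4294967295" > /sys/module/msm_performance/parameters/cpu_max_freq
-  *
-  * would cap CPU0 at 1800000 kHz and withdraw the cap on CPU4
-  * (4294967295 == UINT_MAX; the cap value is an arbitrary example).
-  */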
- static int set_cpu_max_freq(const char *buf, const struct kernel_param *kp)
- {
- 	int i, j, ntokens = 0;
- 	unsigned int val, cpu;
- 	const char *cp = buf;
- 	struct cpu_status *i_cpu_stats;
- 	struct cpufreq_policy policy;
- 	cpumask_var_t limit_mask;
- 	int ret;
- 	while ((cp = strpbrk(cp + 1, " :")))
- 		ntokens++;
- 	/* CPU:value pair */
- 	if (!(ntokens % 2))
- 		return -EINVAL;
- 	cp = buf;
- 	cpumask_clear(limit_mask);
- 	for (i = 0; i < ntokens; i += 2) {
- 		if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
- 			return -EINVAL;
- 		if (cpu > (num_present_cpus() - 1))
- 			return -EINVAL;
- 		i_cpu_stats = &per_cpu(cpu_stats, cpu);
- 		i_cpu_stats->max = val;
- 		cpumask_set_cpu(cpu, limit_mask);
- 		cp = strnchr(cp, strlen(cp), ' ');
- 		cp++;
- 	}
- 	get_online_cpus();
- 	for_each_cpu(i, limit_mask) {
- 		i_cpu_stats = &per_cpu(cpu_stats, i);
- 		if (cpufreq_get_policy(&policy, i))
- 			continue;
- 		if (cpu_online(i) && (policy.max != i_cpu_stats->max)) {
- 			ret = cpufreq_update_policy(i);
- 			if (ret)
- 				continue;
- 		}
- 		for_each_cpu(j, policy.related_cpus)
- 			cpumask_clear_cpu(j, limit_mask);
- 	}
- 	put_online_cpus();
- 	return 0;
- }
- static int get_cpu_max_freq(char *buf, const struct kernel_param *kp)
- {
- 	int cnt = 0, cpu;
- 	for_each_present_cpu(cpu) {
- 		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
- 				"%d:%u ", cpu, per_cpu(cpu_stats, cpu).max);
- 	}
- 	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
- 	return cnt;
- }
- static const struct kernel_param_ops param_ops_cpu_max_freq = {
- 	.set = set_cpu_max_freq,
- 	.get = get_cpu_max_freq,
- };
- module_param_cb(cpu_max_freq, &param_ops_cpu_max_freq, NULL, 0644);
- /* CPU Hotplug */
- static struct kobject *events_kobj;
- static ssize_t show_cpu_hotplug(struct kobject *kobj,
- 				struct kobj_attribute *attr, char *buf)
- {
- 	return snprintf(buf, PAGE_SIZE, "\n");
- }
- static struct kobj_attribute cpu_hotplug_attr =
- 	__ATTR(cpu_hotplug, 0444, show_cpu_hotplug, NULL);
- static struct attribute *events_attrs[] = {
- 	&cpu_hotplug_attr.attr,
- 	NULL,
- };
- static struct attribute_group events_attr_group = {
- 	.attrs = events_attrs,
- };
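- /*
-  * Illustrative note (not from the original source): the cpu_hotplug node is
-  * expected to show up as /sys/module/msm_performance/events/cpu_hotplug
-  * (see init_events_group() below). Its show callback only returns a newline;
-  * the event itself is delivered via sysfs_notify(), so a userspace listener
-  * would typically open the file, do an initial read, and then poll()/select()
-  * on it to be woken whenever a CPU is hotplugged.
-  */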
- /*******************************sysfs ends************************************/
- static int perf_adjust_notify(struct notifier_block *nb, unsigned long val,
- 			      void *data)
- {
- 	struct cpufreq_policy *policy = data;
- 	unsigned int cpu = policy->cpu;
- 	struct cpu_status *cpu_st = &per_cpu(cpu_stats, cpu);
- 	unsigned int min = cpu_st->min, max = cpu_st->max;
- 	if (val != CPUFREQ_ADJUST)
- 		return NOTIFY_OK;
- 	pr_debug("msm_perf: CPU%u policy before: %u:%u kHz\n", cpu,
- 		 policy->min, policy->max);
- 	pr_debug("msm_perf: CPU%u setting min:max %u:%u kHz\n", cpu, min, max);
- 	cpufreq_verify_within_limits(policy, min, max);
- 	pr_debug("msm_perf: CPU%u policy after: %u:%u kHz\n", cpu,
- 		 policy->min, policy->max);
- 	return NOTIFY_OK;
- }
- static struct notifier_block perf_cpufreq_nb = {
- 	.notifier_call = perf_adjust_notify,
- };
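- /*
-  * Illustrative note (not from the original source): perf_cpufreq_nb is
-  * registered as a CPUFREQ_POLICY_NOTIFIER in msm_performance_init(), so on
-  * every CPUFREQ_ADJUST pass the stored per-CPU votes clamp the policy. For
-  * example (arbitrary numbers), with a min vote of 1190400 kHz on a CPU whose
-  * policy would otherwise span 300000-2208000 kHz,
-  * cpufreq_verify_within_limits() raises policy->min to 1190400 kHz while
-  * leaving policy->max untouched.
-  */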
- static int hotplug_notify(unsigned int cpu)
- {
- 	unsigned long flags;
- 	if (events_group.init_success) {
- 		spin_lock_irqsave(&(events_group.cpu_hotplug_lock), flags);
- 		events_group.cpu_hotplug = true;
- 		spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock), flags);
- 		wake_up_process(events_notify_thread);
- 	}
- 	return 0;
- }
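- /*
-  * Descriptive note (added for clarity): hotplug_notify() only records that a
-  * hotplug event happened and wakes events_notify_thread; the kthread below
-  * consumes the flag under cpu_hotplug_lock and calls sysfs_notify() so that
-  * userspace pollers of the "cpu_hotplug" attribute are notified outside the
-  * hotplug path.
-  */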
- static int events_notify_userspace(void *data)
- {
- 	unsigned long flags;
- 	bool notify_change;
- 	while (1) {
- 		set_current_state(TASK_INTERRUPTIBLE);
- 		spin_lock_irqsave(&(events_group.cpu_hotplug_lock), flags);
- 		if (!events_group.cpu_hotplug) {
- 			spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock),
- 					       flags);
- 			schedule();
- 			if (kthread_should_stop())
- 				break;
- 			spin_lock_irqsave(&(events_group.cpu_hotplug_lock),
- 					  flags);
- 		}
- 		set_current_state(TASK_RUNNING);
- 		notify_change = events_group.cpu_hotplug;
- 		events_group.cpu_hotplug = false;
- 		spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock), flags);
- 		if (notify_change)
- 			sysfs_notify(events_kobj, NULL, "cpu_hotplug");
- 	}
- 	return 0;
- }
- static int init_events_group(void)
- {
- 	int ret;
- 	struct kobject *module_kobj;
- 	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
- 	if (!module_kobj) {
- 		pr_err("msm_perf: Couldn't find module kobject\n");
- 		return -ENOENT;
- 	}
- 	events_kobj = kobject_create_and_add("events", module_kobj);
- 	if (!events_kobj) {
- 		pr_err("msm_perf: Failed to add events_kobj\n");
- 		return -ENOMEM;
- 	}
- 	ret = sysfs_create_group(events_kobj, &events_attr_group);
- 	if (ret) {
- 		pr_err("msm_perf: Failed to create sysfs\n");
- 		return ret;
- 	}
- 	spin_lock_init(&(events_group.cpu_hotplug_lock));
- 	events_notify_thread = kthread_run(events_notify_userspace,
- 					   NULL, "msm_perf:events_notify");
- 	if (IS_ERR(events_notify_thread))
- 		return PTR_ERR(events_notify_thread);
- 	events_group.init_success = true;
- 	return 0;
- }
- static int __init msm_performance_init(void)
- {
- 	unsigned int cpu;
- 	int rc;
- 	cpufreq_register_notifier(&perf_cpufreq_nb, CPUFREQ_POLICY_NOTIFIER);
- 	for_each_present_cpu(cpu)
- 		per_cpu(cpu_stats, cpu).max = UINT_MAX;
- 	rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE,
- 				       "msm_performance_cpu_hotplug",
- 				       hotplug_notify,
- 				       NULL);
- 	init_events_group();
- 	return 0;
- }
- late_initcall(msm_performance_init);