Diffstat (limited to 'target/linux/xburst/patches-3.2/0010-cpufreq_stats-Support-runtime-changes-to-frequency-t.patch')
-rw-r--r--  target/linux/xburst/patches-3.2/0010-cpufreq_stats-Support-runtime-changes-to-frequency-t.patch | 306
1 file changed, 306 insertions(+), 0 deletions(-)
diff --git a/target/linux/xburst/patches-3.2/0010-cpufreq_stats-Support-runtime-changes-to-frequency-t.patch b/target/linux/xburst/patches-3.2/0010-cpufreq_stats-Support-runtime-changes-to-frequency-t.patch
new file mode 100644
index 0000000000..d65a362dc5
--- /dev/null
+++ b/target/linux/xburst/patches-3.2/0010-cpufreq_stats-Support-runtime-changes-to-frequency-t.patch
@@ -0,0 +1,306 @@
+From ca40c7542f0cd0e0dfa074bd4ccefc04b8561427 Mon Sep 17 00:00:00 2001
+From: Maarten ter Huurne <maarten@treewalker.org>
+Date: Tue, 2 Aug 2011 10:26:09 +0200
+Subject: [PATCH 10/21] cpufreq_stats: Support runtime changes to frequency
+ table.
+
+---
+ drivers/cpufreq/cpufreq_stats.c | 161 ++++++++++++++++++++-------------------
+ 1 files changed, 83 insertions(+), 78 deletions(-)
+
+diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
+index c5072a9..95f6eb9 100644
+--- a/drivers/cpufreq/cpufreq_stats.c
++++ b/drivers/cpufreq/cpufreq_stats.c
+@@ -21,6 +21,7 @@
+ #include <linux/kobject.h>
+ #include <linux/spinlock.h>
+ #include <linux/notifier.h>
++#include <linux/string.h>
+ #include <asm/cputime.h>
+
+ static spinlock_t cpufreq_stats_lock;
+@@ -37,7 +38,7 @@ struct cpufreq_stats {
+ unsigned long long last_time;
+ unsigned int max_state;
+ unsigned int state_num;
+- unsigned int last_index;
++ int last_index;
+ cputime64_t *time_in_state;
+ unsigned int *freq_table;
+ #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
+@@ -60,7 +61,7 @@ static int cpufreq_stats_update(unsigned int cpu)
+ cur_time = get_jiffies_64();
+ spin_lock(&cpufreq_stats_lock);
+ stat = per_cpu(cpufreq_stats_table, cpu);
+- if (stat->time_in_state)
++ if (stat->time_in_state && stat->last_index != -1)
+ stat->time_in_state[stat->last_index] =
+ cputime64_add(stat->time_in_state[stat->last_index],
+ cputime_sub(cur_time, stat->last_time));
+@@ -83,7 +84,7 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
+ ssize_t len = 0;
+ int i;
+ struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
+- if (!stat)
++ if (!stat || !stat->time_in_state)
+ return 0;
+ cpufreq_stats_update(stat->cpu);
+ for (i = 0; i < stat->state_num; i++) {
+@@ -101,7 +102,7 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
+ int i, j;
+
+ struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
+- if (!stat)
++ if (!stat || !stat->trans_table)
+ return 0;
+ cpufreq_stats_update(stat->cpu);
+ len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
+@@ -160,63 +161,35 @@ static struct attribute_group stats_attr_group = {
+ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
+ {
+ int index;
+- for (index = 0; index < stat->max_state; index++)
+- if (stat->freq_table[index] == freq)
+- return index;
++ if (stat->freq_table)
++ for (index = 0; index < stat->max_state; index++)
++ if (stat->freq_table[index] == freq)
++ return index;
+ return -1;
+ }
+
+-/* should be called late in the CPU removal sequence so that the stats
+- * memory is still available in case someone tries to use it.
+- */
+ static void cpufreq_stats_free_table(unsigned int cpu)
+ {
+ struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
++ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
++ if (policy && policy->cpu == cpu)
++ sysfs_remove_group(&policy->kobj, &stats_attr_group);
+ if (stat) {
+ kfree(stat->time_in_state);
+ kfree(stat);
+ }
+ per_cpu(cpufreq_stats_table, cpu) = NULL;
+-}
+-
+-/* must be called early in the CPU removal sequence (before
+- * cpufreq_remove_dev) so that policy is still valid.
+- */
+-static void cpufreq_stats_free_sysfs(unsigned int cpu)
+-{
+- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+- if (policy && policy->cpu == cpu)
+- sysfs_remove_group(&policy->kobj, &stats_attr_group);
+ if (policy)
+ cpufreq_cpu_put(policy);
+ }
+
+-static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
++static int cpufreq_stats_update_table(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table)
+ {
+- unsigned int i, j, count = 0, ret = 0;
+- struct cpufreq_stats *stat;
+- struct cpufreq_policy *data;
++ unsigned int i, j, count = 0;
+ unsigned int alloc_size;
+ unsigned int cpu = policy->cpu;
+- if (per_cpu(cpufreq_stats_table, cpu))
+- return -EBUSY;
+- stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
+- if ((stat) == NULL)
+- return -ENOMEM;
+-
+- data = cpufreq_cpu_get(cpu);
+- if (data == NULL) {
+- ret = -EINVAL;
+- goto error_get_fail;
+- }
+-
+- ret = sysfs_create_group(&data->kobj, &stats_attr_group);
+- if (ret)
+- goto error_out;
+-
+- stat->cpu = cpu;
+- per_cpu(cpufreq_stats_table, cpu) = stat;
++ struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
+
+ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ unsigned int freq = table[i].frequency;
+@@ -225,40 +198,73 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
+ count++;
+ }
+
++ if (stat->max_state != count) {
++ stat->max_state = count;
++ kfree(stat->time_in_state);
++ stat->time_in_state = NULL;
++ }
+ alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
+-
+ #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
+ alloc_size += count * count * sizeof(int);
+ #endif
+- stat->max_state = count;
+- stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
+- if (!stat->time_in_state) {
+- ret = -ENOMEM;
+- goto error_out;
+- }
+- stat->freq_table = (unsigned int *)(stat->time_in_state + count);
+-
++ if (stat->time_in_state) {
++ memset(stat->time_in_state, 0, alloc_size);
++ } else {
++ stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
++ if (!stat->time_in_state)
++ return -ENOMEM;
++ stat->freq_table = (unsigned int *)(
++ stat->time_in_state + count);
+ #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
+- stat->trans_table = stat->freq_table + count;
++ stat->trans_table = stat->freq_table + count;
+ #endif
++ }
++
+ j = 0;
+- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+- unsigned int freq = table[i].frequency;
+- if (freq == CPUFREQ_ENTRY_INVALID)
+- continue;
+- if (freq_table_get_index(stat, freq) == -1)
+- stat->freq_table[j++] = freq;
++ if (stat->freq_table) {
++ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
++ unsigned int freq = table[i].frequency;
++ if (freq == CPUFREQ_ENTRY_INVALID)
++ continue;
++ if (freq_table_get_index(stat, freq) == -1)
++ stat->freq_table[j++] = freq;
++ }
+ }
+ stat->state_num = j;
+ spin_lock(&cpufreq_stats_lock);
+ stat->last_time = get_jiffies_64();
+ stat->last_index = freq_table_get_index(stat, policy->cur);
+ spin_unlock(&cpufreq_stats_lock);
++ return 0;
++}
++
++static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
++ struct cpufreq_frequency_table *table)
++{
++ unsigned int ret = 0;
++ struct cpufreq_stats *stat;
++ struct cpufreq_policy *data;
++ unsigned int cpu = policy->cpu;
++
++ stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
++ if ((stat) == NULL)
++ return -ENOMEM;
++
++ data = cpufreq_cpu_get(cpu);
++ if (data == NULL) {
++ ret = -EINVAL;
++ goto error_out;
++ }
++ ret = sysfs_create_group(&data->kobj, &stats_attr_group);
+ cpufreq_cpu_put(data);
++ if (ret)
++ goto error_out;
++
++ stat->cpu = cpu;
++ per_cpu(cpufreq_stats_table, cpu) = stat;
++
+ return 0;
+ error_out:
+- cpufreq_cpu_put(data);
+-error_get_fail:
+ kfree(stat);
+ per_cpu(cpufreq_stats_table, cpu) = NULL;
+ return ret;
+@@ -276,10 +282,12 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
+ table = cpufreq_frequency_get_table(cpu);
+ if (!table)
+ return 0;
+- ret = cpufreq_stats_create_table(policy, table);
+- if (ret)
+- return ret;
+- return 0;
++ if (!per_cpu(cpufreq_stats_table, cpu)) {
++ ret = cpufreq_stats_create_table(policy, table);
++ if (ret)
++ return ret;
++ }
++ return cpufreq_stats_update_table(policy, table);
+ }
+
+ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
+@@ -299,21 +307,23 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
+ old_index = stat->last_index;
+ new_index = freq_table_get_index(stat, freq->new);
+
+- /* We can't do stat->time_in_state[-1]= .. */
+- if (old_index == -1 || new_index == -1)
+- return 0;
+-
+ cpufreq_stats_update(freq->cpu);
+-
+ if (old_index == new_index)
+ return 0;
+
++ if (new_index == -1)
++ return 0;
++
+ spin_lock(&cpufreq_stats_lock);
+ stat->last_index = new_index;
++ if (old_index != -1) {
+ #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
+- stat->trans_table[old_index * stat->max_state + new_index]++;
++ if (stat->trans_table)
++ stat->trans_table[old_index * stat->max_state +
++ new_index]++;
+ #endif
+- stat->total_trans++;
++ stat->total_trans++;
++ }
+ spin_unlock(&cpufreq_stats_lock);
+ return 0;
+ }
+@@ -329,9 +339,6 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
+ case CPU_ONLINE_FROZEN:
+ cpufreq_update_policy(cpu);
+ break;
+- case CPU_DOWN_PREPARE:
+- cpufreq_stats_free_sysfs(cpu);
+- break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ cpufreq_stats_free_table(cpu);
+@@ -340,10 +347,9 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
+ return NOTIFY_OK;
+ }
+
+-/* priority=1 so this will get called before cpufreq_remove_dev */
+-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
++static struct notifier_block cpufreq_stat_cpu_notifier __refdata =
++{
+ .notifier_call = cpufreq_stat_cpu_callback,
+- .priority = 1,
+ };
+
+ static struct notifier_block notifier_policy_block = {
+@@ -390,7 +396,6 @@ static void __exit cpufreq_stats_exit(void)
+ unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
+ for_each_online_cpu(cpu) {
+ cpufreq_stats_free_table(cpu);
+- cpufreq_stats_free_sysfs(cpu);
+ }
+ }
+
+--
+1.7.5.4
+