--- a/drivers/cpufreq/powernow-k8.c	(-36 / +53 lines)
+++ b/drivers/cpufreq/powernow-k8.c
@@ -35,7 +35,6 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/cpumask.h>
-#include <linux/sched.h>	/* for current / set_cpus_allowed() */
 #include <linux/io.h>
 #include <linux/delay.h>
 
@@ -1139,46 +1138,43 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
 	return res;
 }
 
-/* Driver entry point to switch to the target frequency */
-static int powernowk8_target(struct cpufreq_policy *pol,
-		unsigned targfreq, unsigned relation)
+struct powernowk8_target_work {
+	struct work_struct		work;
+	struct cpufreq_policy		*pol;
+	unsigned			targfreq;
+	unsigned			relation;
+	int				ret;
+};
+
+static void powernowk8_target_on_cpu(struct work_struct *work)
 {
-	cpumask_var_t oldmask;
+	struct powernowk8_target_work *tw =
+		container_of(work, struct powernowk8_target_work, work);
+	struct cpufreq_policy *pol = tw->pol;
 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
 	u32 checkfid;
 	u32 checkvid;
 	unsigned int newstate;
-	int ret = -EIO;
 
+	tw->ret = -EINVAL;
 	if (!data)
-		return -EINVAL;
+		return;
+
+	tw->ret = -EIO;
 
 	checkfid = data->currfid;
 	checkvid = data->currvid;
 
-	/* only run on specific CPU from here on. */
-	/* This is poor form: use a workqueue or smp_call_function_single */
-	if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
-		return -ENOMEM;
-
-	cpumask_copy(oldmask, tsk_cpus_allowed(current));
-	set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
-
-	if (smp_processor_id() != pol->cpu) {
-		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
-		goto err_out;
-	}
-
 	if (pending_bit_stuck()) {
 		printk(KERN_ERR PFX "failing targ, change pending bit set\n");
-		goto err_out;
+		return;
 	}
 
 	pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
-		pol->cpu, targfreq, pol->min, pol->max, relation);
+		pol->cpu, tw->targfreq, pol->min, pol->max, tw->relation);
 
 	if (query_current_values_with_pending_wait(data))
-		goto err_out;
+		return;
 
 	if (cpu_family != CPU_HW_PSTATE) {
 		pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
@@ -1195,23 +1191,23 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 	}
 
 	if (cpufreq_frequency_table_target(pol, data->powernow_table,
-				targfreq, relation, &newstate))
-		goto err_out;
+				tw->targfreq, tw->relation, &newstate))
+		return;
 
 	mutex_lock(&fidvid_mutex);
 
 	powernow_k8_acpi_pst_values(data, newstate);
 
 	if (cpu_family == CPU_HW_PSTATE)
-		ret = transition_frequency_pstate(data,
-			data->powernow_table[newstate].index);
+		tw->ret = transition_frequency_pstate(data,
+				data->powernow_table[newstate].index);
 	else
-		ret = transition_frequency_fidvid(data, newstate);
-	if (ret) {
+		tw->ret = transition_frequency_fidvid(data, newstate);
+	if (tw->ret) {
 		printk(KERN_ERR PFX "transition frequency failed\n");
-		ret = 1;
+		tw->ret = 1;
 		mutex_unlock(&fidvid_mutex);
-		goto err_out;
+		return;
 	}
 	mutex_unlock(&fidvid_mutex);
 
@@ -1220,12 +1216,33 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 				data->powernow_table[newstate].index);
 	else
 		pol->cur = find_khz_freq_from_fid(data->currfid);
-	ret = 0;
 
-err_out:
-	set_cpus_allowed_ptr(current, oldmask);
-	free_cpumask_var(oldmask);
-	return ret;
+	tw->ret = 0;
+}
+
+/* Driver entry point to switch to the target frequency */
+static int powernowk8_target(struct cpufreq_policy *pol,
+		unsigned targfreq, unsigned relation)
+{
+	struct powernowk8_target_work tw;
+
+	/*
+	 * Must run on @pol->cpu.  Bounce to workqueue if necessary.
+	 * cpufreq core is responsible for ensuring the cpu stays online.
+	 */
+	INIT_WORK_ONSTACK(&tw.work, powernowk8_target_on_cpu);
+	tw.pol = pol;
+	tw.targfreq = targfreq;
+	tw.relation = relation;
+
+	if (smp_processor_id() == pol->cpu) {
+		powernowk8_target_on_cpu(&tw.work);
+	} else {
+		schedule_work_on(pol->cpu, &tw.work);
+		flush_work(&tw.work);
+	}
+
+	return tw.ret;
 }
 
 /* Driver entry point to verify the policy and range of frequencies */
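For reference, below is a minimal sketch (not part of the patch; the names run_on_cpu_work, report_cpu_fn and report_cpu are invented for illustration) of the pattern the new powernowk8_target() relies on: an on-stack work item is initialized with INIT_WORK_ONSTACK(), queued on the target CPU with schedule_work_on(), and the caller blocks in flush_work() until the callback has run there, instead of migrating the calling task with set_cpus_allowed_ptr().

/*
 * Illustrative sketch only -- not part of the patch.  The identifiers
 * run_on_cpu_work, report_cpu_fn and report_cpu() are invented here.
 * Same mechanism as powernowk8_target(): an on-stack work item queued
 * on a specific CPU, with the caller waiting until it has completed.
 */
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

struct run_on_cpu_work {
	struct work_struct	work;
	int			ret;	/* result handed back to the caller */
};

static void report_cpu_fn(struct work_struct *work)
{
	struct run_on_cpu_work *w =
		container_of(work, struct run_on_cpu_work, work);

	/* Runs in a kworker bound to the CPU the work was queued on. */
	w->ret = raw_smp_processor_id();
}

static int report_cpu(int cpu)
{
	struct run_on_cpu_work w;

	INIT_WORK_ONSTACK(&w.work, report_cpu_fn);
	schedule_work_on(cpu, &w.work);	/* queue on the target CPU ...   */
	flush_work(&w.work);		/* ... and wait for it to finish */
	destroy_work_on_stack(&w.work);

	return w.ret;			/* the CPU the callback ran on */
}

The workqueue API's work_on_cpu() helper wraps essentially the same queue-and-wait sequence behind a single call; the patch open-codes it so that the workqueue round-trip is skipped when the caller is already running on pol->cpu.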
