/*
 * intel_idle.c - native hardware idle loop for modern Intel processors
 *
 * Copyright (c) 2010, Intel Corporation.
 * Len Brown <len.brown@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

/*
 * intel_idle is a cpuidle driver that loads on specific Intel processors
 * in lieu of the legacy ACPI processor_idle driver.  The intent is to
 * make Linux more efficient on these processors, as intel_idle knows
 * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs.
 */

/*
 * Design Assumptions
 *
 * All CPUs have same idle states as boot CPU
 *
 * Chipset BM_STS (bus master status) bit is a NOP
 *	for preventing entry into deep C-states
 */


/* un-comment DEBUG to enable pr_debug() statements */
#define DEBUG

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>	/* ktime_get_real() */
#include <trace/events/power.h>
#include <linux/sched.h>

#define INTEL_IDLE_VERSION "0.4"
#define PREFIX "intel_idle: "

#define MWAIT_SUBSTATE_MASK	(0xf)
#define MWAIT_CSTATE_MASK	(0xf)
#define MWAIT_SUBSTATE_SIZE	(4)
#define MWAIT_MAX_NUM_CSTATES	8
#define CPUID_MWAIT_LEAF (5)
#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
#define CPUID5_ECX_INTERRUPT_BREAK	(0x2)
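
/*
 * MWAIT hint encoding as used in this file: bits [7:4] of the EAX hint
 * select the MWAIT C-state (0-based, so hint 0x00 is C-state 1, 0x10 is
 * C-state 2, ...) and bits [3:0] select a sub-state.  intel_idle()
 * recovers the 1-based C-state number via
 * ((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1, and CPUID
 * leaf 5 reports in EDX a 4-bit count of supported sub-states for each
 * C-state (stored in mwait_substates below).
 */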

static struct cpuidle_driver intel_idle_driver = {
	.name = "intel_idle",
	.owner = THIS_MODULE,
};
/* intel_idle.max_cstate=0 disables driver */
static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;

static unsigned int mwait_substates;

/* Reliable LAPIC Timer States, bit 1 for C1 etc.  */
static unsigned int lapic_timer_reliable_states;

static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);

static struct cpuidle_state *cpuidle_state_table;

/*
 * States are indexed by the cstate number,
 * which is also the index into the MWAIT hint array.
 * Thus C0 is a dummy.
 */
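/*
 * Note on the tables below (per cpuidle convention): exit_latency and
 * target_residency are in microseconds and power_usage is in mW.  Each
 * entry sits in the slot of the MWAIT C-state that requests it (see the
 * slot comments), while .name reflects the hardware C-state it maps to
 * on that part, which is why e.g. "NHM-C3" appears in the MWAIT C2 slot;
 * .desc and .driver_data both carry the raw MWAIT hint.
 */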
static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
	{ /* MWAIT C0 */ },
	{ /* MWAIT C1 */
		.name = "NHM-C1",
		.desc = "MWAIT 0x00",
		.driver_data = (void *) 0x00,
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 3,
		.power_usage = 1000,
		.target_residency = 6,
		.enter = &intel_idle },
	{ /* MWAIT C2 */
		.name = "NHM-C3",
		.desc = "MWAIT 0x10",
		.driver_data = (void *) 0x10,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 20,
		.power_usage = 500,
		.target_residency = 80,
		.enter = &intel_idle },
	{ /* MWAIT C3 */
		.name = "NHM-C6",
		.desc = "MWAIT 0x20",
		.driver_data = (void *) 0x20,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.power_usage = 350,
		.target_residency = 800,
		.enter = &intel_idle },
};

static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
	{ /* MWAIT C0 */ },
	{ /* MWAIT C1 */
		.name = "ATM-C1",
		.desc = "MWAIT 0x00",
		.driver_data = (void *) 0x00,
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 1,
		.power_usage = 1000,
		.target_residency = 4,
		.enter = &intel_idle },
	{ /* MWAIT C2 */
		.name = "ATM-C2",
		.desc = "MWAIT 0x10",
		.driver_data = (void *) 0x10,
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 20,
		.power_usage = 500,
		.target_residency = 80,
		.enter = &intel_idle },
	{ /* MWAIT C3 */ },
	{ /* MWAIT C4 */
		.name = "ATM-C4",
		.desc = "MWAIT 0x30",
		.driver_data = (void *) 0x30,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 100,
		.power_usage = 250,
		.target_residency = 400,
		.enter = &intel_idle },
	{ /* MWAIT C5 */ },
	{ /* MWAIT C6 */
		.name = "ATM-C6",
		.desc = "MWAIT 0x52",
		.driver_data = (void *) 0x52,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.power_usage = 150,
		.target_residency = 560,
		.enter = &intel_idle },
};

/**
 * intel_idle - enter the MWAIT-based idle state selected by cpuidle
 * @dev: cpuidle_device
 * @state: cpuidle state
 *
 * Returns the time actually spent idle, in microseconds.
 */
static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
{
	unsigned long ecx = 1; /* break on interrupt flag */
	unsigned long eax = (unsigned long)cpuidle_get_statedata(state);
	unsigned int cstate;
	ktime_t kt_before, kt_after;
	s64 usec_delta;
	int cpu = smp_processor_id();

	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;

	local_irq_disable();

	/*
	 * If the state flag indicates that the TLB will be flushed or if this
	 * is the deepest c-state supported, do a voluntary leave mm to avoid
	 * costly and mostly unnecessary wakeups for flushing the user TLB's
	 * associated with the active mm.
	 */
	if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED ||
	    (&dev->states[dev->state_count - 1] == state))
		leave_mm(cpu);

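	/*
	 * The local APIC timer can stop in deep C-states on parts without
	 * an always-running APIC timer (ARAT), so hand wakeup duty to the
	 * clockevents broadcast mechanism for any C-state not marked in
	 * lapic_timer_reliable_states.
	 */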
	if (!(lapic_timer_reliable_states & (1 << (cstate))))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

	kt_before = ktime_get_real();

	stop_critical_timings();
#ifndef MODULE
	trace_power_start(POWER_CSTATE, (eax >> 4) + 1, cpu);
#endif
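	/*
	 * Standard MONITOR/MWAIT idle sequence: arm the monitor on the
	 * current thread's flags word so that a remote task wakeup (which
	 * sets TIF_NEED_RESCHED there) writes to the monitored line and
	 * breaks MWAIT, then re-check need_resched() to close the race.
	 * With ecx bit 0 set, pending interrupts also break MWAIT even
	 * though interrupts are disabled here.
	 */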
	if (!need_resched()) {

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(eax, ecx);
	}

	start_critical_timings();

	kt_after = ktime_get_real();
	usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before));

	local_irq_enable();

	if (!(lapic_timer_reliable_states & (1 << (cstate))))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);

	return usec_delta;
}

/*
 * intel_idle_probe()
 */
static int intel_idle_probe(void)
{
	unsigned int eax, ebx, ecx;

	if (max_cstate == 0) {
		pr_debug(PREFIX "disabled\n");
		return -EPERM;
	}

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_MWAIT))
		return -ENODEV;

	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return -ENODEV;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
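	/*
	 * CPUID leaf 5 (MONITOR/MWAIT): ECX advertises the extension and
	 * "break on interrupt" capabilities checked below; EDX, captured
	 * directly into mwait_substates, enumerates how many sub-states
	 * each MWAIT C-state supports (4 bits per C-state).
	 */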

	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
		!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
			return -ENODEV;

	pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);

	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
		lapic_timer_reliable_states = 0xFFFFFFFF;

	if (boot_cpu_data.x86 != 6)	/* family 6 */
		return -ENODEV;

	switch (boot_cpu_data.x86_model) {

	case 0x1A:	/* Core i7, Xeon 5500 series */
	case 0x1E:	/* Core i7 and i5 Processor - Lynnfield Jasper Forest */
	case 0x1F:	/* Core i7 and i5 Processor - Nehalem */
	case 0x2E:	/* Nehalem-EX Xeon */
	case 0x2F:	/* Westmere-EX Xeon */
		lapic_timer_reliable_states = (1 << 1);	 /* C1 */

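		/*
		 * Intentional fall-through: the Nehalem-class models above
		 * also use the nehalem_cstates table assigned below.
		 */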
	case 0x25:	/* Westmere */
	case 0x2C:	/* Westmere */
		cpuidle_state_table = nehalem_cstates;
		break;

	case 0x1C:	/* 28 - Atom Processor */
	case 0x26:	/* 38 - Lincroft Atom Processor */
		lapic_timer_reliable_states = (1 << 1); /* C1 */
		cpuidle_state_table = atom_cstates;
		break;
#ifdef FUTURE_USE
	case 0x17:	/* 23 - Core 2 Duo */
		lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
#endif

	default:
		pr_debug(PREFIX "does not run on family %d model %d\n",
			boot_cpu_data.x86, boot_cpu_data.x86_model);
		return -ENODEV;
	}

	pr_debug(PREFIX "v" INTEL_IDLE_VERSION
		" model 0x%X\n", boot_cpu_data.x86_model);

	pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
		lapic_timer_reliable_states);
	return 0;
}

/*
 * intel_idle_cpuidle_devices_uninit()
 * unregister, free cpuidle_devices
 */
static void intel_idle_cpuidle_devices_uninit(void)
{
	int i;
	struct cpuidle_device *dev;

	for_each_online_cpu(i) {
		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
		cpuidle_unregister_device(dev);
	}

	free_percpu(intel_idle_cpuidle_devices);
	return;
}
/*
 * intel_idle_cpuidle_devices_init()
 * allocate, initialize, register cpuidle_devices
 */
static int intel_idle_cpuidle_devices_init(void)
{
	int i, cstate;
	struct cpuidle_device *dev;

	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
	if (intel_idle_cpuidle_devices == NULL)
		return -ENOMEM;

	for_each_online_cpu(i) {
		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);

		dev->state_count = 1;

		for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
			int num_substates;

			if (cstate > max_cstate) {
				printk(PREFIX "max_cstate %d reached\n",
					max_cstate);
				break;
			}

			/* does the state exist in CPUID.MWAIT? */
			num_substates = (mwait_substates >> ((cstate) * 4))
						& MWAIT_SUBSTATE_MASK;
			if (num_substates == 0)
				continue;
			/* is the state not enabled? */
			if (cpuidle_state_table[cstate].enter == NULL) {
				/* does the driver not know about the state? */
				if (*cpuidle_state_table[cstate].name == '\0')
					pr_debug(PREFIX "unaware of model 0x%x"
						" MWAIT %d please"
						" contact lenb@kernel.org",
					boot_cpu_data.x86_model, cstate);
				continue;
			}

			if ((cstate > 2) &&
				!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halts in idle"
					" states deeper than C2");

			dev->states[dev->state_count] =	/* structure copy */
				cpuidle_state_table[cstate];

			dev->state_count += 1;
		}

		dev->cpu = i;
		if (cpuidle_register_device(dev)) {
			pr_debug(PREFIX "cpuidle_register_device %d failed!\n",
				 i);
			intel_idle_cpuidle_devices_uninit();
			return -EIO;
		}
	}

	return 0;
}


static int __init intel_idle_init(void)
{
	int retval;

	retval = intel_idle_probe();
	if (retval)
		return retval;

	retval = cpuidle_register_driver(&intel_idle_driver);
	if (retval) {
		printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
			cpuidle_get_driver()->name);
		return retval;
	}

	retval = intel_idle_cpuidle_devices_init();
	if (retval) {
		cpuidle_unregister_driver(&intel_idle_driver);
		return retval;
	}

	return 0;
}

static void __exit intel_idle_exit(void)
{
	intel_idle_cpuidle_devices_uninit();
	cpuidle_unregister_driver(&intel_idle_driver);

	return;
}

module_init(intel_idle_init);
module_exit(intel_idle_exit);

module_param(max_cstate, int, 0444);
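/*
 * max_cstate is read-only at runtime (mode 0444); it is set at boot via
 * "intel_idle.max_cstate=N".  A value of 0 keeps the driver from loading
 * (see intel_idle_probe()), while smaller values cap the deepest C-state
 * registered in intel_idle_cpuidle_devices_init().
 */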

MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
MODULE_LICENSE("GPL");