/*
 * OMAP3 Power Management Routines
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 * Jouni Hogander
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap1
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <plat/sram.h>
#include <plat/clockdomain.h>
#include <plat/powerdomain.h>
#include <plat/control.h>
#include <plat/serial.h>
#include <plat/sdrc.h>
#include <plat/prcm.h>
#include <plat/gpmc.h>
#include <plat/dma.h>
#include <plat/dmtimer.h>

#include <asm/tlbflush.h>

#include "cm.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"

#include "prm.h"
#include "pm.h"
#include "sdrc.h"

/* Scratchpad offsets */
#define OMAP343X_TABLE_ADDRESS_OFFSET	   0x31
#define OMAP343X_TABLE_VALUE_OFFSET	   0x30
#define OMAP343X_CONTROL_REG_VALUE_OFFSET  0x32
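/*
 * These are u32-word offsets into the OMAP343X_SCRATCHPAD area;
 * restore_table_entry() below uses them to locate the saved page-table
 * entry address, its original value and the saved control register after
 * a wakeup from MPU off mode.
 */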

u32 enable_off_mode;
u32 sleep_while_idle;
u32 wakeup_timer_seconds;
u32 wakeup_timer_milliseconds;

struct power_state {
	struct powerdomain *pwrdm;
	u32 next_state;
#ifdef CONFIG_SUSPEND
	u32 saved_state;
#endif
	struct list_head node;
};

static LIST_HEAD(pwrst_list);

static void (*_omap_sram_idle)(u32 *addr, int save_state);

static int (*_omap_save_secure_sram)(u32 *addr);

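/*
 * Both function pointers above are filled in by omap_push_sram_idle():
 * they point at copies of omap34xx_cpu_suspend and save_secure_ram_context
 * that omap_sram_push() places in on-chip SRAM, so the low-power sequence
 * does not have to execute from SDRAM while the SDRC is being idled.
 */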
static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;
static struct powerdomain *cam_pwrdm;

static inline void omap3_per_save_context(void)
{
	omap_gpio_save_context();
}

static inline void omap3_per_restore_context(void)
{
	omap_gpio_restore_context();
}

static void omap3_enable_io_chain(void)
{
	int timeout = 0;

	if (omap_rev() >= OMAP3430_REV_ES3_1) {
		prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
				     PM_WKEN);
		/* Do a readback to assure write has been done */
		prm_read_mod_reg(WKUP_MOD, PM_WKEN);

		while (!(prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
			 OMAP3430_ST_IO_CHAIN_MASK)) {
			timeout++;
			if (timeout > 1000) {
				printk(KERN_ERR "Wake up daisy chain "
				       "activation failed.\n");
				return;
			}
			prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
					     WKUP_MOD, PM_WKEN);
		}
	}
}

static void omap3_disable_io_chain(void)
{
	if (omap_rev() >= OMAP3430_REV_ES3_1)
		prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
				       PM_WKEN);
}

static void omap3_core_save_context(void)
{
	u32 control_padconf_off;

	/* Save the padconf registers */
	control_padconf_off = omap_ctrl_readl(OMAP343X_CONTROL_PADCONF_OFF);
	control_padconf_off |= START_PADCONF_SAVE;
	omap_ctrl_writel(control_padconf_off, OMAP343X_CONTROL_PADCONF_OFF);
	/* wait for the save to complete */
	while (!(omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS)
			& PADCONF_SAVE_DONE))
		udelay(1);

	/*
	 * Force write the last pad into memory, as this can fail in some
	 * cases according to errata 1.157, 1.185
	 */
	omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
		OMAP343X_CONTROL_MEM_WKUP + 0x2a0);

	/* Save the Interrupt controller context */
	omap_intc_save_context();
	/* Save the GPMC context */
	omap3_gpmc_save_context();
	/* Save the system control module context; padconf already saved above */
	omap3_control_save_context();
	omap_dma_global_context_save();
}

static void omap3_core_restore_context(void)
{
	/* Restore the control module context, padconf restored by h/w */
	omap3_control_restore_context();
	/* Restore the GPMC context */
	omap3_gpmc_restore_context();
	/* Restore the interrupt controller context */
	omap_intc_restore_context();
	omap_dma_global_context_restore();
}

static void omap3_save_secure_ram_context(u32 target_mpu_state)
{
	u32 ret;

	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		/*
		 * MPU next state must be set to POWER_ON temporarily,
		 * otherwise the WFI executed inside the ROM code
		 * will hang the system.
		 */
		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
		ret = _omap_save_secure_sram((u32 *)
				__pa(omap3_secure_ram_storage));
		pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state);
		/* Following is for error tracking, it should not happen */
		if (ret) {
			printk(KERN_ERR "save_secure_sram() returns %08x\n",
				ret);
			while (1)
				;
		}
	}
}

/*
 * PRCM Interrupt Handler Helper Function
 *
 * The purpose of this function is to clear any wake-up events latched
 * in the PRCM PM_WKST_x registers. It is possible that a wake-up event
 * may occur whilst attempting to clear a PM_WKST_x register and thus
 * set another bit in this register. A while loop is used to ensure
 * that any peripheral wake-up events occurring while attempting to
 * clear the PM_WKST_x are detected and cleared.
 */
static int prcm_clear_mod_irqs(s16 module, u8 regs)
{
	u32 wkst, fclk, iclk, clken;
	u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
	u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1;
	u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1;
	u16 grpsel_off = (regs == 3) ?
		OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
	int c = 0;

	wkst = prm_read_mod_reg(module, wkst_off);
	wkst &= prm_read_mod_reg(module, grpsel_off);
	if (wkst) {
		iclk = cm_read_mod_reg(module, iclk_off);
		fclk = cm_read_mod_reg(module, fclk_off);
		while (wkst) {
			clken = wkst;
			cm_set_mod_reg_bits(clken, module, iclk_off);
			/*
			 * For USBHOST, we don't know whether HOST1 or
			 * HOST2 woke us up, so enable both f-clocks
			 */
			if (module == OMAP3430ES2_USBHOST_MOD)
				clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
			cm_set_mod_reg_bits(clken, module, fclk_off);
			prm_write_mod_reg(wkst, module, wkst_off);
			wkst = prm_read_mod_reg(module, wkst_off);
			c++;
		}
		cm_write_mod_reg(iclk, module, iclk_off);
		cm_write_mod_reg(fclk, module, fclk_off);
	}

	return c;
}

static int _prcm_int_handle_wakeup(void)
{
	int c;

	c = prcm_clear_mod_irqs(WKUP_MOD, 1);
	c += prcm_clear_mod_irqs(CORE_MOD, 1);
	c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1);
	if (omap_rev() > OMAP3430_REV_ES1_0) {
		c += prcm_clear_mod_irqs(CORE_MOD, 3);
		c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1);
	}

	return c;
}

/*
 * PRCM Interrupt Handler
 *
 * The PRM_IRQSTATUS_MPU register indicates if there are any pending
 * interrupts from the PRCM for the MPU. These bits must be cleared in
 * order to clear the PRCM interrupt. The PRCM interrupt handler is
 * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear
 * the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU
 * register indicates that a wake-up event is pending for the MPU and
 * this bit can only be cleared if all the wake-up events latched
 * in the various PM_WKST_x registers have been cleared. The interrupt
 * handler is implemented using a do-while loop so that if a wake-up
 * event occurs during the processing of the PRCM interrupt handler
 * (setting a bit in the corresponding PM_WKST_x register and thus
 * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register),
 * it is still handled.
 */
static irqreturn_t prcm_interrupt_handler(int irq, void *dev_id)
{
	u32 irqenable_mpu, irqstatus_mpu;
	int c = 0;

	irqenable_mpu = prm_read_mod_reg(OCP_MOD,
					 OMAP3_PRM_IRQENABLE_MPU_OFFSET);
	irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
					 OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
	irqstatus_mpu &= irqenable_mpu;

	do {
		if (irqstatus_mpu & (OMAP3430_WKUP_ST_MASK |
				     OMAP3430_IO_ST_MASK)) {
			c = _prcm_int_handle_wakeup();

			/*
			 * Is the MPU PRCM interrupt handler racing with the
			 * IVA2 PRCM interrupt handler ?
			 */
			WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup "
			     "but no wakeup sources are marked\n");
		} else {
			WARN(1, "prcm: WARNING: PRCM interrupt received, but "
			     "no code to handle it (%08x)\n", irqstatus_mpu);
		}

		prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);

		irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
		irqstatus_mpu &= irqenable_mpu;

	} while (irqstatus_mpu);

	return IRQ_HANDLED;
}

static void restore_control_register(u32 val)
{
	__asm__ __volatile__ ("mcr p15, 0, %0, c1, c0, 0" : : "r" (val));
}

/* Function to restore the table entry that was modified for enabling MMU */
static void restore_table_entry(void)
{
	u32 *scratchpad_address;
	u32 previous_value, control_reg_value;
	u32 *address;

	scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD);

	/* Get address of entry that was modified */
	address = (u32 *)__raw_readl(scratchpad_address +
				     OMAP343X_TABLE_ADDRESS_OFFSET);
	/* Get the previous value which needs to be restored */
	previous_value = __raw_readl(scratchpad_address +
				     OMAP343X_TABLE_VALUE_OFFSET);
	address = __va(address);
	*address = previous_value;
	flush_tlb_all();
	control_reg_value = __raw_readl(scratchpad_address
					+ OMAP343X_CONTROL_REG_VALUE_OFFSET);
	/* This will enable caches and prediction */
	restore_control_register(control_reg_value);
}

void omap_sram_idle(void)
{
	/*
	 * save_state tells the SRAM idle routine what needs to be saved
	 * and restored:
	 * save_state = 0 => nothing to save or restore
	 * save_state = 1 => only L1 and logic lost
	 * save_state = 2 => only L2 lost
	 * save_state = 3 => L1, L2 and logic lost
	 */
	int save_state = 0;
	int mpu_next_state = PWRDM_POWER_ON;
	int per_next_state = PWRDM_POWER_ON;
	int core_next_state = PWRDM_POWER_ON;
	int core_prev_state, per_prev_state;
	u32 sdrc_pwr = 0;
	int per_state_modified = 0;

	if (!_omap_sram_idle)
		return;

	pwrdm_clear_all_prev_pwrst(mpu_pwrdm);
	pwrdm_clear_all_prev_pwrst(neon_pwrdm);
	pwrdm_clear_all_prev_pwrst(core_pwrdm);
	pwrdm_clear_all_prev_pwrst(per_pwrdm);

	mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
	switch (mpu_next_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_RET:
		/* No need to save context */
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		save_state = 3;
		break;
	default:
		/* Invalid state */
		printk(KERN_ERR "Invalid mpu state in sram_idle\n");
		return;
	}
	pwrdm_pre_transition();

	/* NEON control */
	if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
		pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);

	/* Enable IO-PAD and IO-CHAIN wakeups */
	per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
	core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
	if (omap3_has_io_wakeup() &&
	    (per_next_state < PWRDM_POWER_ON ||
	     core_next_state < PWRDM_POWER_ON)) {
		prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
		omap3_enable_io_chain();
	}

	/* PER */
	if (per_next_state < PWRDM_POWER_ON) {
		omap_uart_prepare_idle(2);
		omap2_gpio_prepare_for_idle(per_next_state);
		if (per_next_state == PWRDM_POWER_OFF) {
			if (core_next_state == PWRDM_POWER_ON) {
				per_next_state = PWRDM_POWER_RET;
				pwrdm_set_next_pwrst(per_pwrdm, per_next_state);
				per_state_modified = 1;
			} else
				omap3_per_save_context();
		}
	}

	if (pwrdm_read_pwrst(cam_pwrdm) == PWRDM_POWER_ON)
		omap2_clkdm_deny_idle(mpu_pwrdm->pwrdm_clkdms[0]);

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON) {
		omap_uart_prepare_idle(0);
		omap_uart_prepare_idle(1);
		if (core_next_state == PWRDM_POWER_OFF) {
			omap3_core_save_context();
			omap3_prcm_save_context();
		}
	}

	omap3_intc_prepare_idle();

	/*
	 * On EMU/HS devices ROM code restores an SDRC value
	 * from scratchpad which has automatic self refresh on timeout
	 * of AUTO_CNT = 1 enabled. This takes care of erratum 1.142.
	 * Hence store/restore the SDRC_POWER register here.
	 */
	if (omap_rev() >= OMAP3430_REV_ES3_0 &&
	    omap_type() != OMAP2_DEVICE_TYPE_GP &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_pwr = sdrc_read_reg(SDRC_POWER);

	/*
	 * omap3_arm_context is the location where ARM registers
	 * get saved. The restore path then reads from this
	 * location and restores them back.
	 */
	_omap_sram_idle(omap3_arm_context, save_state);
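	/*
	 * If the MPU context was lost (off mode), execution returns here
	 * through the ROM/SRAM restore path; cpu_init() re-establishes the
	 * banked exception-mode (IRQ/ABT/UND) stack pointers that were lost.
	 */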
	cpu_init();

	/* Restore normal SDRC POWER settings */
	if (omap_rev() >= OMAP3430_REV_ES3_0 &&
	    omap_type() != OMAP2_DEVICE_TYPE_GP &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_write_reg(sdrc_pwr, SDRC_POWER);

	/* Restore table entry modified during MMU restoration */
	if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF)
		restore_table_entry();

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON) {
		core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
		if (core_prev_state == PWRDM_POWER_OFF) {
			omap3_core_restore_context();
			omap3_prcm_restore_context();
			omap3_sram_restore_context();
			omap2_sms_restore_context();
		}
		omap_uart_resume_idle(0);
		omap_uart_resume_idle(1);
		if (core_next_state == PWRDM_POWER_OFF)
			prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
					       OMAP3430_GR_MOD,
					       OMAP3_PRM_VOLTCTRL_OFFSET);
	}
	omap3_intc_resume_idle();

	/* PER */
	if (per_next_state < PWRDM_POWER_ON) {
		per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm);
		omap2_gpio_resume_after_idle();
		if (per_prev_state == PWRDM_POWER_OFF)
			omap3_per_restore_context();
		omap_uart_resume_idle(2);
		if (per_state_modified)
			pwrdm_set_next_pwrst(per_pwrdm, PWRDM_POWER_OFF);
	}

	/* Disable IO-PAD and IO-CHAIN wakeup */
	if (omap3_has_io_wakeup() &&
	    (per_next_state < PWRDM_POWER_ON ||
	     core_next_state < PWRDM_POWER_ON)) {
		prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
		omap3_disable_io_chain();
	}

	pwrdm_post_transition();

	omap2_clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]);
}

int omap3_can_sleep(void)
{
	if (!sleep_while_idle)
		return 0;
	if (!omap_uart_can_sleep())
		return 0;
	return 1;
}

/* This sets the pwrdm state (other than mpu & core). Currently only ON and
 * RET are supported. The function assumes that the clkdm does not have
 * hw_sup mode enabled. */
int set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
{
	u32 cur_state;
	int sleep_switch = 0;
	int ret = 0;

	if (pwrdm == NULL || IS_ERR(pwrdm))
		return -EINVAL;

	while (!(pwrdm->pwrsts & (1 << state))) {
		if (state == PWRDM_POWER_OFF)
			return ret;
		state--;
	}

	cur_state = pwrdm_read_next_pwrst(pwrdm);
	if (cur_state == state)
		return ret;

	if (pwrdm_read_pwrst(pwrdm) < PWRDM_POWER_ON) {
		omap2_clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
		sleep_switch = 1;
		pwrdm_wait_transition(pwrdm);
	}

	ret = pwrdm_set_next_pwrst(pwrdm, state);
	if (ret) {
		printk(KERN_ERR "Unable to set state of powerdomain: %s\n",
		       pwrdm->name);
		goto err;
	}

	if (sleep_switch) {
		omap2_clkdm_allow_idle(pwrdm->pwrdm_clkdms[0]);
		pwrdm_wait_transition(pwrdm);
		pwrdm_state_switch(pwrdm);
	}

err:
	return ret;
}

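/*
 * Installed as the pm_idle hook in omap3_pm_init(). It only enters
 * omap_sram_idle() when sleep_while_idle is set, the UARTs are allowed to
 * sleep, and no interrupt or reschedule is pending; otherwise it returns
 * with interrupts re-enabled and lets the normal idle loop continue.
 */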
static void omap3_pm_idle(void)
{
	local_irq_disable();
	local_fiq_disable();

	if (!omap3_can_sleep())
		goto out;

	if (omap_irq_pending() || need_resched())
		goto out;

	omap_sram_idle();

out:
	local_fiq_enable();
	local_irq_enable();
}

#ifdef CONFIG_SUSPEND
static suspend_state_t suspend_state;

static void omap2_pm_wakeup_on_timer(u32 seconds, u32 milliseconds)
{
	u32 tick_rate, cycles;

	if (!seconds && !milliseconds)
		return;

	tick_rate = clk_get_rate(omap_dm_timer_get_fclk(gptimer_wakeup));
	cycles = tick_rate * seconds + tick_rate * milliseconds / 1000;
	omap_dm_timer_stop(gptimer_wakeup);
	omap_dm_timer_set_load_start(gptimer_wakeup, 0, 0xffffffff - cycles);

	pr_info("PM: Resume timer in %u.%03u secs"
		" (%d ticks at %d ticks/sec.)\n",
		seconds, milliseconds, cycles, tick_rate);
}

static int omap3_pm_prepare(void)
{
	disable_hlt();
	return 0;
}

static int omap3_pm_suspend(void)
{
	struct power_state *pwrst;
	int state, ret = 0;

	if (wakeup_timer_seconds || wakeup_timer_milliseconds)
		omap2_pm_wakeup_on_timer(wakeup_timer_seconds,
					 wakeup_timer_milliseconds);

	/* Read current next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node)
		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
	/* Set ones wanted by suspend */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
			goto restore;
		if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
			goto restore;
	}

	omap_uart_prepare_suspend();
	omap3_intc_suspend();

	omap_sram_idle();

restore:
	/* Restore next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
		if (state > pwrst->next_state) {
			printk(KERN_INFO "Powerdomain (%s) didn't enter "
			       "target state %d\n",
			       pwrst->pwrdm->name, pwrst->next_state);
			ret = -1;
		}
		set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
	}
	if (ret)
		printk(KERN_ERR "Could not enter target state in pm_suspend\n");
	else
		printk(KERN_INFO "Successfully put all powerdomains "
		       "to target state\n");

	return ret;
}

static int omap3_pm_enter(suspend_state_t unused)
{
	int ret = 0;

	switch (suspend_state) {
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		ret = omap3_pm_suspend();
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static void omap3_pm_finish(void)
{
	enable_hlt();
}

/* Hooks to enable / disable UART interrupts during suspend */
static int omap3_pm_begin(suspend_state_t state)
{
	suspend_state = state;
	omap_uart_enable_irqs(0);
	return 0;
}

static void omap3_pm_end(void)
{
	suspend_state = PM_SUSPEND_ON;
	omap_uart_enable_irqs(1);
	return;
}

static struct platform_suspend_ops omap_pm_ops = {
	.begin		= omap3_pm_begin,
	.end		= omap3_pm_end,
	.prepare	= omap3_pm_prepare,
	.enter		= omap3_pm_enter,
	.finish		= omap3_pm_finish,
	.valid		= suspend_valid_only_mem,
};
#endif /* CONFIG_SUSPEND */


/**
 * omap3_iva_idle(): ensure IVA is in idle so it can be put into
 *                   retention
 *
 * In cases where IVA2 is activated by bootcode, it may prevent
 * full-chip retention or off-mode because it is not idle.  This
 * function forces the IVA2 into idle state so it can go
 * into retention/off and thus allow full-chip retention/off.
 *
 **/
static void __init omap3_iva_idle(void)
{
	/* ensure IVA2 clock is disabled */
	cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* if no clock activity, nothing else to do */
	if (!(cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) &
	      OMAP3430_CLKACTIVITY_IVA2_MASK))
		return;

	/* Reset IVA2 */
	prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
			  OMAP3430_RST2_IVA2_MASK |
			  OMAP3430_RST3_IVA2_MASK,
			  OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	/* Enable IVA2 clock */
	cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK,
			 OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* Set IVA2 boot mode to 'idle' */
	omap_ctrl_writel(OMAP3_IVA2_BOOTMOD_IDLE,
			 OMAP343X_CONTROL_IVA2_BOOTMOD);

	/* Un-reset IVA2 */
	prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	/* Disable IVA2 clock */
	cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* Reset IVA2 */
	prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
			  OMAP3430_RST2_IVA2_MASK |
			  OMAP3430_RST3_IVA2_MASK,
			  OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
}

static void __init omap3_d2d_idle(void)
{
	u16 mask, padconf;

	/* In a standalone OMAP3430 where there is no stacked modem,
	 * the D2D Idle Ack and D2D MStandby signals must be pulled
	 * high. Set CONTROL_PADCONF_SAD2D_IDLEACK and
	 * CONTROL_PADCONF_SAD2D_MSTANDBY to have a pull-up. */
	mask = (1 << 4) | (1 << 3); /* pull-up, enabled */
	padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_MSTANDBY);
	padconf |= mask;
	omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_MSTANDBY);

	padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_IDLEACK);
	padconf |= mask;
	omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK);

	/* reset modem */
	prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
			  OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK,
			  CORE_MOD, OMAP2_RM_RSTCTRL);
	prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
}

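/*
 * One-time PRCM setup, called from omap3_pm_init(): clear wakeup
 * dependencies, enable interface-clock and DPLL autoidle, configure the
 * wakeup sources and MPU/IVA group selection, and clear any stale reset
 * and PRCM interrupt status before idle/suspend is used.
 */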
static void __init prcm_setup_regs(void)
{
	prm_write_mod_reg(0, OMAP3430_IVA2_MOD, PM_WKDEP);
	prm_write_mod_reg(0, MPU_MOD, PM_WKDEP);
	prm_write_mod_reg(0, OMAP3430_DSS_MOD, PM_WKDEP);
	prm_write_mod_reg(0, OMAP3430_NEON_MOD, PM_WKDEP);
	prm_write_mod_reg(0, OMAP3430_CAM_MOD, PM_WKDEP);
	prm_write_mod_reg(0, OMAP3430_PER_MOD, PM_WKDEP);
	if (omap_rev() > OMAP3430_REV_ES1_0) {
		prm_write_mod_reg(0, OMAP3430ES2_SGX_MOD, PM_WKDEP);
		prm_write_mod_reg(0, OMAP3430ES2_USBHOST_MOD, PM_WKDEP);
	} else
		prm_write_mod_reg(0, GFX_MOD, PM_WKDEP);

	/*
	 * Enable interface clock autoidle for all modules.
	 * Note that in the long run this should be done by clockfw
	 */
	cm_write_mod_reg(
		OMAP3430_AUTO_MODEM_MASK |
		OMAP3430ES2_AUTO_MMC3_MASK |
		OMAP3430ES2_AUTO_ICR_MASK |
		OMAP3430_AUTO_AES2_MASK |
		OMAP3430_AUTO_SHA12_MASK |
		OMAP3430_AUTO_DES2_MASK |
		OMAP3430_AUTO_MMC2_MASK |
		OMAP3430_AUTO_MMC1_MASK |
		OMAP3430_AUTO_MSPRO_MASK |
		OMAP3430_AUTO_HDQ_MASK |
		OMAP3430_AUTO_MCSPI4_MASK |
		OMAP3430_AUTO_MCSPI3_MASK |
		OMAP3430_AUTO_MCSPI2_MASK |
		OMAP3430_AUTO_MCSPI1_MASK |
		OMAP3430_AUTO_I2C3_MASK |
		OMAP3430_AUTO_I2C2_MASK |
		OMAP3430_AUTO_I2C1_MASK |
		OMAP3430_AUTO_UART2_MASK |
		OMAP3430_AUTO_UART1_MASK |
		OMAP3430_AUTO_GPT11_MASK |
		OMAP3430_AUTO_GPT10_MASK |
		OMAP3430_AUTO_MCBSP5_MASK |
		OMAP3430_AUTO_MCBSP1_MASK |
		OMAP3430ES1_AUTO_FAC_MASK | /* This is es1 only */
		OMAP3430_AUTO_MAILBOXES_MASK |
		OMAP3430_AUTO_OMAPCTRL_MASK |
		OMAP3430ES1_AUTO_FSHOSTUSB_MASK |
		OMAP3430_AUTO_HSOTGUSB_MASK |
		OMAP3430_AUTO_SAD2D_MASK |
		OMAP3430_AUTO_SSI_MASK,
		CORE_MOD, CM_AUTOIDLE1);

	cm_write_mod_reg(
		OMAP3430_AUTO_PKA_MASK |
		OMAP3430_AUTO_AES1_MASK |
		OMAP3430_AUTO_RNG_MASK |
		OMAP3430_AUTO_SHA11_MASK |
		OMAP3430_AUTO_DES1_MASK,
		CORE_MOD, CM_AUTOIDLE2);

	if (omap_rev() > OMAP3430_REV_ES1_0) {
		cm_write_mod_reg(
			OMAP3430_AUTO_MAD2D_MASK |
			OMAP3430ES2_AUTO_USBTLL_MASK,
			CORE_MOD, CM_AUTOIDLE3);
	}

	cm_write_mod_reg(
		OMAP3430_AUTO_WDT2_MASK |
		OMAP3430_AUTO_WDT1_MASK |
		OMAP3430_AUTO_GPIO1_MASK |
		OMAP3430_AUTO_32KSYNC_MASK |
		OMAP3430_AUTO_GPT12_MASK |
		OMAP3430_AUTO_GPT1_MASK,
		WKUP_MOD, CM_AUTOIDLE);

	cm_write_mod_reg(
		OMAP3430_AUTO_DSS_MASK,
		OMAP3430_DSS_MOD,
		CM_AUTOIDLE);

	cm_write_mod_reg(
		OMAP3430_AUTO_CAM_MASK,
		OMAP3430_CAM_MOD,
		CM_AUTOIDLE);

	cm_write_mod_reg(
		OMAP3430_AUTO_GPIO6_MASK |
		OMAP3430_AUTO_GPIO5_MASK |
		OMAP3430_AUTO_GPIO4_MASK |
		OMAP3430_AUTO_GPIO3_MASK |
		OMAP3430_AUTO_GPIO2_MASK |
		OMAP3430_AUTO_WDT3_MASK |
		OMAP3430_AUTO_UART3_MASK |
		OMAP3430_AUTO_GPT9_MASK |
		OMAP3430_AUTO_GPT8_MASK |
		OMAP3430_AUTO_GPT7_MASK |
		OMAP3430_AUTO_GPT6_MASK |
		OMAP3430_AUTO_GPT5_MASK |
		OMAP3430_AUTO_GPT4_MASK |
		OMAP3430_AUTO_GPT3_MASK |
		OMAP3430_AUTO_GPT2_MASK |
		OMAP3430_AUTO_MCBSP4_MASK |
		OMAP3430_AUTO_MCBSP3_MASK |
		OMAP3430_AUTO_MCBSP2_MASK,
		OMAP3430_PER_MOD,
		CM_AUTOIDLE);

	if (omap_rev() > OMAP3430_REV_ES1_0) {
		cm_write_mod_reg(
			OMAP3430ES2_AUTO_USBHOST_MASK,
			OMAP3430ES2_USBHOST_MOD,
			CM_AUTOIDLE);
	}

	omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG);

	/*
	 * Set all PLLs to autoidle. This is needed until autoidle is
	 * enabled by clockfw
	 */
	cm_write_mod_reg(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
			 OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
	cm_write_mod_reg(1 << OMAP3430_AUTO_MPU_DPLL_SHIFT,
			 MPU_MOD,
			 CM_AUTOIDLE2);
	cm_write_mod_reg((1 << OMAP3430_AUTO_PERIPH_DPLL_SHIFT) |
			 (1 << OMAP3430_AUTO_CORE_DPLL_SHIFT),
			 PLL_MOD,
			 CM_AUTOIDLE);
	cm_write_mod_reg(1 << OMAP3430ES2_AUTO_PERIPH2_DPLL_SHIFT,
			 PLL_MOD,
			 CM_AUTOIDLE2);

	/*
	 * Enable control of the external oscillator through
	 * sys_clkreq. In the long run the clock framework should
	 * take care of this.
	 */
	prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK,
			     1 << OMAP_AUTOEXTCLKMODE_SHIFT,
			     OMAP3430_GR_MOD,
			     OMAP3_PRM_CLKSRC_CTRL_OFFSET);

	/* set up the wakeup sources */
	prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
			  OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK,
			  WKUP_MOD, PM_WKEN);
	/* No need to write EN_IO, that is always enabled */
	prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
			  OMAP3430_GRPSEL_GPT1_MASK |
			  OMAP3430_GRPSEL_GPT12_MASK,
			  WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
	/* For some reason IO doesn't generate a wakeup event even if
	 * it is selected to the MPU wakeup group */
	prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
			  OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);

	/* Enable PM_WKEN to support DSS LPR */
	prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
				OMAP3430_DSS_MOD, PM_WKEN);

	/* Enable wakeups in PER */
	prm_write_mod_reg(OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
			  OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK |
			  OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK |
			  OMAP3430_EN_MCBSP2_MASK | OMAP3430_EN_MCBSP3_MASK |
			  OMAP3430_EN_MCBSP4_MASK,
			  OMAP3430_PER_MOD, PM_WKEN);
	/* and allow them to wake up MPU */
	prm_write_mod_reg(OMAP3430_GRPSEL_GPIO2_MASK |
			  OMAP3430_GRPSEL_GPIO3_MASK |
			  OMAP3430_GRPSEL_GPIO4_MASK |
			  OMAP3430_GRPSEL_GPIO5_MASK |
			  OMAP3430_GRPSEL_GPIO6_MASK |
			  OMAP3430_GRPSEL_UART3_MASK |
			  OMAP3430_GRPSEL_MCBSP2_MASK |
			  OMAP3430_GRPSEL_MCBSP3_MASK |
			  OMAP3430_GRPSEL_MCBSP4_MASK,
			  OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);

	/* Don't attach IVA interrupts */
	prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
	prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
	prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
	prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);

	/* Clear any pending 'reset' flags */
	prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST);

	/* Clear any pending PRCM interrupts */
	prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);

	omap3_iva_idle();
	omap3_d2d_idle();
}

void omap3_pm_off_mode_enable(int enable)
{
	struct power_state *pwrst;
	u32 state;

	if (enable)
		state = PWRDM_POWER_OFF;
	else
		state = PWRDM_POWER_RET;

#ifdef CONFIG_CPU_IDLE
	omap3_cpuidle_update_states();
#endif

	list_for_each_entry(pwrst, &pwrst_list, node) {
		pwrst->next_state = state;
		set_pwrdm_state(pwrst->pwrdm, state);
	}
}

int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm)
			return pwrst->next_state;
	}
	return -EINVAL;
}

int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm) {
			pwrst->next_state = state;
			return 0;
		}
	}
	return -EINVAL;
}

static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
	struct power_state *pwrst;

	if (!pwrdm->pwrsts)
		return 0;

	pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
	if (!pwrst)
		return -ENOMEM;
	pwrst->pwrdm = pwrdm;
	pwrst->next_state = PWRDM_POWER_RET;
	list_add(&pwrst->node, &pwrst_list);

	if (pwrdm_has_hdwr_sar(pwrdm))
		pwrdm_enable_hdwr_sar(pwrdm);

	return set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}

/*
 * Enable hw supervised mode for all clockdomains if it's
 * supported. Initiate sleep transition for other clockdomains, if
 * they are not used
 */
static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
{
	clkdm_clear_all_wkdeps(clkdm);
	clkdm_clear_all_sleepdeps(clkdm);

	if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
		omap2_clkdm_allow_idle(clkdm);
	else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
		 atomic_read(&clkdm->usecount) == 0)
		omap2_clkdm_sleep(clkdm);
	return 0;
}

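/*
 * Copy the CPU suspend routine (and, on non-GP HS/EMU devices, the secure
 * RAM save routine) into on-chip SRAM with omap_sram_push() and remember
 * the SRAM-resident entry points used by omap_sram_idle() and
 * omap3_save_secure_ram_context().
 */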
void omap_push_sram_idle(void)
{
	_omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
					omap34xx_cpu_suspend_sz);
	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		_omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
				save_secure_ram_context_sz);
}

static int __init omap3_pm_init(void)
{
	struct power_state *pwrst, *tmp;
	struct clockdomain *neon_clkdm, *per_clkdm, *mpu_clkdm, *core_clkdm;
	int ret;

	if (!cpu_is_omap34xx())
		return -ENODEV;

	printk(KERN_ERR "Power Management for TI OMAP3.\n");

	prcm_setup_regs();

	ret = request_irq(INT_34XX_PRCM_MPU_IRQ,
			  (irq_handler_t)prcm_interrupt_handler,
			  IRQF_DISABLED, "prcm", NULL);
	if (ret) {
		printk(KERN_ERR "request_irq failed to register for 0x%x\n",
		       INT_34XX_PRCM_MPU_IRQ);
		goto err1;
	}

	ret = pwrdm_for_each(pwrdms_setup, NULL);
	if (ret) {
		printk(KERN_ERR "Failed to setup powerdomains\n");
		goto err2;
	}

	(void) clkdm_for_each(clkdms_setup, NULL);

	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
	if (mpu_pwrdm == NULL) {
		printk(KERN_ERR "Failed to get mpu_pwrdm\n");
		goto err2;
	}

	neon_pwrdm = pwrdm_lookup("neon_pwrdm");
	per_pwrdm = pwrdm_lookup("per_pwrdm");
	core_pwrdm = pwrdm_lookup("core_pwrdm");
	cam_pwrdm = pwrdm_lookup("cam_pwrdm");

	neon_clkdm = clkdm_lookup("neon_clkdm");
	mpu_clkdm = clkdm_lookup("mpu_clkdm");
	per_clkdm = clkdm_lookup("per_clkdm");
	core_clkdm = clkdm_lookup("core_clkdm");

	omap_push_sram_idle();
#ifdef CONFIG_SUSPEND
	suspend_set_ops(&omap_pm_ops);
#endif /* CONFIG_SUSPEND */

	pm_idle = omap3_pm_idle;
	omap3_idle_init();

	clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		omap3_secure_ram_storage =
			kmalloc(0x803F, GFP_KERNEL);
		if (!omap3_secure_ram_storage)
			printk(KERN_ERR "Memory allocation failed when "
					"allocating for secure sram context\n");

		local_irq_disable();
		local_fiq_disable();

		omap_dma_global_context_save();
		omap3_save_secure_ram_context(PWRDM_POWER_ON);
		omap_dma_global_context_restore();

		local_irq_enable();
		local_fiq_enable();
	}

	omap3_save_scratchpad_contents();
err1:
	return ret;
err2:
	free_irq(INT_34XX_PRCM_MPU_IRQ, NULL);
	list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
		list_del(&pwrst->node);
		kfree(pwrst);
	}
	return ret;
}

late_initcall(omap3_pm_init);