machdep.c (241880) → machdep.c (247454)
1/*-
2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1992 Terrence R. Lambert.
4 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * William Jolitz.

--- 25 unchanged lines hidden ---

34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
39 */
40
41#include <sys/cdefs.h>
42__FBSDID("$FreeBSD: head/sys/amd64/amd64/machdep.c 241880 2012-10-22 11:57:26Z eadler $");
42__FBSDID("$FreeBSD: head/sys/amd64/amd64/machdep.c 247454 2013-02-28 10:46:54Z davide $");
43
44#include "opt_atalk.h"
45#include "opt_atpic.h"
46#include "opt_compat.h"
47#include "opt_cpu.h"
48#include "opt_ddb.h"
49#include "opt_inet.h"
50#include "opt_ipx.h"

--- 602 unchanged lines hidden ---

653 */
654void
655cpu_halt(void)
656{
657 for (;;)
658 halt();
659}
660
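The new parameter type in the hunks below, sbintime_t, is FreeBSD's signed 64-bit fixed-point time format: 32 integer bits and 32 fractional bits of a second. A minimal, self-contained sketch of the arithmetic, with the typedef and SBT_* constants redefined locally for illustration (the real definitions live in <sys/time.h>):

#include <stdint.h>
#include <stdio.h>

/* sbintime_t: signed 32.32 fixed-point seconds (mirrors <sys/time.h>). */
typedef int64_t sbintime_t;

#define SBT_1S  ((sbintime_t)1 << 32)   /* one second      */
#define SBT_1MS (SBT_1S / 1000)         /* one millisecond */
#define SBT_1US (SBT_1S / 1000000)      /* one microsecond */

int
main(void)
{
	sbintime_t next_event = 500 * SBT_1US;	/* 500us from now */

	printf("next event in %jd us\n", (intmax_t)(next_event / SBT_1US));
	return (0);
}

Because one second is exactly 1 << 32, conversions to and from other units are plain 64-bit shifts and divisions, which is why a deadline can be handed to the idle hooks as a single integer argument.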
661void (*cpu_idle_hook)(void) = NULL; /* ACPI idle hook. */
661void (*cpu_idle_hook)(sbintime_t) = NULL; /* ACPI idle hook. */
662static int cpu_ident_amdc1e = 0; /* AMD C1E supported. */
663static int idle_mwait = 1; /* Use MONITOR/MWAIT for short idle. */
664TUNABLE_INT("machdep.idle_mwait", &idle_mwait);
665SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RW, &idle_mwait,
666 0, "Use MONITOR/MWAIT for short idle");
667
668#define STATE_RUNNING 0x0
669#define STATE_MWAIT 0x1
670#define STATE_SLEEPING 0x2
671
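The state word lives in the per-CPU monitorbuf so that the address armed with MONITOR and the idle-state flag are the same cache line: another CPU can then wake an MWAIT-idle processor with a plain store of STATE_RUNNING, no IPI needed. A toy user-space model of that idea; sketch_idle_wakeup and monitorbuf_state are invented stand-ins, not the kernel's per-CPU data:

#include <stdio.h>

#define STATE_RUNNING	0x0
#define STATE_MWAIT	0x1
#define STATE_SLEEPING	0x2

/* Stand-in for one CPU's monitorbuf; the real buffer is per-CPU. */
static int monitorbuf_state = STATE_MWAIT;

/*
 * A write to the monitored line is enough to break a CPU out of
 * mwait, so "waking" it is just this store.
 */
static void
sketch_idle_wakeup(int *state)
{
	if (*state == STATE_MWAIT)
		*state = STATE_RUNNING;
}

int
main(void)
{
	sketch_idle_wakeup(&monitorbuf_state);
	printf("state after wakeup: %d\n", monitorbuf_state);
	return (0);
}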
672static void
673cpu_idle_acpi(int busy)
673cpu_idle_acpi(sbintime_t sbt)
674{
675 int *state;
676
677 state = (int *)PCPU_PTR(monitorbuf);
678 *state = STATE_SLEEPING;
679
680 /* See comments in cpu_idle_hlt(). */
681 disable_intr();
682 if (sched_runnable())
683 enable_intr();
684 else if (cpu_idle_hook)
685 cpu_idle_hook();
685 cpu_idle_hook(sbt);
686 else
687 __asm __volatile("sti; hlt");
688 *state = STATE_RUNNING;
689}
690
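cpu_idle_hook is the pointer the ACPI code installs (per the comment above); with the new signature the hook receives the time until the next timer event and can size its sleep accordingly, while a negative value means no deadline was passed down. A hedged sketch of such a consumer, with example_idle_hook and the enter_*_cstate helpers invented here for illustration (the real policy lives in the ACPI CPU driver, not in machdep.c):

#include <stdint.h>
#include <stdio.h>

typedef int64_t sbintime_t;		/* 32.32 fixed-point seconds */
#define SBT_1S	((sbintime_t)1 << 32)
#define SBT_1MS	(SBT_1S / 1000)

/* Invented stand-ins for entering a shallow or deep C-state. */
static void enter_shallow_cstate(void) { puts("C1"); }
static void enter_deep_cstate(void)    { puts("deep Cx"); }

/*
 * Hypothetical idle hook with the new signature: only pay the
 * entry/exit latency of a deep C-state when the next event is
 * far enough away; sbt < 0 means no deadline was supplied.
 */
static void
example_idle_hook(sbintime_t sbt)
{
	if (sbt >= 0 && sbt < 2 * SBT_1MS)
		enter_shallow_cstate();
	else
		enter_deep_cstate();
}

int
main(void)
{
	example_idle_hook(SBT_1MS / 2);	/* short sleep -> shallow */
	example_idle_hook(-1);		/* no deadline -> deep    */
	return (0);
}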
691static void
692cpu_idle_hlt(int busy)
692cpu_idle_hlt(sbintime_t sbt)
693{
694 int *state;
695
696 state = (int *)PCPU_PTR(monitorbuf);
697 *state = STATE_SLEEPING;
698
699 /*
700 * Since we may be in a critical section from cpu_idle(), if

--- 24 unchanged lines hidden ---

725 */
726#define MWAIT_C0 0xf0
727#define MWAIT_C1 0x00
728#define MWAIT_C2 0x10
729#define MWAIT_C3 0x20
730#define MWAIT_C4 0x30
731
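The MWAIT_C* values above are the hint passed in EAX: bits 7-4 carry the target C-state minus one (0x00 is C1, 0x10 is C2, 0x20 is C3), with the special value 0xf0 requesting C0, and bits 3-0 select a sub-state. A small runnable sketch that reproduces the encoding; mwait_hint is an invented helper, not part of machdep.c:

#include <stdio.h>

/*
 * MWAIT EAX hint layout (matches the MWAIT_C* defines above):
 *   bits 7-4: target C-state minus one (0xf means "stay in C0")
 *   bits 3-0: sub C-state
 */
static unsigned int
mwait_hint(unsigned int cstate, unsigned int substate)
{
	if (cstate == 0)
		return (0xf0 | (substate & 0xf));	/* C0 */
	return ((((cstate - 1) & 0xf) << 4) | (substate & 0xf));
}

int
main(void)
{
	printf("C1 hint: 0x%02x\n", mwait_hint(1, 0));	/* 0x00 */
	printf("C2 hint: 0x%02x\n", mwait_hint(2, 0));	/* 0x10 */
	printf("C0 hint: 0x%02x\n", mwait_hint(0, 0));	/* 0xf0 */
	return (0);
}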
732static void
733cpu_idle_mwait(int busy)
733cpu_idle_mwait(sbintime_t sbt)
734{
735 int *state;
736
737 state = (int *)PCPU_PTR(monitorbuf);
738 *state = STATE_MWAIT;
739
740 /* See comments in cpu_idle_hlt(). */
741 disable_intr();

--- 6 unchanged lines hidden ---

748 if (*state == STATE_MWAIT)
749 __asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
750 else
751 enable_intr();
752 *state = STATE_RUNNING;
753}
754
755static void
756cpu_idle_spin(int busy)
756cpu_idle_spin(sbintime_t sbt)
757{
758 int *state;
759 int i;
760
761 state = (int *)PCPU_PTR(monitorbuf);
762 *state = STATE_RUNNING;
763
764 /*

--- 32 unchanged lines hidden ---

797 */
798 if (cpu_vendor_id == CPU_VENDOR_AMD &&
799 (cpu_id & 0x00000f00) == 0x00000f00 &&
800 (cpu_id & 0x0fff0000) >= 0x00040000) {
801 cpu_ident_amdc1e = 1;
802 }
803}
804
805void (*cpu_idle_fn)(int) = cpu_idle_acpi;
805void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;
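cpu_idle_fn is the dispatch point: cpu_idle() below calls through it with the sbintime_t obtained from cpu_idleclock(), or -1 when the timers were not switched into idle mode. A self-contained sketch of selecting and calling an idle method through a pointer of the new type; idle_fn and my_idle are invented names, not the kernel's:

#include <stdint.h>
#include <stdio.h>

typedef int64_t sbintime_t;	/* 32.32 fixed-point seconds */

/* Same shape as cpu_idle_fn after this change. */
static void (*idle_fn)(sbintime_t);

static void
my_idle(sbintime_t sbt)
{
	if (sbt < 0)
		puts("idle: no deadline passed");
	else
		printf("idle: next event in %jd sbt units\n", (intmax_t)sbt);
}

int
main(void)
{
	idle_fn = my_idle;	/* what selecting an idle method amounts to */
	idle_fn(-1);
	return (0);
}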
806
807void
808cpu_idle(int busy)
809{
810 uint64_t msr;
811 sbintime_t sbt = -1;
811
812 CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
813 busy, curcpu);
814#ifdef MP_WATCHDOG
815 ap_watchdog(PCPU_GET(cpuid));
816#endif
817 /* If we are busy - try to use fast methods. */
818 if (busy) {
819 if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
820 cpu_idle_mwait(busy);
821 goto out;
822 }
823 }
824
825 /* If we have time - switch timers into idle mode. */
826 if (!busy) {
827 critical_enter();
828 cpu_idleclock();
829 sbt = cpu_idleclock();
829 }
830
831 /* Apply AMD APIC timer C1E workaround. */
832 if (cpu_ident_amdc1e && cpu_disable_deep_sleep) {
833 msr = rdmsr(MSR_AMDK8_IPM);
834 if (msr & AMDK8_CMPHALT)
835 wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);
836 }
837
838 /* Call main idle method. */
839 cpu_idle_fn(busy);
840 cpu_idle_fn(sbt);
840
841	/* Switch timers back into active mode. */
842 if (!busy) {
843 cpu_activeclock();
844 critical_exit();
845 }
846out:
847 CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",

--- 1707 unchanged lines hidden ---