Deleted Added
full compact
cpu_machdep.c (334044) cpu_machdep.c (334152)
1/*-
2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1992 Terrence R. Lambert.
4 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * William Jolitz.

--- 25 unchanged lines hidden (view full) ---

34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
39 */
40
41#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1992 Terrence R. Lambert.
4 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * William Jolitz.

--- 25 unchanged lines hidden (view full) ---

34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
39 */
40
41#include <sys/cdefs.h>
42__FBSDID("$FreeBSD: stable/11/sys/x86/x86/cpu_machdep.c 334044 2018-05-22 14:25:40Z kib $");
42__FBSDID("$FreeBSD: stable/11/sys/x86/x86/cpu_machdep.c 334152 2018-05-24 13:17:24Z kib $");
43
44#include "opt_atpic.h"
45#include "opt_compat.h"
46#include "opt_cpu.h"
47#include "opt_ddb.h"
48#include "opt_inet.h"
49#include "opt_isa.h"
50#include "opt_kdb.h"

--- 102 unchanged lines hidden (view full) ---

153 * NOTE: Interrupts will cause a wakeup; however, this function does
154 * not enable interrupt handling. The caller is responsible to enable
155 * interrupts.
156 */
157void
158acpi_cpu_idle_mwait(uint32_t mwait_hint)
159{
160 int *state;
43
44#include "opt_atpic.h"
45#include "opt_compat.h"
46#include "opt_cpu.h"
47#include "opt_ddb.h"
48#include "opt_inet.h"
49#include "opt_isa.h"
50#include "opt_kdb.h"

--- 102 unchanged lines hidden (view full) ---

153 * NOTE: Interrupts will cause a wakeup; however, this function does
154 * not enable interrupt handling. The caller is responsible to enable
155 * interrupts.
156 */
157void
158acpi_cpu_idle_mwait(uint32_t mwait_hint)
159{
160 int *state;
161 uint64_t v;
161
162 /*
163 * A comment in Linux patch claims that 'CPUs run faster with
164 * speculation protection disabled. All CPU threads in a core
165 * must disable speculation protection for it to be
166 * disabled. Disable it while we are idle so the other
167 * hyperthread can run fast.'
168 *
169 * XXXKIB. Software coordination mode should be supported,
170 * but all Intel CPUs provide hardware coordination.
171 */
172
173 state = (int *)PCPU_PTR(monitorbuf);
174 KASSERT(atomic_load_int(state) == STATE_SLEEPING,
175 ("cpu_mwait_cx: wrong monitorbuf state"));
176 atomic_store_int(state, STATE_MWAIT);
162
163 /*
164 * A comment in Linux patch claims that 'CPUs run faster with
165 * speculation protection disabled. All CPU threads in a core
166 * must disable speculation protection for it to be
167 * disabled. Disable it while we are idle so the other
168 * hyperthread can run fast.'
169 *
170 * XXXKIB. Software coordination mode should be supported,
171 * but all Intel CPUs provide hardware coordination.
172 */
173
174 state = (int *)PCPU_PTR(monitorbuf);
175 KASSERT(atomic_load_int(state) == STATE_SLEEPING,
176 ("cpu_mwait_cx: wrong monitorbuf state"));
177 atomic_store_int(state, STATE_MWAIT);
177 handle_ibrs_exit();
178 if (PCPU_GET(ibpb_set) || hw_ssb_active) {
179 v = rdmsr(MSR_IA32_SPEC_CTRL);
180 wrmsr(MSR_IA32_SPEC_CTRL, v & ~(IA32_SPEC_CTRL_IBRS |
181 IA32_SPEC_CTRL_STIBP | IA32_SPEC_CTRL_SSBD));
182 } else {
183 v = 0;
184 }
178 cpu_monitor(state, 0, 0);
179 if (atomic_load_int(state) == STATE_MWAIT)
180 cpu_mwait(MWAIT_INTRBREAK, mwait_hint);
185 cpu_monitor(state, 0, 0);
186 if (atomic_load_int(state) == STATE_MWAIT)
187 cpu_mwait(MWAIT_INTRBREAK, mwait_hint);
181 handle_ibrs_entry();
182
183 /*
188
189 /*
190 * SSB cannot be disabled while we sleep, or rather, if it was
191 * disabled, the sysctl thread will bind to our cpu to tweak
192 * MSR.
193 */
194 if (v != 0)
195 wrmsr(MSR_IA32_SPEC_CTRL, v);
196
197 /*
184 * We should exit on any event that interrupts mwait, because
185 * that event might be a wanted interrupt.
186 */
187 atomic_store_int(state, STATE_RUNNING);
188}
189
190/* Get current clock frequency for the given cpu id. */
191int

--- 639 unchanged lines hidden (view full) ---

831 return (error);
832 hw_ibrs_disable = val != 0;
833 hw_ibrs_recalculate();
834 return (0);
835}
836SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
837 CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
838 "Disable Indirect Branch Restricted Speculation");
198 * We should exit on any event that interrupts mwait, because
199 * that event might be a wanted interrupt.
200 */
201 atomic_store_int(state, STATE_RUNNING);
202}
203
204/* Get current clock frequency for the given cpu id. */
205int

--- 639 unchanged lines hidden (view full) ---

845 return (error);
846 hw_ibrs_disable = val != 0;
847 hw_ibrs_recalculate();
848 return (0);
849}
850SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
851 CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
852 "Disable Indirect Branch Restricted Speculation");
853
/* Non-zero when the SSBD bit is currently being kept set in IA32_SPEC_CTRL. */
854int hw_ssb_active;
/* Policy knob: 0 - off, 1 - on, 2 - auto; see hw_ssb_recalculate(). */
855int hw_ssb_disable;
856
857SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_active, CTLFLAG_RD,
858 &hw_ssb_active, 0,
859 "Speculative Store Bypass Disable active");
860
/*
 * Set or clear the Speculative Store Bypass Disable (SSBD) bit in the
 * IA32_SPEC_CTRL MSR on the CPU this thread is currently running on.
 * Read-modify-write so the other SPEC_CTRL bits (IBRS, STIBP) are
 * preserved.
 */
861static void
862hw_ssb_set_one(bool enable)
863{
864 uint64_t v;
865
866 v = rdmsr(MSR_IA32_SPEC_CTRL);
867 if (enable)
868 v |= (uint64_t)IA32_SPEC_CTRL_SSBD;
869 else
870 v &= ~(uint64_t)IA32_SPEC_CTRL_SSBD;
871 wrmsr(MSR_IA32_SPEC_CTRL, v);
872}
873
/*
 * Record whether SSBD is active and program the MSR accordingly.  If
 * the CPU does not advertise SSBD (CPUID_STDEXT3_SSBD), force the
 * active flag off and do nothing.  When for_all_cpus is true, the
 * current thread is temporarily bound to each CPU in turn so that
 * every CPU's IA32_SPEC_CTRL is rewritten; a pre-existing binding, if
 * any, is restored before the thread lock is dropped.
 */
874static void
875hw_ssb_set(bool enable, bool for_all_cpus)
876{
877 struct thread *td;
878 int bound_cpu, i, is_bound;
879
880 if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) {
881 hw_ssb_active = 0;
882 return;
883 }
884 hw_ssb_active = enable;
885 if (for_all_cpus) {
886 td = curthread;
887 thread_lock(td);
888 is_bound = sched_is_bound(td);
889 bound_cpu = td->td_oncpu;
890 CPU_FOREACH(i) {
891 sched_bind(td, i);
892 hw_ssb_set_one(enable);
893 }
894 if (is_bound)
895 sched_bind(td, bound_cpu);
896 else
897 sched_unbind(td);
898 thread_unlock(td);
899 } else {
900 hw_ssb_set_one(enable);
901 }
902}
903
/*
 * Apply the hw_ssb_disable policy: 0 - off, 1 - on, 2 - auto (enable
 * unless the CPU reports IA32_ARCH_CAP_SSBD_NO, i.e. it is not
 * vulnerable to Speculative Store Bypass).  Any unrecognized value is
 * normalized to 0 and treated as off.
 */
904void
905hw_ssb_recalculate(bool all_cpus)
906{
907
908 switch (hw_ssb_disable) {
909 default:
910 hw_ssb_disable = 0;
911 /* FALLTHROUGH */
912 case 0: /* off */
913 hw_ssb_set(false, all_cpus);
914 break;
915 case 1: /* on */
916 hw_ssb_set(true, all_cpus);
917 break;
918 case 2: /* auto */
919 hw_ssb_set((cpu_ia32_arch_caps & IA32_ARCH_CAP_SSBD_NO) != 0 ?
920 false : true, all_cpus);
921 break;
922 }
923}
924
/*
 * Sysctl handler for hw.spec_store_bypass_disable: accept a new policy
 * value and reprogram SSBD on all CPUs.  The value is not range-checked
 * here; hw_ssb_recalculate() normalizes unknown values to 0 (off).
 */
925static int
926hw_ssb_disable_handler(SYSCTL_HANDLER_ARGS)
927{
928 int error, val;
929
930 val = hw_ssb_disable;
931 error = sysctl_handle_int(oidp, &val, 0, req);
932 if (error != 0 || req->newptr == NULL)
933 return (error);
934 hw_ssb_disable = val;
935 hw_ssb_recalculate(true);
936 return (0);
937}
/*
 * hw.spec_store_bypass_disable knob; handled by hw_ssb_disable_handler().
 * Fix: the description string was missing its closing parenthesis
 * ("... 2 - auto" instead of "... 2 - auto)"), which shows up verbatim
 * in `sysctl -d` output.
 */
938SYSCTL_PROC(_hw, OID_AUTO, spec_store_bypass_disable, CTLTYPE_INT |
939 CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
940 hw_ssb_disable_handler, "I",
941 "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");
942