/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_acpi.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_sched.h"
#include "opt_smp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/cpufunc.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>
#include <x86/ucode.h>
#include <machine/cpu.h>
#include <x86/init.h>

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

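/*
 * Legacy BIOS warm-boot support, used when not booted via EFI:
 * WARMBOOT_OFF/WARMBOOT_SEG locate the reset vector in the BIOS Data
 * Area (physical 0x467), and the CMOS shutdown status byte (BIOS_RESET)
 * is set to BIOS_WARM so that a CPU coming out of reset jumps through
 * that vector instead of running the normal POST sequence.
 */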
#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

#define GiB(v)			(v ## ULL << 30)

#define	AP_BOOTPT_SZ		(PAGE_SIZE * 4)

/* Temporary variables for init_secondary() */
static char *doublefault_stack;
static char *mce_stack;
static char *nmi_stack;
static char *dbg_stack;
void *bootpcpu;

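/* Paging-mode flags consumed by the MP boot trampoline. */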
extern u_int mptramp_la57;
extern u_int mptramp_nx;
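/*
 * Hook for the TLB shootdown implementation; defaults to the native
 * IPI-based version below, but may be replaced, e.g. by a
 * paravirtualized implementation.
 */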
smp_targeted_tlb_shootdown_t smp_targeted_tlb_shootdown = &smp_targeted_tlb_shootdown_native;
/*
 * Local data and functions.
 */

static int start_ap(int apic_id, vm_paddr_t boot_address);

void
smp_targeted_tlb_shootdown_native(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2,
    smp_invl_cb_t curcpu_cb, enum invl_op_codes op);
/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
	}

	/* Install an inter-CPU IPI for cache and TLB invalidations. */
	setidt(IPI_INVLOP, pti ? IDTVEC(invlop_pti) : IDTVEC(invlop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, pti ? IDTVEC(rendezvous_pti) :
	    IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, pti ? IDTVEC(ipi_intr_bitmap_handler_pti) :
	    IDTVEC(ipi_intr_bitmap_handler), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, pti ? IDTVEC(cpustop_pti) : IDTVEC(cpustop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, pti ? IDTVEC(cpususpend_pti) : IDTVEC(cpususpend),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an IPI for calling delayed SWI */
	setidt(IPI_SWI, pti ? IDTVEC(ipi_swi_pti) : IDTVEC(ipi_swi),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	mptramp_la57 = la57;
	mptramp_nx = pg_nx != 0;
	MPASS(kernel_pmap->pm_cr3 < (1UL << 32));
	mptramp_pagetables = kernel_pmap->pm_cr3;

	/* Start each Application Processor */
	start_all_aps();

	set_interrupt_apic_ids();

#if defined(DEV_ACPI) && MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct nmi_pcpu *np;
	struct user_segment_descriptor *gdt;
	struct region_descriptor ap_gdt;
	u_int64_t cr0;
	int cpu, gsel_tss, x;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Update microcode before doing anything else. */
	ucode_load_ap(cpu);

	/* Initialize the PCPU area. */
	pc = bootpcpu;
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpu);
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &pc->pc_common_tss;
	pc->pc_rsp0 = 0;
	pc->pc_pti_rsp0 = (((vm_offset_t)&pc->pc_pti_stack +
	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
	gdt = pc->pc_gdt;
	pc->pc_tss = (struct system_segment_descriptor *)&gdt[GPROC0_SEL];
	pc->pc_fs32p = &gdt[GUFS32_SEL];
	pc->pc_gs32p = &gdt[GUGS32_SEL];
	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL];
	pc->pc_ucr3_load_mask = PMAP_UCR3_NOMASK;
	/* See comment in pmap_bootstrap(). */
	pc->pc_pcid_next = PMAP_PCID_KERN + 2;
	pc->pc_pcid_gen = 1;
	pc->pc_kpmap_store.pm_pcid = PMAP_PCID_KERN;
	pc->pc_kpmap_store.pm_gen = 1;

	pc->pc_smp_tlb_gen = 1;

	/* Init tss */
	pc->pc_common_tss = __pcpu[0].pc_common_tss;
	pc->pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
	    IOPERM_BITMAP_SIZE;
	pc->pc_common_tss.tss_rsp0 = 0;

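	/*
	 * Each IST stack keeps a struct nmi_pcpu at its top that points
	 * back at this CPU's pcpu area, so the double fault, NMI, MC#
	 * and DB# handlers can locate their PCPU data without relying
	 * on a trustworthy GS base.
	 */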
	/* The doublefault stack runs on IST1. */
	np = ((struct nmi_pcpu *)&doublefault_stack[DBLFAULT_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist1 = (long)np;

	/* The NMI stack runs on IST2. */
	np = ((struct nmi_pcpu *)&nmi_stack[NMI_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist2 = (long)np;

	/* The MC# stack runs on IST3. */
	np = ((struct nmi_pcpu *)&mce_stack[MCE_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist3 = (long)np;

	/* The DB# stack runs on IST4. */
	np = ((struct nmi_pcpu *)&dbg_stack[DBG_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist4 = (long)np;

	/* Prepare private GDT */
	gdt_segs[GPROC0_SEL].ssd_base = (long)&pc->pc_common_tss;
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != GPROC0_SEL + 1 &&
		    x != GUSERLDT_SEL && x != GUSERLDT_SEL + 1)
			ssdtosd(&gdt_segs[x], &gdt[x]);
	}
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	ap_gdt.rd_base = (u_long)gdt;
	lgdt(&ap_gdt);			/* does magic intra-segment return */

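	/*
	 * Point the kernel GS base at our pcpu area before anything
	 * uses PCPU_GET/PCPU_SET on this CPU; the FS base and the user
	 * GS base start out as zero for userland.
	 */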
	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (uint64_t)pc);
	wrmsr(MSR_KGSBASE, 0);		/* User value */
	fix_cpuid();

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	amd64_conf_fast_syscall();

	/* Signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (atomic_load_acq_int(&aps_ready) == 0)
		ia32_pause();

	init_secondary_tail();
}

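/*
 * Allocate a backing page for each AP's pcpu area, preferring the AP's
 * own memory domain when NUMA information is available, and map it at
 * the corresponding __pcpu[] slot.
 */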
static void
amd64_mp_alloc_pcpu(void)
{
	vm_page_t m;
	int cpu;

	/* Allocate pcpu areas to the correct domain. */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
#ifdef NUMA
		m = NULL;
		if (vm_ndomains > 1) {
			m = vm_page_alloc_noobj_domain(
			    acpi_pxm_get_cpu_locality(cpu_apic_ids[cpu]),
			    VM_ALLOC_ZERO);
		}
		if (m == NULL)
#endif
			m = vm_page_alloc_noobj(VM_ALLOC_ZERO);
		if (m == NULL)
			panic("cannot alloc pcpu page for cpu %d", cpu);
		pmap_qenter((vm_offset_t)&__pcpu[cpu], &m, 1);
	}
}

/*
 * start each AP in our list
 */
int
start_all_aps(void)
{
	vm_page_t m_boottramp, m_pml4, m_pdp, m_pd[4];
	pml5_entry_t old_pml45;
	pml4_entry_t *v_pml4;
	pdp_entry_t *v_pdp;
	pd_entry_t *v_pd;
	vm_paddr_t boot_address;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, domain, i;
	u_char mpbiosreason;

	amd64_mp_alloc_pcpu();
	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	MPASS(bootMP_size <= PAGE_SIZE);
	m_boottramp = vm_page_alloc_noobj_contig(0, 1, 0,
	    (1ULL << 20), /* Trampoline should be below 1M for real mode */
	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	boot_address = VM_PAGE_TO_PHYS(m_boottramp);

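	/*
	 * The trampoline starts in real mode and enables paging while
	 * still executing from low physical memory, so the page tables
	 * it loads must identity-map the low 4G using 2M pages.
	 */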
	/* Create a transient 1:1 mapping of low 4G */
	if (la57) {
		m_pml4 = pmap_page_alloc_below_4g(true);
		v_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pml4));
	} else {
		v_pml4 = &kernel_pmap->pm_pmltop[0];
	}
	m_pdp = pmap_page_alloc_below_4g(true);
	v_pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pdp));
	m_pd[0] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[0]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (i << PDRSHIFT) | X86_PG_V | X86_PG_RW | X86_PG_A |
		    X86_PG_M | PG_PS;
	m_pd[1] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[1]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (NBPDP + (i << PDRSHIFT)) | X86_PG_V | X86_PG_RW |
		    X86_PG_A | X86_PG_M | PG_PS;
	m_pd[2] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[2]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (2UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
	m_pd[3] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[3]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (3UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
	v_pdp[0] = VM_PAGE_TO_PHYS(m_pd[0]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[1] = VM_PAGE_TO_PHYS(m_pd[1]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[2] = VM_PAGE_TO_PHYS(m_pd[2]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[3] = VM_PAGE_TO_PHYS(m_pd[3]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	old_pml45 = kernel_pmap->pm_pmltop[0];
	if (la57) {
		kernel_pmap->pm_pmltop[0] = VM_PAGE_TO_PHYS(m_pml4) |
		    X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
	}
	v_pml4[0] = VM_PAGE_TO_PHYS(m_pdp) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	pmap_invalidate_all(kernel_pmap);

	/* copy the AP 1st level boot code */
	bcopy(mptramp_start, (void *)PHYS_TO_DMAP(boot_address), bootMP_size);
	if (bootverbose)
		printf("AP boot address %#lx\n", boot_address);

	/* save the current value of the warm-start vector */
	if (!efi_boot)
		mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	if (!efi_boot) {
		*((volatile u_short *)WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *)WARMBOOT_SEG) = (boot_address >> 4);
	}
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	domain = 0;
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];
#ifdef NUMA
		if (vm_ndomains > 1)
			domain = acpi_pxm_get_cpu_locality(apic_id);
#endif
		/* Allocate and set up the boot stack and special stacks for this AP. */
		bootstacks[cpu] = kmem_malloc(kstack_pages * PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		doublefault_stack = kmem_malloc(DBLFAULT_STACK_SIZE,
		    M_WAITOK | M_ZERO);
		mce_stack = kmem_malloc(MCE_STACK_SIZE,
		    M_WAITOK | M_ZERO);
		nmi_stack = kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), NMI_STACK_SIZE, M_WAITOK | M_ZERO);
		dbg_stack = kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), DBG_STACK_SIZE, M_WAITOK | M_ZERO);
		dpcpu = kmem_malloc_domainset(DOMAINSET_PREF(domain),
		    DPCPU_SIZE, M_WAITOK | M_ZERO);

		bootpcpu = &__pcpu[cpu];
		bootSTK = (char *)bootstacks[cpu] +
		    kstack_pages * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id, boot_address)) {
			/* restore the warmstart vector */
			if (!efi_boot)
				*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}

	/* restore the warmstart vector */
	if (!efi_boot)
		*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* Destroy transient 1:1 mapping */
	kernel_pmap->pm_pmltop[0] = old_pml45;
	invlpg(0);
	if (la57)
		vm_page_free(m_pml4);
	vm_page_free(m_pd[3]);
	vm_page_free(m_pd[2]);
	vm_page_free(m_pd[1]);
	vm_page_free(m_pd[0]);
	vm_page_free(m_pdp);
	vm_page_free(m_boottramp);

	/* number of APs actually started */
	return (mp_naps);
}

/*
 * This function starts the AP (application processor) identified
 * by 'apic_id'.  It does quite a "song and dance" to accomplish
 * this.  This is necessary because of the nuances of the different
 * hardware we might encounter.  It isn't pretty, but it seems to work.
 */
static int
start_ap(int apic_id, vm_paddr_t boot_address)
{
	int vector, ms;
	int cpus;

	/* The SIPI vector encodes the 4K page number of the boot trampoline. */
	vector = (boot_address >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	ipi_startup(apic_id, vector);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return (1);	/* return SUCCESS */
		DELAY(1000);
	}
	return (0);		/* return FAILURE */
}

/*
 * Flush the TLB on other CPUs.
 */

/*
 * These variables are initialized at startup to reflect how each of
 * the different kinds of invalidations should be performed on the
 * current machine and environment.
 */
static enum invl_op_codes invl_op_tlb;
static enum invl_op_codes invl_op_pgrng;
static enum invl_op_codes invl_op_pg;

/*
 * Scoreboard of IPI completion notifications from targets to the IPI
 * initiator.
 *
 * Each CPU can initiate a shootdown IPI independently of the other
 * CPUs.  The initiator enters a critical section, fills its local PCPU
 * shootdown info (pc_smp_tlb_ vars), then clears the scoreboard
 * generation at location (cpu, my_cpuid) for each target cpu.  After
 * that, an IPI is sent to all targets, which scan for zeroed scoreboard
 * generation words.  Upon finding such a word, the target reads the
 * shootdown data from the corresponding cpu's pcpu and sets the
 * generation.  Meanwhile, the initiator loops waiting for all zeroed
 * generations in the scoreboard to update.
 */
static uint32_t *invl_scoreboard;

static void
invl_scoreboard_init(void *arg __unused)
{
	u_int i;

	invl_scoreboard = malloc(sizeof(uint32_t) * (mp_maxid + 1) *
	    (mp_maxid + 1), M_DEVBUF, M_WAITOK);
	for (i = 0; i < (mp_maxid + 1) * (mp_maxid + 1); i++)
		invl_scoreboard[i] = 1;

	if (pmap_pcid_enabled) {
		if (invpcid_works) {
			if (pti)
				invl_op_tlb = INVL_OP_TLB_INVPCID_PTI;
			else
				invl_op_tlb = INVL_OP_TLB_INVPCID;
			invl_op_pgrng = INVL_OP_PGRNG_INVPCID;
			invl_op_pg = INVL_OP_PG_INVPCID;
		} else {
			invl_op_tlb = INVL_OP_TLB_PCID;
			invl_op_pgrng = INVL_OP_PGRNG_PCID;
			invl_op_pg = INVL_OP_PG_PCID;
		}
	} else {
		invl_op_tlb = INVL_OP_TLB;
		invl_op_pgrng = INVL_OP_PGRNG;
		invl_op_pg = INVL_OP_PG;
	}
}
SYSINIT(invl_ops, SI_SUB_SMP - 1, SI_ORDER_ANY, invl_scoreboard_init, NULL);

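/*
 * Each target CPU owns one row of the scoreboard, with one generation
 * word per possible initiator; invl_scoreboard_slot() returns the
 * calling CPU's word in the given target's row.
 */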
static uint32_t *
invl_scoreboard_getcpu(u_int cpu)
{
	return (invl_scoreboard + cpu * (mp_maxid + 1));
}

static uint32_t *
invl_scoreboard_slot(u_int cpu)
{
	return (invl_scoreboard_getcpu(cpu) + PCPU_GET(cpuid));
}

/*
 * Used by the pmap to request cache or TLB invalidation on local and
 * remote processors.  Mask provides the set of remote CPUs that are
 * to be signalled with the invalidation IPI.  As an optimization, the
 * curcpu_cb callback is invoked on the calling CPU in a critical
 * section while waiting for the remote CPUs to complete the operation.
 *
 * The callback function is called unconditionally on the caller's
 * underlying processor, even when this processor is not set in the
 * mask.  So, the callback function must be prepared to handle such
 * spurious invocations.
 *
 * Interrupts must be enabled when calling the function with smp
 * started, to avoid deadlock with other IPIs that are protected with
 * smp_ipi_mtx spinlock at the initiator side.
 *
 * Function must be called with the thread pinned, and it unpins on
 * completion.
 */
void
smp_targeted_tlb_shootdown_native(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2,
    smp_invl_cb_t curcpu_cb, enum invl_op_codes op)
{
	cpuset_t mask;
	uint32_t generation, *p_cpudone;
	int cpu;
	bool is_all;

	/*
	 * It is not necessary to signal other CPUs while booting or
	 * when in the debugger.
	 */
	if (__predict_false(kdb_active || KERNEL_PANICKED() || !smp_started))
		goto local_cb;

	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));

	/*
	 * Make a stable copy of the set of CPUs on which the pmap is active.
	 * See if we have to interrupt other CPUs.
	 */
	CPU_COPY(pmap_invalidate_cpu_mask(pmap), &mask);
	is_all = CPU_CMP(&mask, &all_cpus) == 0;
	CPU_CLR(curcpu, &mask);
	if (CPU_EMPTY(&mask))
		goto local_cb;

	/*
	 * Initiator must have interrupts enabled, which prevents
	 * non-invalidation IPIs that take smp_ipi_mtx spinlock,
	 * from deadlocking with us.  On the other hand, preemption
	 * must be disabled to pin initiator to the instance of the
	 * pcpu pc_smp_tlb data and scoreboard line.
	 */
	KASSERT((read_rflags() & PSL_I) != 0,
	    ("smp_targeted_tlb_shootdown: interrupts disabled"));
	critical_enter();

	PCPU_SET(smp_tlb_addr1, addr1);
	PCPU_SET(smp_tlb_addr2, addr2);
	PCPU_SET(smp_tlb_pmap, pmap);
	generation = PCPU_GET(smp_tlb_gen);
	if (++generation == 0)
		generation = 1;
	PCPU_SET(smp_tlb_gen, generation);
	PCPU_SET(smp_tlb_op, op);
	/* Fence between filling smp_tlb fields and clearing scoreboard. */
	atomic_thread_fence_rel();

	CPU_FOREACH_ISSET(cpu, &mask) {
		KASSERT(*invl_scoreboard_slot(cpu) != 0,
		    ("IPI scoreboard is zero, initiator %d target %d",
		    curcpu, cpu));
		*invl_scoreboard_slot(cpu) = 0;
	}

	/*
	 * IPI acts as a fence between writing to the scoreboard above
	 * (zeroing slot) and reading from it below (wait for
	 * acknowledgment).
	 */
	if (is_all) {
		ipi_all_but_self(IPI_INVLOP);
	} else {
		ipi_selected(mask, IPI_INVLOP);
	}
	curcpu_cb(pmap, addr1, addr2);
	CPU_FOREACH_ISSET(cpu, &mask) {
		p_cpudone = invl_scoreboard_slot(cpu);
		while (atomic_load_int(p_cpudone) != generation)
			ia32_pause();
	}

	/*
	 * Unpin before leaving critical section.  If the thread owes
	 * preemption, this allows scheduler to select thread on any
	 * CPU from its cpuset.
	 */
	sched_unpin();
	critical_exit();

	return;

local_cb:
	critical_enter();
	curcpu_cb(pmap, addr1, addr2);
	sched_unpin();
	critical_exit();
}

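/*
 * Convenience wrappers used by the pmap layer; each dispatches through
 * the smp_targeted_tlb_shootdown hook with the invalidation opcode
 * selected at boot by invl_scoreboard_init().
 */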
void
smp_masked_invltlb(pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(pmap, 0, 0, curcpu_cb, invl_op_tlb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_global++;
#endif
}

void
smp_masked_invlpg(vm_offset_t addr, pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(pmap, addr, 0, curcpu_cb, invl_op_pg);
#ifdef COUNT_XINVLTLB_HITS
	ipi_page++;
#endif
}

void
smp_masked_invlpg_range(vm_offset_t addr1, vm_offset_t addr2, pmap_t pmap,
    smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(pmap, addr1, addr2, curcpu_cb,
	    invl_op_pgrng);
#ifdef COUNT_XINVLTLB_HITS
	ipi_range++;
	ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
}

void
smp_cache_flush(smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(kernel_pmap, 0, 0, curcpu_cb, INVL_OP_CACHE);
}

/*
 * Handlers for TLB related IPIs
 */
static void
invltlb_handler(pmap_t smp_tlb_pmap)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap)
		invltlb_glob();
	else
		invltlb();
}

static void
invltlb_invpcid_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = pmap_get_pcid(smp_tlb_pmap);
	d.pad = 0;
	d.addr = 0;
	invpcid(&d, smp_tlb_pmap == kernel_pmap ? INVPCID_CTXGLOB :
	    INVPCID_CTX);
}

static void
invltlb_invpcid_pti_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = pmap_get_pcid(smp_tlb_pmap);
	d.pad = 0;
	d.addr = 0;
	if (smp_tlb_pmap == kernel_pmap) {
		/*
		 * This invalidation actually needs to clear kernel
		 * mappings from the TLB in the current pmap, but
		 * since we were asked for the flush in the kernel
		 * pmap, achieve it by performing a global flush.
		 */
		invpcid(&d, INVPCID_CTXGLOB);
	} else {
		invpcid(&d, INVPCID_CTX);
		if (smp_tlb_pmap == PCPU_GET(curpmap) &&
		    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
			PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
	}
}

static void
invltlb_pcid_handler(pmap_t smp_tlb_pmap)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap) {
		invltlb_glob();
	} else {
		/*
		 * The current pmap might not be equal to
		 * smp_tlb_pmap.  The clearing of the pm_gen in
		 * pmap_invalidate_all() takes care of TLB
		 * invalidation when switching to the pmap on this
		 * CPU.
		 */
		if (smp_tlb_pmap == PCPU_GET(curpmap)) {
			load_cr3(smp_tlb_pmap->pm_cr3 |
			    pmap_get_pcid(smp_tlb_pmap));
			if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
				PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
		}
	}
}

static void
invlpg_handler(vm_offset_t smp_tlb_addr1)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
}

static void
invlpg_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	pmap_invlpg(smp_tlb_pmap, smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = pmap_get_pcid(smp_tlb_pmap) | PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		invpcid(&d, INVPCID_ADDR);
	}
}

static void
invlpg_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = pmap_get_pcid(smp_tlb_pmap);
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlpg(ucr3, kcr3, smp_tlb_addr1);
	}
}

static void
invlrng_handler(vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < smp_tlb_addr2);
}

static void
invlrng_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	struct invpcid_descr d;
	vm_offset_t addr;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
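	/*
	 * On CPUs flagged with pcid_invlpg_workaround, per-page invlpg
	 * is not trusted for kernel (global) mappings while PCIDs are
	 * in use, so fall back to a full global invalidation instead.
	 */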
	if (smp_tlb_pmap == kernel_pmap && PCPU_GET(pcid_invlpg_workaround)) {
		struct invpcid_descr d = { 0 };

		invpcid(&d, INVPCID_CTXGLOB);
	} else {
		do {
			invlpg(addr);
			addr += PAGE_SIZE;
		} while (addr < smp_tlb_addr2);
	}
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = pmap_get_pcid(smp_tlb_pmap) | PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		do {
			invpcid(&d, INVPCID_ADDR);
			d.addr += PAGE_SIZE;
		} while (d.addr < smp_tlb_addr2);
	}
}

static void
invlrng_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr;
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < smp_tlb_addr2);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = pmap_get_pcid(smp_tlb_pmap);
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlrng(ucr3, kcr3, smp_tlb_addr1, smp_tlb_addr2);
	}
}

static void
invlcache_handler(void)
{
#ifdef COUNT_IPIS
	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */
	wbinvd();
}

static void
invlop_handler_one_req(enum invl_op_codes smp_tlb_op, pmap_t smp_tlb_pmap,
    vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	switch (smp_tlb_op) {
	case INVL_OP_TLB:
		invltlb_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID:
		invltlb_invpcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID_PTI:
		invltlb_invpcid_pti_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_PCID:
		invltlb_pcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_PGRNG:
		invlrng_handler(smp_tlb_addr1, smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_INVPCID:
		invlrng_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_PCID:
		invlrng_pcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PG:
		invlpg_handler(smp_tlb_addr1);
		break;
	case INVL_OP_PG_INVPCID:
		invlpg_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_PG_PCID:
		invlpg_pcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_CACHE:
		invlcache_handler();
		break;
	default:
		__assert_unreachable();
		break;
	}
}

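/*
 * IPI_INVLOP handler.  Scan this CPU's scoreboard row for slots that an
 * initiator has zeroed, copy that initiator's request from its pcpu
 * area, acknowledge by storing the generation back into the slot, and
 * perform the invalidation.  Loop until no pending requests remain,
 * since several initiators may target this CPU at once.
 */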
void
invlop_handler(void)
{
	struct pcpu *initiator_pc;
	pmap_t smp_tlb_pmap;
	vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
	u_int initiator_cpu_id;
	enum invl_op_codes smp_tlb_op;
	uint32_t *scoreboard, smp_tlb_gen;

	scoreboard = invl_scoreboard_getcpu(PCPU_GET(cpuid));
	for (;;) {
		for (initiator_cpu_id = 0; initiator_cpu_id <= mp_maxid;
		    initiator_cpu_id++) {
			if (atomic_load_int(&scoreboard[initiator_cpu_id]) == 0)
				break;
		}
		if (initiator_cpu_id > mp_maxid)
			break;
		initiator_pc = cpuid_to_pcpu[initiator_cpu_id];

		/*
		 * This acquire fence and its corresponding release
		 * fence in smp_targeted_tlb_shootdown() are between
		 * reading the zero scoreboard slot and accessing the
		 * initiator's PCPU for the pc_smp_tlb values.
		 */
		atomic_thread_fence_acq();
		smp_tlb_pmap = initiator_pc->pc_smp_tlb_pmap;
		smp_tlb_addr1 = initiator_pc->pc_smp_tlb_addr1;
		smp_tlb_addr2 = initiator_pc->pc_smp_tlb_addr2;
		smp_tlb_op = initiator_pc->pc_smp_tlb_op;
		smp_tlb_gen = initiator_pc->pc_smp_tlb_gen;

		/*
		 * Ensure that we do not make our scoreboard
		 * notification visible to the initiator until the
		 * pc_smp_tlb values are read.  The corresponding
		 * fence is implicitly provided by the barrier in the
		 * IPI send operation before the APIC ICR register
		 * write.
		 *
		 * As an optimization, the request is acknowledged
		 * before the actual invalidation is performed.  It is
		 * safe because the target CPU cannot return to
		 * userspace before the handler finishes.  Only an NMI
		 * can preempt the handler, but the NMI would see the
		 * kernel handler frame and not touch the
		 * not-yet-invalidated user page table.
		 */
		atomic_thread_fence_acq();
		atomic_store_int(&scoreboard[initiator_cpu_id], smp_tlb_gen);

		invlop_handler_one_req(smp_tlb_op, smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
	}
}