intel_drv.c, revision 257251 (head) vs. revision 259512 (stable/10)
1/*-
2 * Copyright (c) 2013 The FreeBSD Foundation
3 * All rights reserved.
4 *
5 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
6 * under sponsorship from the FreeBSD Foundation.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/x86/iommu/intel_drv.c 257251 2013-10-28 13:33:29Z kib $");
31__FBSDID("$FreeBSD: stable/10/sys/x86/iommu/intel_drv.c 259512 2013-12-17 13:49:35Z kib $");
32
33#include "opt_acpi.h"
34#if defined(__amd64__) /* || defined(__ia64__) */
35#define DEV_APIC
36#else
37#include "opt_apic.h"
38#endif
39#include "opt_ddb.h"
40
41#include <sys/param.h>
42#include <sys/bus.h>
43#include <sys/kernel.h>
44#include <sys/lock.h>
45#include <sys/malloc.h>
46#include <sys/memdesc.h>
47#include <sys/module.h>
48#include <sys/rman.h>
49#include <sys/rwlock.h>
50#include <sys/smp.h>
51#include <sys/taskqueue.h>
52#include <sys/tree.h>
53#include <machine/bus.h>
54#include <contrib/dev/acpica/include/acpi.h>
55#include <contrib/dev/acpica/include/accommon.h>
56#include <dev/acpica/acpivar.h>
57#include <vm/vm.h>
58#include <vm/vm_extern.h>
59#include <vm/vm_kern.h>
60#include <vm/vm_object.h>
61#include <vm/vm_page.h>
62#include <vm/vm_pager.h>
63#include <vm/vm_map.h>
64#include <x86/include/busdma_impl.h>
65#include <x86/iommu/intel_reg.h>
66#include <x86/iommu/busdma_dmar.h>
67#include <x86/iommu/intel_dmar.h>
68#include <dev/pci/pcivar.h>
69
70#ifdef DEV_APIC
71#include "pcib_if.h"
72#endif
73
74#define DMAR_REG_RID 1
75#define DMAR_IRQ_RID 0
74#define DMAR_FAULT_IRQ_RID 0
75#define DMAR_QI_IRQ_RID 1
76#define DMAR_REG_RID 2
76
77static devclass_t dmar_devclass;
78static device_t *dmar_devs;
79static int dmar_devcnt;
80
81typedef int (*dmar_iter_t)(ACPI_DMAR_HEADER *, void *);
82
83static void
84dmar_iterate_tbl(dmar_iter_t iter, void *arg)
85{
86 ACPI_TABLE_DMAR *dmartbl;
87 ACPI_DMAR_HEADER *dmarh;
88 char *ptr, *ptrend;
89 ACPI_STATUS status;
90
91 status = AcpiGetTable(ACPI_SIG_DMAR, 1, (ACPI_TABLE_HEADER **)&dmartbl);
92 if (ACPI_FAILURE(status))
93 return;
94 ptr = (char *)dmartbl + sizeof(*dmartbl);
95 ptrend = (char *)dmartbl + dmartbl->Header.Length;
96 for (;;) {
97 if (ptr >= ptrend)
98 break;
99 dmarh = (ACPI_DMAR_HEADER *)ptr;
100 if (dmarh->Length <= 0) {
101 printf("dmar_identify: corrupted DMAR table, l %d\n",
102 dmarh->Length);
103 break;
104 }
105 ptr += dmarh->Length;
106 if (!iter(dmarh, arg))
107 break;
108 }
109}
110
111struct find_iter_args {
112 int i;
113 ACPI_DMAR_HARDWARE_UNIT *res;
114};
115
116static int
117dmar_find_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
118{
119 struct find_iter_args *fia;
120
121 if (dmarh->Type != ACPI_DMAR_TYPE_HARDWARE_UNIT)
122 return (1);
123
124 fia = arg;
125 if (fia->i == 0) {
126 fia->res = (ACPI_DMAR_HARDWARE_UNIT *)dmarh;
127 return (0);
128 }
129 fia->i--;
130 return (1);
131}
132
133static ACPI_DMAR_HARDWARE_UNIT *
134dmar_find_by_index(int idx)
135{
136 struct find_iter_args fia;
137
138 fia.i = idx;
139 fia.res = NULL;
140 dmar_iterate_tbl(dmar_find_iter, &fia);
141 return (fia.res);
142}
143
144static int
145dmar_count_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
146{
147
148 if (dmarh->Type == ACPI_DMAR_TYPE_HARDWARE_UNIT)
149 dmar_devcnt++;
150 return (1);
151}
152
153static int dmar_enable = 0;
154static void
155dmar_identify(driver_t *driver, device_t parent)
156{
157 ACPI_TABLE_DMAR *dmartbl;
158 ACPI_DMAR_HARDWARE_UNIT *dmarh;
159 ACPI_STATUS status;
160 int i, error;
161
162 if (acpi_disabled("dmar"))
163 return;
164 TUNABLE_INT_FETCH("hw.dmar.enable", &dmar_enable);
165 if (!dmar_enable)
166 return;
167#ifdef INVARIANTS
168 TUNABLE_INT_FETCH("hw.dmar.check_free", &dmar_check_free);
169#endif
170 TUNABLE_INT_FETCH("hw.dmar.match_verbose", &dmar_match_verbose);
171 status = AcpiGetTable(ACPI_SIG_DMAR, 1, (ACPI_TABLE_HEADER **)&dmartbl);
172 if (ACPI_FAILURE(status))
173 return;
174 haw = dmartbl->Width + 1;
175 if ((1ULL << (haw + 1)) > BUS_SPACE_MAXADDR)
176 dmar_high = BUS_SPACE_MAXADDR;
177 else
178 dmar_high = 1ULL << (haw + 1);
179 if (bootverbose) {
180 printf("DMAR HAW=%d flags=<%b>\n", dmartbl->Width,
181 (unsigned)dmartbl->Flags,
182 "\020\001INTR_REMAP\002X2APIC_OPT_OUT");
183 }
184
185 dmar_iterate_tbl(dmar_count_iter, NULL);
186 if (dmar_devcnt == 0)
187 return;
188 dmar_devs = malloc(sizeof(device_t) * dmar_devcnt, M_DEVBUF,
189 M_WAITOK | M_ZERO);
190 for (i = 0; i < dmar_devcnt; i++) {
191 dmarh = dmar_find_by_index(i);
192 if (dmarh == NULL) {
193 printf("dmar_identify: cannot find HWUNIT %d\n", i);
194 continue;
195 }
196 dmar_devs[i] = BUS_ADD_CHILD(parent, 1, "dmar", i);
197 if (dmar_devs[i] == NULL) {
198 printf("dmar_identify: cannot create instance %d\n", i);
199 continue;
200 }
201 error = bus_set_resource(dmar_devs[i], SYS_RES_MEMORY,
202 DMAR_REG_RID, dmarh->Address, PAGE_SIZE);
203 if (error != 0) {
204 printf(
205 "dmar%d: unable to alloc register window at 0x%08jx: error %d\n",
206 i, (uintmax_t)dmarh->Address, error);
207 device_delete_child(parent, dmar_devs[i]);
208 dmar_devs[i] = NULL;
209 }
210 }
211}
212
213static int
214dmar_probe(device_t dev)
215{
216
217 if (acpi_get_handle(dev) != NULL)
218 return (ENXIO);
219 device_set_desc(dev, "DMA remap");
220 return (0);
221 return (BUS_PROBE_NOWILDCARD);
221}
222
223static void
225dmar_release_intr(device_t dev, struct dmar_unit *unit, int idx)
226{
227 struct dmar_msi_data *dmd;
228
229 dmd = &unit->intrs[idx];
230 if (dmd->irq == -1)
231 return;
232 bus_teardown_intr(dev, dmd->irq_res, dmd->intr_handle);
233 bus_release_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq_res);
234 bus_delete_resource(dev, SYS_RES_IRQ, dmd->irq_rid);
235 PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)),
236 dev, dmd->irq);
237 dmd->irq = -1;
238}
239
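For reference while reading the added code: struct dmar_msi_data, the unit->intrs[] array, and the DMAR_INTR_* indices live in intel_dmar.h, which is not part of this diff. The sketch below is reconstructed only from the fields and constants this change actually touches, with conventional FreeBSD types assumed; the real declaration in the header may differ in layout or naming.

struct dmar_msi_data {
	int		irq;		/* MSI-X vector, -1 when not allocated */
	int		irq_rid;	/* SYS_RES_IRQ rid (DMAR_FAULT_IRQ_RID or DMAR_QI_IRQ_RID) */
	struct resource	*irq_res;	/* allocated interrupt resource */
	void		*intr_handle;	/* cookie from bus_setup_intr() */
	int		(*handler)(void *);	/* filter handed to bus_setup_intr() */
	int		msi_data_reg;	/* DMAR_FEDATA_REG or DMAR_IEDATA_REG */
	int		msi_addr_reg;	/* DMAR_FEADDR_REG or DMAR_IEADDR_REG */
	int		msi_uaddr_reg;	/* DMAR_FEUADDR_REG or DMAR_IEUADDR_REG */
	void		(*enable_intr)(struct dmar_unit *);
	void		(*disable_intr)(struct dmar_unit *);
	const char	*name;		/* "fault" or "qi", used in messages */
};

#define	DMAR_INTR_FAULT		0	/* assumed indices into unit->intrs[] */
#define	DMAR_INTR_QI		1
#define	DMAR_INTR_TOTAL		2	/* size of unit->intrs[] */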
240static void
224dmar_release_resources(device_t dev, struct dmar_unit *unit)
225{
243 int i;
226
227 dmar_fini_busdma(unit);
246 dmar_fini_qi(unit);
228 dmar_fini_fault_log(unit);
229 if (unit->irq != -1) {
230 bus_teardown_intr(dev, unit->irq_res, unit->intr_handle);
231 bus_release_resource(dev, SYS_RES_IRQ, unit->irq_rid,
232 unit->irq_res);
233 bus_delete_resource(dev, SYS_RES_IRQ, unit->irq_rid);
234 PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)),
235 dev, unit->irq);
236 unit->irq = -1;
237 }
248 for (i = 0; i < DMAR_INTR_TOTAL; i++)
249 dmar_release_intr(dev, unit, i);
238 if (unit->regs != NULL) {
239 bus_deactivate_resource(dev, SYS_RES_MEMORY, unit->reg_rid,
240 unit->regs);
241 bus_release_resource(dev, SYS_RES_MEMORY, unit->reg_rid,
242 unit->regs);
243 unit->regs = NULL;
244 }
245 if (unit->domids != NULL) {
246 delete_unrhdr(unit->domids);
247 unit->domids = NULL;
248 }
249 if (unit->ctx_obj != NULL) {
250 vm_object_deallocate(unit->ctx_obj);
251 unit->ctx_obj = NULL;
252 }
253}
254
255static int
256dmar_alloc_irq(device_t dev, struct dmar_unit *unit)
268dmar_alloc_irq(device_t dev, struct dmar_unit *unit, int idx)
257{
258 device_t pcib;
271 struct dmar_msi_data *dmd;
259 uint64_t msi_addr;
260 uint32_t msi_data;
261 int error;
262
276 dmd = &unit->intrs[idx];
263 pcib = device_get_parent(device_get_parent(dev)); /* Really not pcib */
264 error = PCIB_ALLOC_MSIX(pcib, dev, &unit->irq);
278 error = PCIB_ALLOC_MSIX(pcib, dev, &dmd->irq);
265 if (error != 0) {
266 device_printf(dev, "cannot allocate fault interrupt, %d\n",
267 error);
280 device_printf(dev, "cannot allocate %s interrupt, %d\n",
281 dmd->name, error);
268 goto err1;
269 }
270 unit->irq_rid = DMAR_IRQ_RID;
271 error = bus_set_resource(dev, SYS_RES_IRQ, unit->irq_rid, unit->irq,
272 1);
284 error = bus_set_resource(dev, SYS_RES_IRQ, dmd->irq_rid,
285 dmd->irq, 1);
273 if (error != 0) {
274 device_printf(dev, "cannot set interrupt resource, %d\n",
275 error);
287 device_printf(dev, "cannot set %s interrupt resource, %d\n",
288 dmd->name, error);
276 goto err2;
277 }
278 unit->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
279 &unit->irq_rid, RF_ACTIVE);
280 if (unit->irq_res == NULL) {
281 device_printf(dev, "cannot map fault interrupt\n");
291 dmd->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
292 &dmd->irq_rid, RF_ACTIVE);
293 if (dmd->irq_res == NULL) {
294 device_printf(dev,
295 "cannot allocate resource for %s interrupt\n", dmd->name);
282 error = ENXIO;
283 goto err3;
284 }
285 error = bus_setup_intr(dev, unit->irq_res, INTR_TYPE_MISC,
286 dmar_intr, NULL, unit, &unit->intr_handle);
299 error = bus_setup_intr(dev, dmd->irq_res, INTR_TYPE_MISC,
300 dmd->handler, NULL, unit, &dmd->intr_handle);
287 if (error != 0) {
288 device_printf(dev, "cannot setup fault interrupt, %d\n", error);
302 device_printf(dev, "cannot setup %s interrupt, %d\n",
303 dmd->name, error);
289 goto err4;
290 }
291 bus_describe_intr(dev, unit->irq_res, unit->intr_handle, "fault");
292 error = PCIB_MAP_MSI(pcib, dev, unit->irq, &msi_addr, &msi_data);
306 bus_describe_intr(dev, dmd->irq_res, dmd->intr_handle, dmd->name);
307 error = PCIB_MAP_MSI(pcib, dev, dmd->irq, &msi_addr, &msi_data);
293 if (error != 0) {
294 device_printf(dev, "cannot map interrupt, %d\n", error);
309 device_printf(dev, "cannot map %s interrupt, %d\n",
310 dmd->name, error);
295 goto err5;
296 }
297 dmar_write4(unit, DMAR_FEDATA_REG, msi_data);
298 dmar_write4(unit, DMAR_FEADDR_REG, msi_addr);
313 dmar_write4(unit, dmd->msi_data_reg, msi_data);
314 dmar_write4(unit, dmd->msi_addr_reg, msi_addr);
299 /* Only for xAPIC mode */
300 dmar_write4(unit, DMAR_FEUADDR_REG, msi_addr >> 32);
316 dmar_write4(unit, dmd->msi_uaddr_reg, msi_addr >> 32);
301 return (0);
302
303err5:
304 bus_teardown_intr(dev, unit->irq_res, unit->intr_handle);
320 bus_teardown_intr(dev, dmd->irq_res, dmd->intr_handle);
305err4:
306 bus_release_resource(dev, SYS_RES_IRQ, unit->irq_rid, unit->irq_res);
322 bus_release_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq_res);
307err3:
308 bus_delete_resource(dev, SYS_RES_IRQ, unit->irq_rid);
324 bus_delete_resource(dev, SYS_RES_IRQ, dmd->irq_rid);
309err2:
310 PCIB_RELEASE_MSIX(pcib, dev, unit->irq);
311 unit->irq = -1;
326 PCIB_RELEASE_MSIX(pcib, dev, dmd->irq);
327 dmd->irq = -1;
312err1:
313 return (error);
314}
315
316#ifdef DEV_APIC
317static int
318dmar_remap_intr(device_t dev, device_t child, u_int irq)
319{
320 struct dmar_unit *unit;
337 struct dmar_msi_data *dmd;
321 uint64_t msi_addr;
322 uint32_t msi_data;
323 int error;
340 int i, error;
324
325 unit = device_get_softc(dev);
326 if (irq != unit->irq)
327 return (ENOENT);
328 error = PCIB_MAP_MSI(device_get_parent(device_get_parent(dev)), dev,
329 irq, &msi_addr, &msi_data);
330 if (error != 0)
331 return (error);
332 dmar_disable_intr(unit);
333 dmar_write4(unit, DMAR_FEDATA_REG, msi_data);
334 dmar_write4(unit, DMAR_FEADDR_REG, msi_addr);
335 dmar_write4(unit, DMAR_FEUADDR_REG, msi_addr >> 32);
336 dmar_enable_intr(unit);
337 return (0);
343 for (i = 0; i < DMAR_INTR_TOTAL; i++) {
344 dmd = &unit->intrs[i];
345 if (irq == dmd->irq) {
346 error = PCIB_MAP_MSI(device_get_parent(
347 device_get_parent(dev)),
348 dev, irq, &msi_addr, &msi_data);
349 if (error != 0)
350 return (error);
351 DMAR_LOCK(unit);
352 (dmd->disable_intr)(unit);
353 dmar_write4(unit, dmd->msi_data_reg, msi_data);
354 dmar_write4(unit, dmd->msi_addr_reg, msi_addr);
355 dmar_write4(unit, dmd->msi_uaddr_reg, msi_addr >> 32);
356 (dmd->enable_intr)(unit);
357 DMAR_UNLOCK(unit);
358 return (0);
359 }
360 }
361 return (ENOENT);
338}
339#endif
340
341static void
342dmar_print_caps(device_t dev, struct dmar_unit *unit,
343 ACPI_DMAR_HARDWARE_UNIT *dmaru)
344{
345 uint32_t caphi, ecaphi;
346
347 device_printf(dev, "regs@0x%08jx, ver=%d.%d, seg=%d, flags=<%b>\n",
348 (uintmax_t)dmaru->Address, DMAR_MAJOR_VER(unit->hw_ver),
349 DMAR_MINOR_VER(unit->hw_ver), dmaru->Segment,
350 dmaru->Flags, "\020\001INCLUDE_ALL_PCI");
351 caphi = unit->hw_cap >> 32;
352 device_printf(dev, "cap=%b,", (u_int)unit->hw_cap,
353 "\020\004AFL\005WBF\006PLMR\007PHMR\010CM\027ZLR\030ISOCH");
354 printf("%b, ", caphi, "\020\010PSI\027DWD\030DRD");
355 printf("ndoms=%d, sagaw=%d, mgaw=%d, fro=%d, nfr=%d, superp=%d",
356 DMAR_CAP_ND(unit->hw_cap), DMAR_CAP_SAGAW(unit->hw_cap),
357 DMAR_CAP_MGAW(unit->hw_cap), DMAR_CAP_FRO(unit->hw_cap),
358 DMAR_CAP_NFR(unit->hw_cap), DMAR_CAP_SPS(unit->hw_cap));
359 if ((unit->hw_cap & DMAR_CAP_PSI) != 0)
360 printf(", mamv=%d", DMAR_CAP_MAMV(unit->hw_cap));
361 printf("\n");
362 ecaphi = unit->hw_ecap >> 32;
363 device_printf(dev, "ecap=%b,", (u_int)unit->hw_ecap,
364 "\020\001C\002QI\003DI\004IR\005EIM\007PT\010SC");
365 printf("%b, ", ecaphi, "\020");
366 printf("mhmw=%d, iro=%d\n", DMAR_ECAP_MHMV(unit->hw_ecap),
367 DMAR_ECAP_IRO(unit->hw_ecap));
368}
369
370static int
371dmar_attach(device_t dev)
372{
373 struct dmar_unit *unit;
374 ACPI_DMAR_HARDWARE_UNIT *dmaru;
375 int error;
399 int i, error;
376
377 unit = device_get_softc(dev);
378 unit->dev = dev;
379 unit->unit = device_get_unit(dev);
380 dmaru = dmar_find_by_index(unit->unit);
381 if (dmaru == NULL)
382 return (EINVAL);
383 unit->irq = -1;
384 unit->segment = dmaru->Segment;
385 unit->base = dmaru->Address;
386 unit->reg_rid = DMAR_REG_RID;
387 unit->regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
388 &unit->reg_rid, RF_ACTIVE);
389 if (unit->regs == NULL) {
390 device_printf(dev, "cannot allocate register window\n");
391 return (ENOMEM);
392 }
393 unit->hw_ver = dmar_read4(unit, DMAR_VER_REG);
394 unit->hw_cap = dmar_read8(unit, DMAR_CAP_REG);
395 unit->hw_ecap = dmar_read8(unit, DMAR_ECAP_REG);
396 if (bootverbose)
397 dmar_print_caps(dev, unit, dmaru);
398 dmar_quirks_post_ident(unit);
399
400 error = dmar_alloc_irq(dev, unit);
423 for (i = 0; i < DMAR_INTR_TOTAL; i++)
424 unit->intrs[i].irq = -1;
425
426 unit->intrs[DMAR_INTR_FAULT].name = "fault";
427 unit->intrs[DMAR_INTR_FAULT].irq_rid = DMAR_FAULT_IRQ_RID;
428 unit->intrs[DMAR_INTR_FAULT].handler = dmar_fault_intr;
429 unit->intrs[DMAR_INTR_FAULT].msi_data_reg = DMAR_FEDATA_REG;
430 unit->intrs[DMAR_INTR_FAULT].msi_addr_reg = DMAR_FEADDR_REG;
431 unit->intrs[DMAR_INTR_FAULT].msi_uaddr_reg = DMAR_FEUADDR_REG;
432 unit->intrs[DMAR_INTR_FAULT].enable_intr = dmar_enable_fault_intr;
433 unit->intrs[DMAR_INTR_FAULT].disable_intr = dmar_disable_fault_intr;
434 error = dmar_alloc_irq(dev, unit, DMAR_INTR_FAULT);
401 if (error != 0) {
402 dmar_release_resources(dev, unit);
403 return (error);
404 }
439 if (DMAR_HAS_QI(unit)) {
440 unit->intrs[DMAR_INTR_QI].name = "qi";
441 unit->intrs[DMAR_INTR_QI].irq_rid = DMAR_QI_IRQ_RID;
442 unit->intrs[DMAR_INTR_QI].handler = dmar_qi_intr;
443 unit->intrs[DMAR_INTR_QI].msi_data_reg = DMAR_IEDATA_REG;
444 unit->intrs[DMAR_INTR_QI].msi_addr_reg = DMAR_IEADDR_REG;
445 unit->intrs[DMAR_INTR_QI].msi_uaddr_reg = DMAR_IEUADDR_REG;
446 unit->intrs[DMAR_INTR_QI].enable_intr = dmar_enable_qi_intr;
447 unit->intrs[DMAR_INTR_QI].disable_intr = dmar_disable_qi_intr;
448 error = dmar_alloc_irq(dev, unit, DMAR_INTR_QI);
449 if (error != 0) {
450 dmar_release_resources(dev, unit);
451 return (error);
452 }
453 }
454
405 mtx_init(&unit->lock, "dmarhw", NULL, MTX_DEF);
406 unit->domids = new_unrhdr(0, dmar_nd2mask(DMAR_CAP_ND(unit->hw_cap)),
407 &unit->lock);
408
409 /*
410 * 9.2 "Context Entry":
411 * When Caching Mode (CM) field is reported as Set, the
412 * domain-id value of zero is architecturally reserved.
413 * Software must not use domain-id value of zero
414 * when CM is Set.
415 */
416 if ((unit->hw_cap & DMAR_CAP_CM) != 0)
417 alloc_unr_specific(unit->domids, 0);
418
419 unit->ctx_obj = vm_pager_allocate(OBJT_PHYS, NULL, IDX_TO_OFF(1 +
420 DMAR_CTX_CNT), 0, 0, NULL);
421
422 /*
423 * Allocate and load the root entry table pointer. Enable the
424 * address translation after the required invalidations are
425 * done.
426 */
427 dmar_pgalloc(unit->ctx_obj, 0, DMAR_PGF_WAITOK | DMAR_PGF_ZERO);
428 DMAR_LOCK(unit);
429 error = dmar_load_root_entry_ptr(unit);
430 if (error != 0) {
431 DMAR_UNLOCK(unit);
432 dmar_release_resources(dev, unit);
433 return (error);
434 }
435 error = dmar_inv_ctx_glob(unit);
436 if (error != 0) {
437 DMAR_UNLOCK(unit);
438 dmar_release_resources(dev, unit);
439 return (error);
440 }
441 if ((unit->hw_ecap & DMAR_ECAP_DI) != 0) {
442 error = dmar_inv_iotlb_glob(unit);
443 if (error != 0) {
444 DMAR_UNLOCK(unit);
445 dmar_release_resources(dev, unit);
446 return (error);
447 }
448 }
449
450 DMAR_UNLOCK(unit);
451 error = dmar_init_fault_log(unit);
452 if (error != 0) {
453 dmar_release_resources(dev, unit);
454 return (error);
455 }
506 error = dmar_init_qi(unit);
507 if (error != 0) {
508 dmar_release_resources(dev, unit);
509 return (error);
510 }
456 error = dmar_init_busdma(unit);
457 if (error != 0) {
458 dmar_release_resources(dev, unit);
459 return (error);
460 }
461
462#ifdef NOTYET
463 DMAR_LOCK(unit);
464 error = dmar_enable_translation(unit);
465 if (error != 0) {
466 DMAR_UNLOCK(unit);
467 dmar_release_resources(dev, unit);
468 return (error);
469 }
470 DMAR_UNLOCK(unit);
471#endif
472
473 return (0);
474}
475
476static int
477dmar_detach(device_t dev)
478{
479
480 return (EBUSY);
481}
482
483static int
484dmar_suspend(device_t dev)
485{
486
487 return (0);
488}
489
490static int
491dmar_resume(device_t dev)
492{
493
494 /* XXXKIB */
495 return (0);
496}
497
498static device_method_t dmar_methods[] = {
499 DEVMETHOD(device_identify, dmar_identify),
500 DEVMETHOD(device_probe, dmar_probe),
501 DEVMETHOD(device_attach, dmar_attach),
502 DEVMETHOD(device_detach, dmar_detach),
503 DEVMETHOD(device_suspend, dmar_suspend),
504 DEVMETHOD(device_resume, dmar_resume),
505#ifdef DEV_APIC
506 DEVMETHOD(bus_remap_intr, dmar_remap_intr),
507#endif
508 DEVMETHOD_END
509};
510
511static driver_t dmar_driver = {
512 "dmar",
513 dmar_methods,
514 sizeof(struct dmar_unit),
515};
516
517DRIVER_MODULE(dmar, acpi, dmar_driver, dmar_devclass, 0, 0);
518MODULE_DEPEND(dmar, acpi, 1, 1, 1);
519
520static void
521dmar_print_path(device_t dev, const char *banner, int busno, int depth,
522 const ACPI_DMAR_PCI_PATH *path)
523{
524 int i;
525
526 device_printf(dev, "%s [%d, ", banner, busno);
527 for (i = 0; i < depth; i++) {
528 if (i != 0)
529 printf(", ");
530 printf("(%d, %d)", path[i].Device, path[i].Function);
531 }
532 printf("]\n");
533}
534
535static int
536dmar_dev_depth(device_t child)
537{
538 devclass_t pci_class;
539 device_t bus, pcib;
540 int depth;
541
542 pci_class = devclass_find("pci");
543 for (depth = 1; ; depth++) {
544 bus = device_get_parent(child);
545 pcib = device_get_parent(bus);
546 if (device_get_devclass(device_get_parent(pcib)) !=
547 pci_class)
548 return (depth);
549 child = pcib;
550 }
551}
552
553static void
554dmar_dev_path(device_t child, int *busno, ACPI_DMAR_PCI_PATH *path, int depth)
555{
556 devclass_t pci_class;
557 device_t bus, pcib;
558
559 pci_class = devclass_find("pci");
560 for (depth--; depth != -1; depth--) {
561 path[depth].Device = pci_get_slot(child);
562 path[depth].Function = pci_get_function(child);
563 bus = device_get_parent(child);
564 pcib = device_get_parent(bus);
565 if (device_get_devclass(device_get_parent(pcib)) !=
566 pci_class) {
567 /* reached a host bridge */
568 *busno = pcib_get_bus(bus);
569 return;
570 }
571 child = pcib;
572 }
573 panic("wrong depth");
574}
575
576static int
577dmar_match_pathes(int busno1, const ACPI_DMAR_PCI_PATH *path1, int depth1,
578 int busno2, const ACPI_DMAR_PCI_PATH *path2, int depth2,
579 enum AcpiDmarScopeType scope_type)
580{
581 int i, depth;
582
583 if (busno1 != busno2)
584 return (0);
585 if (scope_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && depth1 != depth2)
586 return (0);
587 depth = depth1;
588 if (depth2 < depth)
589 depth = depth2;
590 for (i = 0; i < depth; i++) {
591 if (path1[i].Device != path2[i].Device ||
592 path1[i].Function != path2[i].Function)
593 return (0);
594 }
595 return (1);
596}
597
598static int
599dmar_match_devscope(ACPI_DMAR_DEVICE_SCOPE *devscope, device_t dev,
600 int dev_busno, const ACPI_DMAR_PCI_PATH *dev_path, int dev_path_len)
601{
602 ACPI_DMAR_PCI_PATH *path;
603 int path_len;
604
605 if (devscope->Length < sizeof(*devscope)) {
606 printf("dmar_find: corrupted DMAR table, dl %d\n",
607 devscope->Length);
608 return (-1);
609 }
610 if (devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
611 devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
612 return (0);
613 path_len = devscope->Length - sizeof(*devscope);
614 if (path_len % 2 != 0) {
615 printf("dmar_find_bsf: corrupted DMAR table, dl %d\n",
616 devscope->Length);
617 return (-1);
618 }
619 path_len /= 2;
620 path = (ACPI_DMAR_PCI_PATH *)(devscope + 1);
621 if (path_len == 0) {
622 printf("dmar_find: corrupted DMAR table, dl %d\n",
623 devscope->Length);
624 return (-1);
625 }
626 if (dmar_match_verbose)
627 dmar_print_path(dev, "DMAR", devscope->Bus, path_len, path);
628
629 return (dmar_match_pathes(devscope->Bus, path, path_len, dev_busno,
630 dev_path, dev_path_len, devscope->EntryType));
631}
632
633struct dmar_unit *
634dmar_find(device_t dev)
635{
636 device_t dmar_dev;
637 ACPI_DMAR_HARDWARE_UNIT *dmarh;
638 ACPI_DMAR_DEVICE_SCOPE *devscope;
639 char *ptr, *ptrend;
640 int i, match, dev_domain, dev_busno, dev_path_len;
641
642 dmar_dev = NULL;
643 dev_domain = pci_get_domain(dev);
644 dev_path_len = dmar_dev_depth(dev);
645 ACPI_DMAR_PCI_PATH dev_path[dev_path_len];
646 dmar_dev_path(dev, &dev_busno, dev_path, dev_path_len);
647 if (dmar_match_verbose)
648 dmar_print_path(dev, "PCI", dev_busno, dev_path_len, dev_path);
649
650 for (i = 0; i < dmar_devcnt; i++) {
651 if (dmar_devs[i] == NULL)
652 continue;
653 dmarh = dmar_find_by_index(i);
654 if (dmarh == NULL)
655 continue;
656 if (dmarh->Segment != dev_domain)
657 continue;
658 if ((dmarh->Flags & ACPI_DMAR_INCLUDE_ALL) != 0) {
659 dmar_dev = dmar_devs[i];
660 if (dmar_match_verbose) {
661 device_printf(dev,
662 "pci%d:%d:%d:%d matched dmar%d INCLUDE_ALL\n",
663 dev_domain, pci_get_bus(dev),
664 pci_get_slot(dev),
665 pci_get_function(dev),
666 ((struct dmar_unit *)device_get_softc(
667 dmar_dev))->unit);
668 }
669 goto found;
670 }
671 ptr = (char *)dmarh + sizeof(*dmarh);
672 ptrend = (char *)dmarh + dmarh->Header.Length;
673 for (;;) {
674 if (ptr >= ptrend)
675 break;
676 devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr;
677 ptr += devscope->Length;
678 if (dmar_match_verbose) {
679 device_printf(dev,
680 "pci%d:%d:%d:%d matching dmar%d\n",
681 dev_domain, pci_get_bus(dev),
682 pci_get_slot(dev),
683 pci_get_function(dev),
684 ((struct dmar_unit *)device_get_softc(
685 dmar_devs[i]))->unit);
686 }
687 match = dmar_match_devscope(devscope, dev, dev_busno,
688 dev_path, dev_path_len);
689 if (dmar_match_verbose) {
690 if (match == -1)
691 printf("table error\n");
692 else if (match == 0)
693 printf("not matched\n");
694 else
695 printf("matched\n");
696 }
697 if (match == -1)
698 return (NULL);
699 else if (match == 1) {
700 dmar_dev = dmar_devs[i];
701 goto found;
702 }
703 }
704 }
705 return (NULL);
706found:
707 return (device_get_softc(dmar_dev));
708}
709
710struct rmrr_iter_args {
711 struct dmar_ctx *ctx;
712 device_t dev;
713 int dev_domain;
714 int dev_busno;
715 ACPI_DMAR_PCI_PATH *dev_path;
716 int dev_path_len;
717 struct dmar_map_entries_tailq *rmrr_entries;
718};
719
720static int
721dmar_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
722{
723 struct rmrr_iter_args *ria;
724 ACPI_DMAR_RESERVED_MEMORY *resmem;
725 ACPI_DMAR_DEVICE_SCOPE *devscope;
726 struct dmar_map_entry *entry;
727 char *ptr, *ptrend;
728 int match;
729
730 if (dmarh->Type != ACPI_DMAR_TYPE_RESERVED_MEMORY)
731 return (1);
732
733 ria = arg;
734 resmem = (ACPI_DMAR_RESERVED_MEMORY *)dmarh;
735 if (dmar_match_verbose) {
736 printf("RMRR [%jx,%jx] segment %d\n",
737 (uintmax_t)resmem->BaseAddress,
738 (uintmax_t)resmem->EndAddress,
739 resmem->Segment);
740 }
741 if (resmem->Segment != ria->dev_domain)
742 return (1);
743
744 ptr = (char *)resmem + sizeof(*resmem);
745 ptrend = (char *)resmem + resmem->Header.Length;
746 for (;;) {
747 if (ptr >= ptrend)
748 break;
749 devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr;
750 ptr += devscope->Length;
751 match = dmar_match_devscope(devscope, ria->dev, ria->dev_busno,
752 ria->dev_path, ria->dev_path_len);
753 if (match == 1) {
754 if (dmar_match_verbose)
755 printf("matched\n");
756 entry = dmar_gas_alloc_entry(ria->ctx, DMAR_PGF_WAITOK);
757 entry->start = resmem->BaseAddress;
758 /* The RMRR entry end address is inclusive. */
759 entry->end = resmem->EndAddress;
760 TAILQ_INSERT_TAIL(ria->rmrr_entries, entry,
761 unroll_link);
762 } else if (dmar_match_verbose) {
763 printf("not matched, err %d\n", match);
764 }
765 }
766
767 return (1);
768}
769
770void
771dmar_ctx_parse_rmrr(struct dmar_ctx *ctx, device_t dev,
772 struct dmar_map_entries_tailq *rmrr_entries)
773{
774 struct rmrr_iter_args ria;
775
776 ria.dev_domain = pci_get_domain(dev);
777 ria.dev_path_len = dmar_dev_depth(dev);
778 ACPI_DMAR_PCI_PATH dev_path[ria.dev_path_len];
779 dmar_dev_path(dev, &ria.dev_busno, dev_path, ria.dev_path_len);
780
781 if (dmar_match_verbose) {
782 device_printf(dev, "parsing RMRR entries for ");
783 dmar_print_path(dev, "PCI", ria.dev_busno, ria.dev_path_len,
784 dev_path);
785 }
786
787 ria.ctx = ctx;
788 ria.dev = dev;
789 ria.dev_path = dev_path;
790 ria.rmrr_entries = rmrr_entries;
791 dmar_iterate_tbl(dmar_rmrr_iter, &ria);
792}
793
794struct inst_rmrr_iter_args {
795 struct dmar_unit *dmar;
796};
797
798static device_t
799dmar_path_dev(int segment, int path_len, int busno,
800 const ACPI_DMAR_PCI_PATH *path)
801{
802 devclass_t pci_class;
803 device_t bus, pcib, dev;
804 int i;
805
806 pci_class = devclass_find("pci");
807 dev = NULL;
808 for (i = 0; i < path_len; i++, path++) {
809 dev = pci_find_dbsf(segment, busno, path->Device,
810 path->Function);
811 if (dev == NULL)
812 break;
813 if (i != path_len - 1) {
814 bus = device_get_parent(dev);
815 pcib = device_get_parent(bus);
816 if (device_get_devclass(device_get_parent(pcib)) !=
817 pci_class)
818 return (NULL);
819 }
820 busno = pcib_get_bus(dev);
821 }
822 return (dev);
823}
824
825static int
826dmar_inst_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
827{
828 const ACPI_DMAR_RESERVED_MEMORY *resmem;
829 const ACPI_DMAR_DEVICE_SCOPE *devscope;
830 struct inst_rmrr_iter_args *iria;
831 const char *ptr, *ptrend;
832 struct dmar_unit *dev_dmar;
833 device_t dev;
834
835 if (dmarh->Type != ACPI_DMAR_TYPE_RESERVED_MEMORY)
836 return (1);
837
838 iria = arg;
839 resmem = (ACPI_DMAR_RESERVED_MEMORY *)dmarh;
840 if (resmem->Segment != iria->dmar->segment)
841 return (1);
842 if (dmar_match_verbose) {
843 printf("dmar%d: RMRR [%jx,%jx]\n", iria->dmar->unit,
844 (uintmax_t)resmem->BaseAddress,
845 (uintmax_t)resmem->EndAddress);
846 }
847
848 ptr = (char *)resmem + sizeof(*resmem);
849 ptrend = (char *)resmem + resmem->Header.Length;
903 ptr = (const char *)resmem + sizeof(*resmem);
904 ptrend = (const char *)resmem + resmem->Header.Length;
850 for (;;) {
851 if (ptr >= ptrend)
852 break;
853 devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr;
908 devscope = (const ACPI_DMAR_DEVICE_SCOPE *)ptr;
854 ptr += devscope->Length;
855 /* XXXKIB bridge */
856 if (devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_ENDPOINT)
857 continue;
858 if (dmar_match_verbose) {
859 dmar_print_path(iria->dmar->dev, "RMRR scope",
860 devscope->Bus, (devscope->Length -
861 sizeof(ACPI_DMAR_DEVICE_SCOPE)) / 2,
862 (ACPI_DMAR_PCI_PATH *)(devscope + 1));
917 (const ACPI_DMAR_PCI_PATH *)(devscope + 1));
863 }
864 dev = dmar_path_dev(resmem->Segment, (devscope->Length -
865 sizeof(ACPI_DMAR_DEVICE_SCOPE)) / 2, devscope->Bus,
866 (ACPI_DMAR_PCI_PATH *)(devscope + 1));
921 (const ACPI_DMAR_PCI_PATH *)(devscope + 1));
867 if (dev == NULL) {
868 if (dmar_match_verbose)
869 printf("null dev\n");
870 continue;
871 }
872 dev_dmar = dmar_find(dev);
873 if (dev_dmar != iria->dmar) {
874 if (dmar_match_verbose) {
875 printf("dmar%d matched, skipping\n",
876 dev_dmar->unit);
877 }
878 continue;
879 }
880 if (dmar_match_verbose)
881 printf("matched, instantiating RMRR context\n");
882 dmar_instantiate_ctx(iria->dmar, dev, true);
883 }
884
885 return (1);
886
887}
888
889/*
890 * Pre-create all contexts for the DMAR which have RMRR entries.
891 */
892int
893dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar)
894{
895 struct inst_rmrr_iter_args iria;
896 int error;
897
898 if (!dmar_barrier_enter(dmar, DMAR_BARRIER_RMRR))
899 return (0);
900
901 error = 0;
902 iria.dmar = dmar;
903 if (dmar_match_verbose)
904 printf("dmar%d: instantiating RMRR contexts\n", dmar->unit);
905 dmar_iterate_tbl(dmar_inst_rmrr_iter, &iria);
906 DMAR_LOCK(dmar);
907 if (!LIST_EMPTY(&dmar->contexts)) {
908 KASSERT((dmar->hw_gcmd & DMAR_GCMD_TE) == 0,
909 ("dmar%d: RMRR not handled but translation is already enabled",
910 dmar->unit));
911 error = dmar_enable_translation(dmar);
912 }
913 dmar_barrier_exit(dmar, DMAR_BARRIER_RMRR);
914 return (error);
915}
916
917#ifdef DDB
918#include <ddb/ddb.h>
919#include <ddb/db_lex.h>
920
921static void
922dmar_print_ctx_entry(const struct dmar_map_entry *entry)
923{
924 struct dmar_map_entry *l, *r;
925
926 db_printf(
927 " start %jx end %jx free_after %jx free_down %jx flags %x ",
928 entry->start, entry->end, entry->free_after, entry->free_down,
929 entry->flags);
930 db_printf("left ");
931 l = RB_LEFT(entry, rb_entry);
932 if (l == NULL)
933 db_printf("NULL ");
934 else
935 db_printf("%jx ", l->start);
936 db_printf("right ");
937 r = RB_RIGHT(entry, rb_entry);
938 if (r == NULL)
939 db_printf("NULL");
940 else
941 db_printf("%jx", r->start);
942 db_printf("\n");
943}
944
945static void
946dmar_print_ctx(struct dmar_ctx *ctx, bool show_mappings)
947{
948 struct dmar_map_entry *entry;
949
950 db_printf(
951 " @%p pci%d:%d:%d dom %d mgaw %d agaw %d pglvl %d end %jx\n"
952 " refs %d flags %x pgobj %p map_ents %u loads %lu unloads %lu\n",
953 ctx, ctx->bus, ctx->slot, ctx->func, ctx->domain, ctx->mgaw,
954 ctx->agaw, ctx->pglvl, (uintmax_t)ctx->end, ctx->refs,
955 ctx->flags, ctx->pgtbl_obj, ctx->entries_cnt, ctx->loads,
956 ctx->unloads);
957 if (!show_mappings)
958 return;
959 db_printf(" mapped:\n");
960 RB_FOREACH(entry, dmar_gas_entries_tree, &ctx->rb_root) {
961 dmar_print_ctx_entry(entry);
962 if (db_pager_quit)
963 break;
964 }
965 if (db_pager_quit)
966 return;
967 db_printf(" unloading:\n");
968 TAILQ_FOREACH(entry, &ctx->unload_entries, dmamap_link) {
969 dmar_print_ctx_entry(entry);
970 if (db_pager_quit)
971 break;
972 }
973}
974
975DB_FUNC(dmar_ctx, db_dmar_print_ctx, db_show_table, CS_OWN, NULL)
976{
977 struct dmar_unit *unit;
978 struct dmar_ctx *ctx;
979 bool show_mappings, valid;
980 int domain, bus, device, function, i, t;
981 db_expr_t radix;
982
983 valid = false;
984 radix = db_radix;
985 db_radix = 10;
986 t = db_read_token();
987 if (t == tSLASH) {
988 t = db_read_token();
989 if (t != tIDENT) {
990 db_printf("Bad modifier\n");
991 db_radix = radix;
992 db_skip_to_eol();
993 return;
994 }
995 show_mappings = strchr(db_tok_string, 'm') != NULL;
996 t = db_read_token();
1052 } else {
1053 show_mappings = false;
1054 }
1055 if (t == tNUMBER) {
1056 domain = db_tok_number;
1057 t = db_read_token();
1058 if (t == tNUMBER) {
1059 bus = db_tok_number;
1060 t = db_read_token();
1061 if (t == tNUMBER) {
1062 device = db_tok_number;
1063 t = db_read_token();
1064 if (t == tNUMBER) {
1065 function = db_tok_number;
1066 valid = true;
1067 }
1068 }
1069 }
1070 }
1071 db_radix = radix;
1072 db_skip_to_eol();
1073 if (!valid) {
1074 db_printf("usage: show dmar_ctx [/m] "
1075 "<domain> <bus> <device> <func>\n");
1076 return;
1077 }
1078 for (i = 0; i < dmar_devcnt; i++) {
1079 unit = device_get_softc(dmar_devs[i]);
1080 LIST_FOREACH(ctx, &unit->contexts, link) {
1081 if (domain == unit->segment && bus == ctx->bus &&
1082 device == ctx->slot && function == ctx->func) {
1083 dmar_print_ctx(ctx, show_mappings);
1084 goto out;
1085 }
1086 }
1087 }
1088out:;
1089}
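/*
 * Example invocation from the ddb prompt (the PCI address is illustrative,
 * not taken from the source):
 *
 *	show dmar_ctx /m 0 0 2 0
 *
 * looks up the context for PCI domain 0, bus 0, slot 2, function 0 on the
 * matching unit and, because of the 'm' modifier, also dumps its mapped
 * and unloading entries via dmar_print_ctx().
 */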
1090
1091static void
1092dmar_print_one(int idx, bool show_ctxs, bool show_mappings)
1093{
1094 struct dmar_unit *unit;
1095 struct dmar_ctx *ctx;
1096 int i, frir;
1097
1098 unit = device_get_softc(dmar_devs[idx]);
1099 db_printf("dmar%d at %p, root at 0x%jx, ver 0x%x\n", unit->unit, unit,
1100 dmar_read8(unit, DMAR_RTADDR_REG), dmar_read4(unit, DMAR_VER_REG));
1101 db_printf("cap 0x%jx ecap 0x%jx gsts 0x%x fsts 0x%x fectl 0x%x\n",
1102 (uintmax_t)dmar_read8(unit, DMAR_CAP_REG),
1103 (uintmax_t)dmar_read8(unit, DMAR_ECAP_REG),
1104 dmar_read4(unit, DMAR_GSTS_REG),
1105 dmar_read4(unit, DMAR_FSTS_REG),
1106 dmar_read4(unit, DMAR_FECTL_REG));
1107 db_printf("fed 0x%x fea 0x%x feua 0x%x\n",
1108 dmar_read4(unit, DMAR_FEDATA_REG),
1109 dmar_read4(unit, DMAR_FEADDR_REG),
1110 dmar_read4(unit, DMAR_FEUADDR_REG));
1111 db_printf("primary fault log:\n");
1112 for (i = 0; i < DMAR_CAP_NFR(unit->hw_cap); i++) {
1113 frir = (DMAR_CAP_FRO(unit->hw_cap) + i) * 16;
1114 db_printf(" %d at 0x%x: %jx %jx\n", i, frir,
1115 (uintmax_t)dmar_read8(unit, frir),
1116 (uintmax_t)dmar_read8(unit, frir + 8));
1117 }
1118 if (DMAR_HAS_QI(unit)) {
1119 db_printf("ied 0x%x iea 0x%x ieua 0x%x\n",
1120 dmar_read4(unit, DMAR_IEDATA_REG),
1121 dmar_read4(unit, DMAR_IEADDR_REG),
1122 dmar_read4(unit, DMAR_IEUADDR_REG));
1123 if (unit->qi_enabled) {
1124 db_printf("qi is enabled: queue @0x%jx (IQA 0x%jx) "
1125 "size 0x%jx\n"
1126 " head 0x%x tail 0x%x avail 0x%x status 0x%x ctrl 0x%x\n"
1127 " hw compl 0x%x@%p/phys@%jx next seq 0x%x gen 0x%x\n",
1128 (uintmax_t)unit->inv_queue,
1129 (uintmax_t)dmar_read8(unit, DMAR_IQA_REG),
1130 (uintmax_t)unit->inv_queue_size,
1131 dmar_read4(unit, DMAR_IQH_REG),
1132 dmar_read4(unit, DMAR_IQT_REG),
1133 unit->inv_queue_avail,
1134 dmar_read4(unit, DMAR_ICS_REG),
1135 dmar_read4(unit, DMAR_IECTL_REG),
1136 unit->inv_waitd_seq_hw,
1137 &unit->inv_waitd_seq_hw,
1138 (uintmax_t)unit->inv_waitd_seq_hw_phys,
1139 unit->inv_waitd_seq,
1140 unit->inv_waitd_gen);
1141 } else {
1142 db_printf("qi is disabled\n");
1143 }
1144 }
1145 if (show_ctxs) {
1146 db_printf("contexts:\n");
1147 LIST_FOREACH(ctx, &unit->contexts, link) {
1148 dmar_print_ctx(ctx, show_mappings);
1149 if (db_pager_quit)
1150 break;
1151 }
1152 }
1153}
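/*
 * Compared with the r257251 version of dmar_print_one(), this revision
 * additionally dumps the invalidation-event interrupt registers (IEDATA,
 * IEADDR, IEUADDR) and, when the invalidation queue is enabled, the queue
 * state (IQA/IQH/IQT, ICS, IECTL) together with the driver's wait
 * descriptor bookkeeping, guarded by DMAR_HAS_QI().
 */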
1154
1155DB_SHOW_COMMAND(dmar, db_dmar_print)
1156{
1157 bool show_ctxs, show_mappings;
1158
1159 show_ctxs = strchr(modif, 'c') != NULL;
1160 show_mappings = strchr(modif, 'm') != NULL;
1161 if (!have_addr) {
1162 db_printf("usage: show dmar [/c] [/m] index\n");
1163 return;
1164 }
1165 dmar_print_one((int)addr, show_ctxs, show_mappings);
1166}
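/*
 * Per the usage string above, "show dmar [/c] [/m] index" prints one unit:
 * 'c' adds its contexts, 'm' additionally their mappings, and the index
 * selects the entry in dmar_devs[].  The exact modifier spelling follows
 * the usual ddb conventions for command modifiers.
 */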
1167
1168DB_SHOW_ALL_COMMAND(dmars, db_show_all_dmars)
1169{
1170 int i;
1171 bool show_ctxs, show_mappings;
1172
1173 show_ctxs = strchr(modif, 'c') != NULL;
1174 show_mappings = strchr(modif, 'm') != NULL;
1175
1176 for (i = 0; i < dmar_devcnt; i++) {
1177 dmar_print_one(i, show_ctxs, show_mappings);
1178 if (db_pager_quit)
1179 break;
1180 }
1181}
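/*
 * DB_SHOW_ALL_COMMAND() registers this under ddb's "show all" table, so
 * "show all dmars" (optionally with the same 'c'/'m' modifiers) walks
 * every attached unit, stopping early if the pager is quit.
 */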
1182#endif