intel_dmar.h (277023) vs. intel_dmar.h (280260)
1/*-
2 * Copyright (c) 2013 The FreeBSD Foundation
2 * Copyright (c) 2013-2015 The FreeBSD Foundation
3 * All rights reserved.
4 *
5 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
6 * under sponsorship from the FreeBSD Foundation.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:

--- 10 unchanged lines hidden ---

21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: head/sys/x86/iommu/intel_dmar.h 277023 2015-01-11 20:27:15Z kib $
29 * $FreeBSD: head/sys/x86/iommu/intel_dmar.h 280260 2015-03-19 13:57:47Z kib $
30 */
31
32#ifndef __X86_IOMMU_INTEL_DMAR_H
33#define __X86_IOMMU_INTEL_DMAR_H
34
35/* Host or physical memory address, after translation. */
36typedef uint64_t dmar_haddr_t;
37/* Guest or bus address, before translation. */

--- 142 unchanged lines hidden ---

180 volatile uint32_t inv_waitd_seq_hw; /* hw writes there on wait
181 descr completion */
182 uint64_t inv_waitd_seq_hw_phys;
183 uint32_t inv_waitd_seq; /* next sequence number to use for wait descr */
184 u_int inv_waitd_gen; /* seq number generation AKA seq overflows */
185 u_int inv_seq_waiters; /* count of waiters for seq */
186 u_int inv_queue_full; /* informational counter */
187
188 /* IR */
189 int ir_enabled;
190 vm_paddr_t irt_phys;
191 dmar_irte_t *irt;
192 u_int irte_cnt;
193 vmem_t *irtids;
194
195 /* Delayed freeing of map entries queue processing */
196 struct dmar_map_entries_tailq tlb_flush_entries;
197 struct task qi_task;
198 struct taskqueue *qi_taskqueue;
199
200 /* Busdma delayed map load */
201 struct task dmamap_load_task;
202 TAILQ_HEAD(, bus_dmamap_dmar) delayed_maps;
203 struct taskqueue *delayed_taskqueue;
204
205 int dma_enabled;
206};
207
208#define DMAR_LOCK(dmar) mtx_lock(&(dmar)->lock)
209#define DMAR_UNLOCK(dmar) mtx_unlock(&(dmar)->lock)
210#define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->lock, MA_OWNED)
211
212#define DMAR_FAULT_LOCK(dmar) mtx_lock_spin(&(dmar)->fault_lock)
213#define DMAR_FAULT_UNLOCK(dmar) mtx_unlock_spin(&(dmar)->fault_lock)
214#define DMAR_FAULT_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->fault_lock, MA_OWNED)
215
216#define DMAR_IS_COHERENT(dmar) (((dmar)->hw_ecap & DMAR_ECAP_C) != 0)
217#define DMAR_HAS_QI(dmar) (((dmar)->hw_ecap & DMAR_ECAP_QI) != 0)
218#define DMAR_X2APIC(dmar) \
219 (x2apic_mode && ((dmar)->hw_ecap & DMAR_ECAP_EIM) != 0)
220
221/* Barrier ids */
222#define DMAR_BARRIER_RMRR 0
223#define DMAR_BARRIER_USEQ 1
224
225struct dmar_unit *dmar_find(device_t dev);
226struct dmar_unit *dmar_find_hpet(device_t dev, uint16_t *rid);
227struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid);
228
229u_int dmar_nd2mask(u_int nd);
230bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
231int ctx_set_agaw(struct dmar_ctx *ctx, int mgaw);
232int dmar_maxaddr2mgaw(struct dmar_unit* unit, dmar_gaddr_t maxaddr,
233 bool allow_less);
234vm_pindex_t pglvl_max_pages(int pglvl);
235int ctx_is_sp_lvl(struct dmar_ctx *ctx, int lvl);

--- 10 unchanged lines hidden ---

246int dmar_inv_ctx_glob(struct dmar_unit *unit);
247int dmar_inv_iotlb_glob(struct dmar_unit *unit);
248int dmar_flush_write_bufs(struct dmar_unit *unit);
249void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst);
250void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst);
251void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst);
252int dmar_enable_translation(struct dmar_unit *unit);
253int dmar_disable_translation(struct dmar_unit *unit);
254int dmar_load_irt_ptr(struct dmar_unit *unit);
255int dmar_enable_ir(struct dmar_unit *unit);
256int dmar_disable_ir(struct dmar_unit *unit);
257bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id);
258void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id);
259
260int dmar_fault_intr(void *arg);
261void dmar_enable_fault_intr(struct dmar_unit *unit);
262void dmar_disable_fault_intr(struct dmar_unit *unit);
263int dmar_init_fault_log(struct dmar_unit *unit);
264void dmar_fini_fault_log(struct dmar_unit *unit);
265
266int dmar_qi_intr(void *arg);
267void dmar_enable_qi_intr(struct dmar_unit *unit);
268void dmar_disable_qi_intr(struct dmar_unit *unit);
269int dmar_init_qi(struct dmar_unit *unit);
270void dmar_fini_qi(struct dmar_unit *unit);
271void dmar_qi_invalidate_locked(struct dmar_ctx *ctx, dmar_gaddr_t start,
272 dmar_gaddr_t size, struct dmar_qi_genseq *pseq);
273void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
274void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
275void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit);
276void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt);
277
278vm_object_t ctx_get_idmap_pgtbl(struct dmar_ctx *ctx, dmar_gaddr_t maxaddr);
279void put_idmap_pgtbl(vm_object_t obj);
280int ctx_map_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
281 vm_page_t *ma, uint64_t pflags, int flags);
282int ctx_unmap_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
283 int flags);
284void ctx_flush_iotlb_sync(struct dmar_ctx *ctx, dmar_gaddr_t base,

--- 10 unchanged lines hidden ---

295struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid);
296void dmar_ctx_unload_entry(struct dmar_map_entry *entry, bool free);
297void dmar_ctx_unload(struct dmar_ctx *ctx,
298 struct dmar_map_entries_tailq *entries, bool cansleep);
299void dmar_ctx_free_entry(struct dmar_map_entry *entry, bool free);
300
301int dmar_init_busdma(struct dmar_unit *unit);
302void dmar_fini_busdma(struct dmar_unit *unit);
303device_t dmar_get_requester(device_t dev, uint16_t *rid);
304
305void dmar_gas_init_ctx(struct dmar_ctx *ctx);
306void dmar_gas_fini_ctx(struct dmar_ctx *ctx);
307struct dmar_map_entry *dmar_gas_alloc_entry(struct dmar_ctx *ctx, u_int flags);
308void dmar_gas_free_entry(struct dmar_ctx *ctx, struct dmar_map_entry *entry);
309void dmar_gas_free_space(struct dmar_ctx *ctx, struct dmar_map_entry *entry);
310int dmar_gas_map(struct dmar_ctx *ctx, const struct bus_dma_tag_common *common,
311 dmar_gaddr_t size, u_int eflags, u_int flags, vm_page_t *ma,

--- 6 unchanged lines hidden ---

318
319void dmar_ctx_parse_rmrr(struct dmar_ctx *ctx, device_t dev,
320 struct dmar_map_entries_tailq *rmrr_entries);
321int dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar);
322
323void dmar_quirks_post_ident(struct dmar_unit *dmar);
324void dmar_quirks_pre_use(struct dmar_unit *dmar);
325
326int dmar_init_irt(struct dmar_unit *unit);
327void dmar_fini_irt(struct dmar_unit *unit);
328
329#define DMAR_GM_CANWAIT 0x0001
330#define DMAR_GM_CANSPLIT 0x0002
331
332#define DMAR_PGF_WAITOK 0x0001
333#define DMAR_PGF_ZERO 0x0002
334#define DMAR_PGF_ALLOC 0x0004
335#define DMAR_PGF_NOALLOC 0x0008
336#define DMAR_PGF_OBJL 0x0010

--- 54 unchanged lines hidden ---

391}
392
393/*
394 * dmar_pte_store and dmar_pte_clear ensure that on i386, 32bit writes
395 * are issued in the correct order. For store, the lower word,
396 * containing the P or R and W bits, is set only after the high word
397 * is written. For clear, the P bit is cleared first, then the high
398 * word is cleared.
399 *
400 * dmar_pte_update updates the pte. For amd64, the update is atomic.
401 * For i386, it first disables the entry by clearing the word
402 * containing the P bit, and then defer to dmar_pte_store. The locked
403 * cmpxchg8b is probably available on any machine having DMAR support,
404 * but interrupt translation table may be mapped uncached.
405 */
406static inline void
379dmar_pte_store(volatile uint64_t *dst, uint64_t val)
407dmar_pte_store1(volatile uint64_t *dst, uint64_t val)
408{
381
382 KASSERT(*dst == 0, ("used pte %p oldval %jx newval %jx",
383 dst, (uintmax_t)*dst, (uintmax_t)val));
409#ifdef __i386__
410 volatile uint32_t *p;
411 uint32_t hi, lo;
412
413 hi = val >> 32;
414 lo = val;
415 p = (volatile uint32_t *)dst;
416 *(p + 1) = hi;
417 *p = lo;
418#else
419 *dst = val;
420#endif
421}
422
423static inline void
424dmar_pte_store(volatile uint64_t *dst, uint64_t val)
425{
426
427 KASSERT(*dst == 0, ("used pte %p oldval %jx newval %jx",
428 dst, (uintmax_t)*dst, (uintmax_t)val));
429 dmar_pte_store1(dst, val);
430}
431
432static inline void
433dmar_pte_update(volatile uint64_t *dst, uint64_t val)
434{
435
436#ifdef __i386__
437 volatile uint32_t *p;
438
439 p = (volatile uint32_t *)dst;
440 *p = 0;
441#endif
442 dmar_pte_store1(dst, val);
443}
444
445static inline void
446dmar_pte_clear(volatile uint64_t *dst)
447{
448#ifdef __i386__
449 volatile uint32_t *p;
450
451 p = (volatile uint32_t *)dst;
452 *p = 0;
453 *(p + 1) = 0;

--- 29 unchanged lines hidden ---
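
A side note on the ordering described by the updated comment above dmar_pte_store1() and dmar_pte_update(): on i386 a 64-bit DMAR entry can only be written as two 32-bit stores, so the code keeps the word holding the present (P) bit invalid until the rest of the entry is in place. The fragment below is a minimal standalone sketch of that clear-then-publish ordering, not code from intel_dmar.h; the helper name pte_update_sketch() is invented for illustration, and it shows only the i386 path (on amd64 the real dmar_pte_update() boils down to a single 64-bit store).

#include <stdint.h>

/*
 * Sketch of a 64-bit entry update done with 32-bit stores, mirroring
 * dmar_pte_update() -> dmar_pte_store1(): clear the low word first so
 * the entry is seen as not-present, write the new high word, then
 * store the new low word (which carries the P bit) last.
 */
static inline void
pte_update_sketch(volatile uint64_t *dst, uint64_t val)
{
	volatile uint32_t *p = (volatile uint32_t *)dst;

	*p = 0;					/* disable: low word cleared */
	*(p + 1) = (uint32_t)(val >> 32);	/* new high word */
	*p = (uint32_t)val;			/* publish: low word written last */
}

A locked cmpxchg8b would make the update atomic on i386 as well, but, as the comment notes, the interrupt translation table may be mapped uncached, so the plain ordered stores are used instead.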