/*-
 * Copyright (c) 2013-2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/x86/iommu/intel_dmar.h 298144 2016-04-17 10:56:56Z kib $
 */

#ifndef __X86_IOMMU_INTEL_DMAR_H
#define	__X86_IOMMU_INTEL_DMAR_H

/* Host or physical memory address, after translation. */
typedef uint64_t dmar_haddr_t;
/* Guest or bus address, before translation. */
typedef uint64_t dmar_gaddr_t;

struct dmar_qi_genseq {
	u_int gen;
	uint32_t seq;
};

struct dmar_map_entry {
	dmar_gaddr_t start;
	dmar_gaddr_t end;
	dmar_gaddr_t free_after;	/* Free space after the entry */
	dmar_gaddr_t free_down;		/* Max free space below the
					   current R/B tree node */
	u_int flags;
	TAILQ_ENTRY(dmar_map_entry) dmamap_link; /* Link for dmamap entries */
	RB_ENTRY(dmar_map_entry) rb_entry;	 /* Links for domain entries */
	TAILQ_ENTRY(dmar_map_entry) unroll_link; /* Link for unroll after
						    dmamap_load failure */
	struct dmar_domain *domain;
	struct dmar_qi_genseq gseq;
};

RB_HEAD(dmar_gas_entries_tree, dmar_map_entry);
RB_PROTOTYPE(dmar_gas_entries_tree, dmar_map_entry, rb_entry,
    dmar_gas_cmp_entries);

#define	DMAR_MAP_ENTRY_PLACE	0x0001	/* Fake entry */
#define	DMAR_MAP_ENTRY_RMRR	0x0002	/* Permanent, not linked by
					   dmamap_link */
#define	DMAR_MAP_ENTRY_MAP	0x0004	/* Busdma created, linked by
					   dmamap_link */
#define	DMAR_MAP_ENTRY_UNMAPPED	0x0010	/* No backing pages */
#define	DMAR_MAP_ENTRY_QI_NF	0x0020	/* qi task, do not free entry */
#define	DMAR_MAP_ENTRY_READ	0x1000	/* Read permitted */
#define	DMAR_MAP_ENTRY_WRITE	0x2000	/* Write permitted */
#define	DMAR_MAP_ENTRY_SNOOP	0x4000	/* Snoop */
#define	DMAR_MAP_ENTRY_TM	0x8000	/* Transient */

/*
 * Locking annotations:
 * (u) - Protected by dmar unit lock
 * (d) - Protected by domain lock
 * (c) - Immutable after initialization
 */

/*
 * The domain abstraction.  Most non-constant members of the domain
 * are protected by the owning dmar unit lock, not by the domain lock.
 * Most importantly, the dmar lock protects the contexts list.
 *
 * The domain lock protects the address map for the domain and the
 * list of delayed unload entries.
 *
 * Page table pages and their contents are protected by the lock of
 * the vm object pgtbl_obj, which contains the page table pages.
 *
 * An illustrative locking sketch follows the structure definition
 * below.
 */
struct dmar_domain {
	int domain;			/* (c) DID, written in context entry */
	int mgaw;			/* (c) Real max address width */
	int agaw;			/* (c) Adjusted guest address width */
	int pglvl;			/* (c) The pagelevel */
	int awlvl;			/* (c) The pagelevel as the bitmask,
					   to set in context entry */
	dmar_gaddr_t end;		/* (c) Highest address + 1 in
					   the guest AS */
	u_int ctx_cnt;			/* (u) Number of contexts owned */
	u_int refs;			/* (u) Refs, including ctx */
	struct dmar_unit *dmar;		/* (c) */
	struct mtx lock;		/* (c) */
	LIST_ENTRY(dmar_domain) link;	/* (u) Member in the dmar list */
	LIST_HEAD(, dmar_ctx) contexts;	/* (u) */
	vm_object_t pgtbl_obj;		/* (c) Page table pages */
	u_int flags;			/* (u) */
	u_int entries_cnt;		/* (d) */
	struct dmar_gas_entries_tree rb_root; /* (d) */
	struct dmar_map_entries_tailq unload_entries; /* (d) Entries to
							 unload */
	struct dmar_map_entry *first_place, *last_place; /* (d) */
	struct task unload_task;	/* (c) */
	u_int batch_no;
};
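
/*
 * Illustrative sketch only, not part of the driver: the locking rules
 * above mean that the (u)-annotated members, such as the contexts
 * list, are accessed under the dmar unit lock, while the (d)-annotated
 * members, such as rb_root and unload_entries, are accessed under the
 * domain lock.  The helper name example_domain_ctx_count is made up;
 * the lock macros are the DMAR_LOCK()/DMAR_DOMAIN_LOCK() pairs defined
 * later in this header.
 *
 *	static u_int
 *	example_domain_ctx_count(struct dmar_domain *domain)
 *	{
 *		struct dmar_ctx *ctx;
 *		u_int n;
 *
 *		n = 0;
 *		DMAR_LOCK(domain->dmar);
 *		LIST_FOREACH(ctx, &domain->contexts, link)
 *			n++;
 *		DMAR_UNLOCK(domain->dmar);
 *		return (n);
 *	}
 *
 * Members annotated (d), e.g. the rb_root tree, would instead be
 * accessed between DMAR_DOMAIN_LOCK(domain) and
 * DMAR_DOMAIN_UNLOCK(domain).
 */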

struct dmar_ctx {
	struct bus_dma_tag_dmar ctx_tag; /* (c) Root tag */
	uint16_t rid;			/* (c) pci RID */
	uint64_t last_fault_rec[2];	/* Last fault reported */
	struct dmar_domain *domain;	/* (c) */
	LIST_ENTRY(dmar_ctx) link;	/* (u) Member in the domain list */
	u_int refs;			/* (u) References from tags */
	u_int flags;			/* (u) */
	u_long loads;			/* atomic updates, for stat only */
	u_long unloads;			/* same */
};

#define	DMAR_DOMAIN_GAS_INITED		0x0001
#define	DMAR_DOMAIN_PGTBL_INITED	0x0002
#define	DMAR_DOMAIN_IDMAP		0x0010	/* Domain uses identity
						   page table */
#define	DMAR_DOMAIN_RMRR		0x0020	/* Domain contains RMRR entry,
						   cannot be turned off */

/* struct dmar_ctx flags */
#define	DMAR_CTX_FAULTED	0x0001	/* Fault was reported,
					   last_fault_rec is valid */
#define	DMAR_CTX_DISABLED	0x0002	/* Device is disabled, the
					   ephemeral reference is kept
					   to prevent context destruction */

#define	DMAR_DOMAIN_PGLOCK(dom)		VM_OBJECT_WLOCK((dom)->pgtbl_obj)
#define	DMAR_DOMAIN_PGTRYLOCK(dom)	VM_OBJECT_TRYWLOCK((dom)->pgtbl_obj)
#define	DMAR_DOMAIN_PGUNLOCK(dom)	VM_OBJECT_WUNLOCK((dom)->pgtbl_obj)
#define	DMAR_DOMAIN_ASSERT_PGLOCKED(dom) \
	VM_OBJECT_ASSERT_WLOCKED((dom)->pgtbl_obj)

#define	DMAR_DOMAIN_LOCK(dom)	mtx_lock(&(dom)->lock)
#define	DMAR_DOMAIN_UNLOCK(dom)	mtx_unlock(&(dom)->lock)
#define	DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->lock, MA_OWNED)

struct dmar_msi_data {
	int irq;
	int irq_rid;
	struct resource *irq_res;
	void *intr_handle;
	int (*handler)(void *);
	int msi_data_reg;
	int msi_addr_reg;
	int msi_uaddr_reg;
	void (*enable_intr)(struct dmar_unit *);
	void (*disable_intr)(struct dmar_unit *);
	const char *name;
};

#define	DMAR_INTR_FAULT		0
#define	DMAR_INTR_QI		1
#define	DMAR_INTR_TOTAL		2

struct dmar_unit {
	device_t dev;
	int unit;
	uint16_t segment;
	uint64_t base;

	/* Resources */
	int reg_rid;
	struct resource *regs;

	struct dmar_msi_data intrs[DMAR_INTR_TOTAL];

	/* Hardware registers cache */
	uint32_t hw_ver;
	uint64_t hw_cap;
	uint64_t hw_ecap;
	uint32_t hw_gcmd;

	/* Data for being a dmar */
	struct mtx lock;
	LIST_HEAD(, dmar_domain) domains;
	struct unrhdr *domids;
	vm_object_t ctx_obj;
	u_int barrier_flags;

	/* Fault handler data */
	struct mtx fault_lock;
	uint64_t *fault_log;
	int fault_log_head;
	int fault_log_tail;
	int fault_log_size;
	struct task fault_task;
	struct taskqueue *fault_taskqueue;

	/* QI */
	int qi_enabled;
	vm_offset_t inv_queue;
	vm_size_t inv_queue_size;
	uint32_t inv_queue_avail;
	uint32_t inv_queue_tail;
	volatile uint32_t inv_waitd_seq_hw; /* hw writes here on wait
					       descriptor completion */
	uint64_t inv_waitd_seq_hw_phys;
	uint32_t inv_waitd_seq; /* next sequence number to use for wait descr */
	u_int inv_waitd_gen;	/* seq number generation AKA seq overflows */
	u_int inv_seq_waiters;	/* count of waiters for seq */
	u_int inv_queue_full;	/* informational counter */

	/* IR */
	int ir_enabled;
	vm_paddr_t irt_phys;
	dmar_irte_t *irt;
	u_int irte_cnt;
	vmem_t *irtids;

	/* Delayed freeing of map entries queue processing */
	struct dmar_map_entries_tailq tlb_flush_entries;
	struct task qi_task;
	struct taskqueue *qi_taskqueue;

	/* Busdma delayed map load */
	struct task dmamap_load_task;
	TAILQ_HEAD(, bus_dmamap_dmar) delayed_maps;
	struct taskqueue *delayed_taskqueue;

	int dma_enabled;
};
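
/*
 * Illustrative sketch only: the wait-descriptor fields above pair a
 * software sequence number (inv_waitd_seq), wrapped into a generation
 * counter (inv_waitd_gen) on overflow, with the value the hardware
 * writes back into inv_waitd_seq_hw when a wait descriptor completes.
 * Assuming that scheme, a completion test for a recorded genseq could
 * look roughly like the hypothetical helper below; the real check
 * lives in the queued-invalidation code, not in this header.
 *
 *	static bool
 *	example_qi_seq_processed(const struct dmar_unit *unit,
 *	    const struct dmar_qi_genseq *gseq)
 *	{
 *
 *		return (gseq->gen < unit->inv_waitd_gen ||
 *		    (gseq->gen == unit->inv_waitd_gen &&
 *		    gseq->seq <= unit->inv_waitd_seq_hw));
 *	}
 */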

#define	DMAR_LOCK(dmar)		mtx_lock(&(dmar)->lock)
#define	DMAR_UNLOCK(dmar)	mtx_unlock(&(dmar)->lock)
#define	DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->lock, MA_OWNED)

#define	DMAR_FAULT_LOCK(dmar)	mtx_lock_spin(&(dmar)->fault_lock)
#define	DMAR_FAULT_UNLOCK(dmar)	mtx_unlock_spin(&(dmar)->fault_lock)
#define	DMAR_FAULT_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->fault_lock, MA_OWNED)

#define	DMAR_IS_COHERENT(dmar)	(((dmar)->hw_ecap & DMAR_ECAP_C) != 0)
#define	DMAR_HAS_QI(dmar)	(((dmar)->hw_ecap & DMAR_ECAP_QI) != 0)
#define	DMAR_X2APIC(dmar) \
	(x2apic_mode && ((dmar)->hw_ecap & DMAR_ECAP_EIM) != 0)

/* Barrier ids */
#define	DMAR_BARRIER_RMRR	0
#define	DMAR_BARRIER_USEQ	1

struct dmar_unit *dmar_find(device_t dev);
struct dmar_unit *dmar_find_hpet(device_t dev, uint16_t *rid);
struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid);

u_int dmar_nd2mask(u_int nd);
bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
int domain_set_agaw(struct dmar_domain *domain, int mgaw);
int dmar_maxaddr2mgaw(struct dmar_unit *unit, dmar_gaddr_t maxaddr,
    bool allow_less);
vm_pindex_t pglvl_max_pages(int pglvl);
int domain_is_sp_lvl(struct dmar_domain *domain, int lvl);
dmar_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
dmar_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl);
int calc_am(struct dmar_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size,
    dmar_gaddr_t *isizep);
struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags);
void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags);
void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
    struct sf_buf **sf);
void dmar_unmap_pgtbl(struct sf_buf *sf);
int dmar_load_root_entry_ptr(struct dmar_unit *unit);
int dmar_inv_ctx_glob(struct dmar_unit *unit);
int dmar_inv_iotlb_glob(struct dmar_unit *unit);
int dmar_flush_write_bufs(struct dmar_unit *unit);
void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst);
void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst);
void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst);
int dmar_enable_translation(struct dmar_unit *unit);
int dmar_disable_translation(struct dmar_unit *unit);
int dmar_load_irt_ptr(struct dmar_unit *unit);
int dmar_enable_ir(struct dmar_unit *unit);
int dmar_disable_ir(struct dmar_unit *unit);
bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id);
void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id);

int dmar_fault_intr(void *arg);
void dmar_enable_fault_intr(struct dmar_unit *unit);
void dmar_disable_fault_intr(struct dmar_unit *unit);
int dmar_init_fault_log(struct dmar_unit *unit);
void dmar_fini_fault_log(struct dmar_unit *unit);

int dmar_qi_intr(void *arg);
void dmar_enable_qi_intr(struct dmar_unit *unit);
void dmar_disable_qi_intr(struct dmar_unit *unit);
int dmar_init_qi(struct dmar_unit *unit);
void dmar_fini_qi(struct dmar_unit *unit);
void dmar_qi_invalidate_locked(struct dmar_domain *domain, dmar_gaddr_t start,
    dmar_gaddr_t size, struct dmar_qi_genseq *pseq);
void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit);
void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt);

vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain,
    dmar_gaddr_t maxaddr);
void put_idmap_pgtbl(vm_object_t obj);
int domain_map_buf(struct dmar_domain *domain, dmar_gaddr_t base,
    dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
int domain_unmap_buf(struct dmar_domain *domain, dmar_gaddr_t base,
    dmar_gaddr_t size, int flags);
void domain_flush_iotlb_sync(struct dmar_domain *domain, dmar_gaddr_t base,
    dmar_gaddr_t size);
int domain_alloc_pgtbl(struct dmar_domain *domain);
void domain_free_pgtbl(struct dmar_domain *domain);

struct dmar_ctx *dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev,
    bool rmrr);
struct dmar_ctx *dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev,
    uint16_t rid, bool id_mapped, bool rmrr_init);
int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx);
void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
void dmar_free_ctx(struct dmar_ctx *ctx);
struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid);
void dmar_domain_unload_entry(struct dmar_map_entry *entry, bool free);
void dmar_domain_unload(struct dmar_domain *domain,
    struct dmar_map_entries_tailq *entries, bool cansleep);
void dmar_domain_free_entry(struct dmar_map_entry *entry, bool free);

int dmar_init_busdma(struct dmar_unit *unit);
void dmar_fini_busdma(struct dmar_unit *unit);
device_t dmar_get_requester(device_t dev, uint16_t *rid);

void dmar_gas_init_domain(struct dmar_domain *domain);
void dmar_gas_fini_domain(struct dmar_domain *domain);
struct dmar_map_entry *dmar_gas_alloc_entry(struct dmar_domain *domain,
    u_int flags);
void dmar_gas_free_entry(struct dmar_domain *domain,
    struct dmar_map_entry *entry);
void dmar_gas_free_space(struct dmar_domain *domain,
    struct dmar_map_entry *entry);
int dmar_gas_map(struct dmar_domain *domain,
    const struct bus_dma_tag_common *common, dmar_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct dmar_map_entry **res);
void dmar_gas_free_region(struct dmar_domain *domain,
    struct dmar_map_entry *entry);
int dmar_gas_map_region(struct dmar_domain *domain,
    struct dmar_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
int dmar_gas_reserve_region(struct dmar_domain *domain, dmar_gaddr_t start,
    dmar_gaddr_t end);

void dmar_dev_parse_rmrr(struct dmar_domain *domain, device_t dev,
    struct dmar_map_entries_tailq *rmrr_entries);
int dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar);

void dmar_quirks_post_ident(struct dmar_unit *dmar);
void dmar_quirks_pre_use(struct dmar_unit *dmar);

int dmar_init_irt(struct dmar_unit *unit);
void dmar_fini_irt(struct dmar_unit *unit);

#define	DMAR_GM_CANWAIT	0x0001
#define	DMAR_GM_CANSPLIT 0x0002

#define	DMAR_PGF_WAITOK	0x0001
#define	DMAR_PGF_ZERO	0x0002
#define	DMAR_PGF_ALLOC	0x0004
#define	DMAR_PGF_NOALLOC 0x0008
#define	DMAR_PGF_OBJL	0x0010

extern dmar_haddr_t dmar_high;
extern int haw;
extern int dmar_tbl_pagecnt;
extern int dmar_match_verbose;
extern int dmar_batch_coalesce;
extern int dmar_check_free;

static inline uint32_t
dmar_read4(const struct dmar_unit *unit, int reg)
{

	return (bus_read_4(unit->regs, reg));
}

static inline uint64_t
dmar_read8(const struct dmar_unit *unit, int reg)
{
#ifdef __i386__
	uint32_t high, low;

	low = bus_read_4(unit->regs, reg);
	high = bus_read_4(unit->regs, reg + 4);
	return (low | ((uint64_t)high << 32));
#else
	return (bus_read_8(unit->regs, reg));
#endif
}

static inline void
dmar_write4(const struct dmar_unit *unit, int reg, uint32_t val)
{

	KASSERT(reg != DMAR_GCMD_REG || (val & DMAR_GCMD_TE) ==
	    (unit->hw_gcmd & DMAR_GCMD_TE),
	    ("dmar%d clearing TE 0x%08x 0x%08x", unit->unit,
	    unit->hw_gcmd, val));
	bus_write_4(unit->regs, reg, val);
}

static inline void
dmar_write8(const struct dmar_unit *unit, int reg, uint64_t val)
{

	KASSERT(reg != DMAR_GCMD_REG, ("8byte GCMD write"));
#ifdef __i386__
	uint32_t high, low;

	low = val;
	high = val >> 32;
	bus_write_4(unit->regs, reg, low);
	bus_write_4(unit->regs, reg + 4, high);
#else
	bus_write_8(unit->regs, reg, val);
#endif
}

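/*
 * Illustrative sketch only: the global command register is not simply
 * read back, which is why the unit keeps the hw_gcmd shadow above and
 * why dmar_write4() asserts that the TE bit never changes behind the
 * shadow's back.  Assuming the DMAR_GSTS_REG/DMAR_GSTS_TES definitions
 * from the register definitions header, enabling translation would
 * roughly follow the pattern below (example_enable_translation is a
 * made-up name; the real routine is dmar_enable_translation(),
 * declared earlier in this file, and it also handles errors and
 * timeouts).
 *
 *	static void
 *	example_enable_translation(struct dmar_unit *unit)
 *	{
 *
 *		DMAR_ASSERT_LOCKED(unit);
 *		unit->hw_gcmd |= DMAR_GCMD_TE;
 *		dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
 *		while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_TES) == 0)
 *			cpu_spinwait();
 *	}
 */
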
/*
 * dmar_pte_store and dmar_pte_clear ensure that on i386, 32-bit writes
 * are issued in the correct order.  For store, the lower word,
 * containing the P or R and W bits, is set only after the high word
 * is written.  For clear, the P bit is cleared first, then the high
 * word is cleared.
 *
 * dmar_pte_update updates the pte.  For amd64, the update is atomic.
 * For i386, it first disables the entry by clearing the word
 * containing the P bit, and then defers to dmar_pte_store.  The locked
 * cmpxchg8b is probably available on any machine having DMAR support,
 * but the interrupt translation table may be mapped uncached.
 */
static inline void
dmar_pte_store1(volatile uint64_t *dst, uint64_t val)
{
#ifdef __i386__
	volatile uint32_t *p;
	uint32_t hi, lo;

	hi = val >> 32;
	lo = val;
	p = (volatile uint32_t *)dst;
	*(p + 1) = hi;
	*p = lo;
#else
	*dst = val;
#endif
}

static inline void
dmar_pte_store(volatile uint64_t *dst, uint64_t val)
{

	KASSERT(*dst == 0, ("used pte %p oldval %jx newval %jx",
	    dst, (uintmax_t)*dst, (uintmax_t)val));
	dmar_pte_store1(dst, val);
}

static inline void
dmar_pte_update(volatile uint64_t *dst, uint64_t val)
{

#ifdef __i386__
	volatile uint32_t *p;

	p = (volatile uint32_t *)dst;
	*p = 0;
#endif
	dmar_pte_store1(dst, val);
}

static inline void
dmar_pte_clear(volatile uint64_t *dst)
{
#ifdef __i386__
	volatile uint32_t *p;

	p = (volatile uint32_t *)dst;
	*p = 0;
	*(p + 1) = 0;
#else
	*dst = 0;
#endif
}
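
/*
 * Illustrative sketch only: a page-table writer typically stores a new
 * entry with dmar_pte_store() and then, for units without coherent
 * page-walks, pushes the cache line to memory with
 * dmar_flush_pte_to_ram().  The helper name below and the assumption
 * that dmar_pte_t wraps a single 64-bit word are illustrative; the
 * real mapping code lives in the page-table implementation, not here.
 *
 *	static void
 *	example_install_pte(struct dmar_unit *unit, dmar_pte_t *pte,
 *	    uint64_t val)
 *	{
 *
 *		dmar_pte_store((volatile uint64_t *)pte, val);
 *		dmar_flush_pte_to_ram(unit, pte);
 *	}
 */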

static inline bool
dmar_test_boundary(dmar_gaddr_t start, dmar_gaddr_t size,
    dmar_gaddr_t boundary)
{

	if (boundary == 0)
		return (true);
	return (start + size <= ((start + boundary) & ~(boundary - 1)));
}
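
/*
 * Worked example: with boundary == 0x1000, start == 0xe00 and
 * size == 0x200, the next boundary above start is
 * (0xe00 + 0x1000) & ~0xfff == 0x1000, and start + size == 0x1000,
 * so the range just fits below the boundary and the function returns
 * true.  With size == 0x300, start + size == 0x1100 crosses into the
 * next 4K segment and the function returns false.
 */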

#ifdef INVARIANTS
#define	TD_PREP_PINNED_ASSERT						\
	int old_td_pinned;						\
	old_td_pinned = curthread->td_pinned
#define	TD_PINNED_ASSERT						\
	KASSERT(curthread->td_pinned == old_td_pinned,			\
	    ("pin count leak: %d %d %s:%d", curthread->td_pinned,	\
	    old_td_pinned, __FILE__, __LINE__))
#else
#define	TD_PREP_PINNED_ASSERT
#define	TD_PINNED_ASSERT
#endif

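/*
 * Illustrative sketch only: the macro pair above brackets code that
 * may pin the current thread (for instance around temporary sf_buf
 * mappings of page table pages) and, under INVARIANTS, asserts that
 * every sched_pin() acquired inside the bracket was released again.
 * The function and the do_per_cpu_work() call below are hypothetical;
 * only the bracketing pattern is the point.
 *
 *	void
 *	example_pinned_section(void)
 *	{
 *		TD_PREP_PINNED_ASSERT;
 *
 *		sched_pin();
 *		do_per_cpu_work();
 *		sched_unpin();
 *		TD_PINNED_ASSERT;
 *	}
 */
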
#endif