Searched refs:mo (Results 1 - 25 of 300) sorted by relevance


/netbsd-current/sys/kern/
kern_mutex_obj.c 58 struct kmutexobj *mo; local
60 mo = kmem_intr_alloc(sizeof(*mo), KM_SLEEP);
61 KASSERT(ALIGNED_POINTER(mo, coherency_unit));
62 _mutex_init(&mo->mo_lock, type, ipl,
64 mo->mo_magic = MUTEX_OBJ_MAGIC;
65 mo->mo_refcnt = 1;
67 return (kmutex_t *)mo;
78 struct kmutexobj *mo; local
80 mo
101 struct kmutexobj *mo = (struct kmutexobj *)lock; local
122 struct kmutexobj *mo = (struct kmutexobj *)lock; local
149 struct kmutexobj *mo = (struct kmutexobj *)lock; local
[all...]
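The fragments above cover most of mutex_obj_alloc(). A minimal sketch of how they fit together, in KNF style; the struct definition and the elided fourth _mutex_init() argument are guesses, and only the quoted lines are verbatim:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/kmem.h>
    #include <sys/mutex.h>

    struct kmutexobj {
            kmutex_t        mo_lock;        /* assumed member layout */
            u_int           mo_magic;
            u_int           mo_refcnt;
    };

    kmutex_t *
    mutex_obj_alloc(kmutex_type_t type, int ipl)
    {
            struct kmutexobj *mo;

            mo = kmem_intr_alloc(sizeof(*mo), KM_SLEEP);
            KASSERT(ALIGNED_POINTER(mo, coherency_unit));
            _mutex_init(&mo->mo_lock, type, ipl,
                (uintptr_t)__builtin_return_address(0)); /* elided arg: a guess */
            mo->mo_magic = MUTEX_OBJ_MAGIC;
            mo->mo_refcnt = 1;

            return (kmutex_t *)mo;
    }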
/netbsd-current/sys/external/bsd/compiler_rt/dist/include/sanitizer/
tsan_interface_atomic.h 45 __tsan_memory_order mo);
47 __tsan_memory_order mo);
49 __tsan_memory_order mo);
51 __tsan_memory_order mo);
54 __tsan_memory_order mo);
58 __tsan_memory_order mo);
60 __tsan_memory_order mo);
62 __tsan_memory_order mo);
64 __tsan_memory_order mo);
67 __tsan_memory_order mo);
[all...]
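All three copies of this header declare the __tsan_atomicN_* entry points, each taking a trailing __tsan_memory_order. A hedged example of a direct call; under -fsanitize=thread the compiler normally emits these calls itself:

    #include <sanitizer/tsan_interface_atomic.h>

    /* Direct call shown only for illustration. */
    static int
    load_acquire(volatile int *p)
    {
            return __tsan_atomic32_load((const volatile __tsan_atomic32 *)p,
                __tsan_memory_order_acquire);
    }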
/netbsd-current/external/gpl3/gcc.old/dist/libsanitizer/include/sanitizer/
tsan_interface_atomic.h 43 __tsan_memory_order mo);
45 __tsan_memory_order mo);
47 __tsan_memory_order mo);
49 __tsan_memory_order mo);
52 __tsan_memory_order mo);
56 __tsan_memory_order mo);
58 __tsan_memory_order mo);
60 __tsan_memory_order mo);
62 __tsan_memory_order mo);
65 __tsan_memory_order mo);
[all...]
/netbsd-current/external/gpl3/gcc/dist/libsanitizer/include/sanitizer/
tsan_interface_atomic.h 44 __tsan_memory_order mo);
46 __tsan_memory_order mo);
48 __tsan_memory_order mo);
50 __tsan_memory_order mo);
53 __tsan_memory_order mo);
57 __tsan_memory_order mo);
59 __tsan_memory_order mo);
61 __tsan_memory_order mo);
63 __tsan_memory_order mo);
66 __tsan_memory_order mo);
[all...]
/netbsd-current/sys/external/bsd/compiler_rt/dist/lib/tsan/rtl/
tsan_interface_atomic.cc 36 static bool IsLoadOrder(morder mo) { argument
37 return mo == mo_relaxed || mo == mo_consume
38 || mo == mo_acquire || mo == mo_seq_cst;
41 static bool IsStoreOrder(morder mo) { argument
42 return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
45 static bool IsReleaseOrder(morder mo) { argument
49 IsAcquireOrder(morder mo) argument
54 IsAcqRelOrder(morder mo) argument
197 to_mo(morder mo) argument
211 NoTsanAtomicLoad(const volatile T *a, morder mo) argument
216 NoTsanAtomicLoad(const volatile a128 *a, morder mo) argument
223 AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) argument
247 NoTsanAtomicStore(volatile T *a, T v, morder mo) argument
252 NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) argument
259 AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
282 AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
304 NoTsanAtomicExchange(volatile T *a, T v, morder mo) argument
309 NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) argument
314 NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) argument
319 NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) argument
324 NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) argument
329 NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) argument
334 NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) argument
339 AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
345 AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
351 AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
357 AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
363 AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
369 AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
375 AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
381 NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) argument
386 NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) argument
398 NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) argument
404 AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v, morder mo, morder fmo) argument
437 AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T c, T v, morder mo, morder fmo) argument
444 NoTsanAtomicFence(morder mo) argument
448 AtomicFence(ThreadState *thr, uptr pc, morder mo) argument
459 convert_morder(morder mo) argument
492 ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a, morder mo, const char *func) argument
506 AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) argument
524 __tsan_atomic8_load(const volatile a8 *a, morder mo) argument
529 __tsan_atomic16_load(const volatile a16 *a, morder mo) argument
534 __tsan_atomic32_load(const volatile a32 *a, morder mo) argument
539 __tsan_atomic64_load(const volatile a64 *a, morder mo) argument
545 __tsan_atomic128_load(const volatile a128 *a, morder mo) argument
551 __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) argument
556 __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) argument
561 __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) argument
566 __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) argument
572 __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) argument
578 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) argument
583 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) argument
588 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) argument
593 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) argument
599 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) argument
605 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) argument
610 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) argument
615 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) argument
620 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) argument
626 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) argument
632 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) argument
637 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) argument
642 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) argument
647 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) argument
653 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) argument
659 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) argument
664 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) argument
669 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) argument
674 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) argument
680 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) argument
686 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) argument
691 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) argument
696 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) argument
701 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) argument
707 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) argument
713 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) argument
718 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) argument
723 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) argument
728 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) argument
734 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) argument
740 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) argument
745 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) argument
750 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) argument
755 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) argument
761 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) argument
767 __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, morder mo, morder fmo) argument
773 __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v, morder mo, morder fmo) argument
779 __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v, morder mo, morder fmo) argument
785 __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v, morder mo, morder fmo) argument
792 __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) argument
799 __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo, morder fmo) argument
805 __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v, morder mo, morder fmo) argument
811 __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v, morder mo, morder fmo) argument
817 __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v, morder mo, morder fmo) argument
824 __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) argument
831 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo, morder fmo) argument
837 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, morder mo, morder fmo) argument
843 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, morder mo, morder fmo) argument
849 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, morder mo, morder fmo) argument
856 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v, morder mo, morder fmo) argument
863 __tsan_atomic_thread_fence(morder mo) argument
869 __tsan_atomic_signal_fence(morder mo) argument
[all...]
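The first few matches spell out the memory-order classification predicates almost in full. Reconstructed below; the two bodies the search view elides are the obvious completions, not verbatim:

    #include <stdbool.h>

    typedef enum {
            mo_relaxed, mo_consume, mo_acquire, mo_release,
            mo_acq_rel, mo_seq_cst
    } morder;

    static bool IsLoadOrder(morder mo) {    /* as matched */
            return mo == mo_relaxed || mo == mo_consume
                || mo == mo_acquire || mo == mo_seq_cst;
    }
    static bool IsStoreOrder(morder mo) {   /* as matched */
            return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
    }
    /* Bodies below are elided by the search view; obvious completions: */
    static bool IsReleaseOrder(morder mo) {
            return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
    }
    static bool IsAcquireOrder(morder mo) {
            return mo == mo_acquire || mo == mo_acq_rel || mo == mo_seq_cst;
    }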
/netbsd-current/external/gpl3/gcc.old/dist/libsanitizer/tsan/
tsan_interface_atomic.cc 34 static bool IsLoadOrder(morder mo) { argument
35 return mo == mo_relaxed || mo == mo_consume
36 || mo == mo_acquire || mo == mo_seq_cst;
39 static bool IsStoreOrder(morder mo) { argument
40 return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
43 static bool IsReleaseOrder(morder mo) { argument
47 IsAcquireOrder(morder mo) argument
52 IsAcqRelOrder(morder mo) argument
195 to_mo(morder mo) argument
209 NoTsanAtomicLoad(const volatile T *a, morder mo) argument
214 NoTsanAtomicLoad(const volatile a128 *a, morder mo) argument
221 AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) argument
245 NoTsanAtomicStore(volatile T *a, T v, morder mo) argument
250 NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) argument
257 AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
280 AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
302 NoTsanAtomicExchange(volatile T *a, T v, morder mo) argument
307 NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) argument
312 NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) argument
317 NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) argument
322 NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) argument
327 NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) argument
332 NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) argument
337 AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
343 AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
349 AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
355 AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
361 AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
367 AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
373 AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
379 NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) argument
384 NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) argument
396 NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) argument
402 AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v, morder mo, morder fmo) argument
435 AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T c, T v, morder mo, morder fmo) argument
442 NoTsanAtomicFence(morder mo) argument
446 AtomicFence(ThreadState *thr, uptr pc, morder mo) argument
457 convert_morder(morder mo) argument
490 ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a, morder mo, const char *func) argument
504 AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) argument
522 __tsan_atomic8_load(const volatile a8 *a, morder mo) argument
527 __tsan_atomic16_load(const volatile a16 *a, morder mo) argument
532 __tsan_atomic32_load(const volatile a32 *a, morder mo) argument
537 __tsan_atomic64_load(const volatile a64 *a, morder mo) argument
543 __tsan_atomic128_load(const volatile a128 *a, morder mo) argument
549 __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) argument
554 __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) argument
559 __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) argument
564 __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) argument
570 __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) argument
576 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) argument
581 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) argument
586 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) argument
591 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) argument
597 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) argument
603 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) argument
608 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) argument
613 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) argument
618 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) argument
624 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) argument
630 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) argument
635 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) argument
640 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) argument
645 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) argument
651 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) argument
657 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) argument
662 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) argument
667 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) argument
672 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) argument
678 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) argument
684 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) argument
689 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) argument
694 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) argument
699 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) argument
705 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) argument
711 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) argument
716 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) argument
721 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) argument
726 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) argument
732 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) argument
738 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) argument
743 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) argument
748 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) argument
753 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) argument
759 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) argument
765 __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, morder mo, morder fmo) argument
771 __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v, morder mo, morder fmo) argument
777 __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v, morder mo, morder fmo) argument
783 __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v, morder mo, morder fmo) argument
790 __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) argument
797 __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo, morder fmo) argument
803 __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v, morder mo, morder fmo) argument
809 __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v, morder mo, morder fmo) argument
815 __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v, morder mo, morder fmo) argument
822 __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) argument
829 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo, morder fmo) argument
835 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, morder mo, morder fmo) argument
841 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, morder mo, morder fmo) argument
847 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, morder mo, morder fmo) argument
854 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v, morder mo, morder fmo) argument
861 __tsan_atomic_thread_fence(morder mo) argument
867 __tsan_atomic_signal_fence(morder mo) argument
[all...]
/netbsd-current/external/gpl3/gcc/dist/libsanitizer/tsan/
tsan_interface_atomic.cpp 36 static bool IsLoadOrder(morder mo) { argument
37 return mo == mo_relaxed || mo == mo_consume
38 || mo == mo_acquire || mo == mo_seq_cst;
41 static bool IsStoreOrder(morder mo) { argument
42 return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
46 static bool IsReleaseOrder(morder mo) { argument
50 IsAcquireOrder(morder mo) argument
55 IsAcqRelOrder(morder mo) argument
198 to_mo(morder mo) argument
212 NoTsanAtomicLoad(const volatile T *a, morder mo) argument
217 NoTsanAtomicLoad(const volatile a128 *a, morder mo) argument
224 AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) argument
249 NoTsanAtomicStore(volatile T *a, T v, morder mo) argument
254 NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) argument
261 AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
284 AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
303 NoTsanAtomicExchange(volatile T *a, T v, morder mo) argument
308 NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) argument
313 NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) argument
318 NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) argument
323 NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) argument
328 NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) argument
333 NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) argument
338 AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
344 AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
350 AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
356 AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
362 AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
368 AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
374 AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
380 NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) argument
385 NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) argument
397 NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) argument
403 AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v, morder mo, morder fmo) argument
444 AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T c, T v, morder mo, morder fmo) argument
451 NoTsanAtomicFence(morder mo) argument
455 AtomicFence(ThreadState *thr, uptr pc, morder mo) argument
466 convert_morder(morder mo) argument
493 __tsan_atomic8_load(const volatile a8 *a, morder mo) argument
498 __tsan_atomic16_load(const volatile a16 *a, morder mo) argument
503 __tsan_atomic32_load(const volatile a32 *a, morder mo) argument
508 __tsan_atomic64_load(const volatile a64 *a, morder mo) argument
514 __tsan_atomic128_load(const volatile a128 *a, morder mo) argument
520 __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) argument
525 __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) argument
530 __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) argument
535 __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) argument
541 __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) argument
547 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) argument
552 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) argument
557 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) argument
562 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) argument
568 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) argument
574 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) argument
579 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) argument
584 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) argument
589 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) argument
595 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) argument
601 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) argument
606 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) argument
611 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) argument
616 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) argument
622 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) argument
628 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) argument
633 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) argument
638 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) argument
643 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) argument
649 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) argument
655 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) argument
660 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) argument
665 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) argument
670 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) argument
676 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) argument
682 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) argument
687 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) argument
692 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) argument
697 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) argument
703 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) argument
709 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) argument
714 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) argument
719 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) argument
724 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) argument
730 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) argument
736 __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, morder mo, morder fmo) argument
742 __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v, morder mo, morder fmo) argument
748 __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v, morder mo, morder fmo) argument
754 __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v, morder mo, morder fmo) argument
761 __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) argument
768 __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo, morder fmo) argument
774 __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v, morder mo, morder fmo) argument
780 __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v, morder mo, morder fmo) argument
786 __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v, morder mo, morder fmo) argument
793 __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) argument
800 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo, morder fmo) argument
806 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, morder mo, morder fmo) argument
812 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, morder mo, morder fmo) argument
818 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, morder mo, morder fmo) argument
825 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v, morder mo, morder fmo) argument
832 __tsan_atomic_thread_fence(morder mo) argument
835 __tsan_atomic_signal_fence(morder mo) argument
[all...]
/netbsd-current/external/bsd/jemalloc/include/jemalloc/internal/
atomic_c11.h 23 atomic_memory_order_t mo) { \
31 return atomic_load_explicit(a_nonconst, mo); \
36 type val, atomic_memory_order_t mo) { \
37 atomic_store_explicit(a, val, mo); \
42 atomic_memory_order_t mo) { \
43 return atomic_exchange_explicit(a, val, mo); \
72 type val, atomic_memory_order_t mo) { \
73 return atomic_fetch_add_explicit(a, val, mo); \
78 type val, atomic_memory_order_t mo) { \
79 return atomic_fetch_sub_explicit(a, val, mo); \
[all...]
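These matches are the C11 backend of jemalloc's atomics, a thin macro layer over <stdatomic.h>. A sketch of what one expansion plausibly looks like; the type names and the alias for atomic_memory_order_t are assumptions:

    #include <stdatomic.h>
    #include <stdint.h>

    typedef memory_order atomic_memory_order_t;              /* assumed alias */
    typedef struct { _Atomic(uint32_t) repr; } atomic_u32_t; /* assumed */

    static inline uint32_t
    atomic_load_u32(const atomic_u32_t *a, atomic_memory_order_t mo)
    {
            /* The matches cast away const first: C11's atomic_load_explicit
             * wants a non-const atomic object. */
            atomic_u32_t *a_nonconst = (atomic_u32_t *)(uintptr_t)a;
            return atomic_load_explicit(&a_nonconst->repr, mo);
    }

    static inline uint32_t
    atomic_fetch_add_u32(atomic_u32_t *a, uint32_t val,
        atomic_memory_order_t mo)
    {
            return atomic_fetch_add_explicit(&a->repr, val, mo);
    }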
atomic_gcc_atomic.h 17 atomic_enum_to_builtin(atomic_memory_order_t mo) { argument
18 switch (mo) {
35 atomic_fence(atomic_memory_order_t mo) { argument
36 __atomic_thread_fence(atomic_enum_to_builtin(mo));
46 atomic_memory_order_t mo) { \
48 __atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \
54 atomic_memory_order_t mo) { \
55 __atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \
60 atomic_memory_order_t mo) { \
63 atomic_enum_to_builtin(mo)); \
[all...]
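The GCC-builtin backend instead translates jemalloc's order enum to the __ATOMIC_* constants. A sketch assuming the usual enum members; only the switch header, the fence, and the __atomic_load call shape appear in the matches:

    typedef enum {
            atomic_memory_order_relaxed,
            atomic_memory_order_acquire,
            atomic_memory_order_release,
            atomic_memory_order_acq_rel,
            atomic_memory_order_seq_cst
    } atomic_memory_order_t;                /* assumed members */

    static inline int
    atomic_enum_to_builtin(atomic_memory_order_t mo)
    {
            switch (mo) {
            case atomic_memory_order_relaxed: return __ATOMIC_RELAXED;
            case atomic_memory_order_acquire: return __ATOMIC_ACQUIRE;
            case atomic_memory_order_release: return __ATOMIC_RELEASE;
            case atomic_memory_order_acq_rel: return __ATOMIC_ACQ_REL;
            default:                          return __ATOMIC_SEQ_CST;
            }
    }

    static inline void
    atomic_fence(atomic_memory_order_t mo)
    {
            __atomic_thread_fence(atomic_enum_to_builtin(mo));
    }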
/netbsd-current/external/bsd/jemalloc/dist/include/jemalloc/internal/
atomic_c11.h 23 atomic_memory_order_t mo) { \
31 return atomic_load_explicit(a_nonconst, mo); \
36 type val, atomic_memory_order_t mo) { \
37 atomic_store_explicit(a, val, mo); \
42 atomic_memory_order_t mo) { \
43 return atomic_exchange_explicit(a, val, mo); \
72 type val, atomic_memory_order_t mo) { \
73 return atomic_fetch_add_explicit(a, val, mo); \
78 type val, atomic_memory_order_t mo) { \
79 return atomic_fetch_sub_explicit(a, val, mo); \
[all...]
atomic_gcc_atomic.h 17 atomic_enum_to_builtin(atomic_memory_order_t mo) { argument
18 switch (mo) {
35 atomic_fence(atomic_memory_order_t mo) { argument
36 __atomic_thread_fence(atomic_enum_to_builtin(mo));
46 atomic_memory_order_t mo) { \
48 __atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \
54 atomic_memory_order_t mo) { \
55 __atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \
60 atomic_memory_order_t mo) { \
63 atomic_enum_to_builtin(mo)); \
[all...]
/netbsd-current/external/gpl3/gcc/dist/libsanitizer/sanitizer_common/
sanitizer_atomic_clang_other.h 26 const volatile T *a, memory_order mo) {
27 DCHECK(mo & (memory_order_relaxed | memory_order_consume
34 if (mo == memory_order_relaxed) {
36 } else if (mo == memory_order_consume) {
42 } else if (mo == memory_order_acquire) {
60 inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { argument
61 DCHECK(mo & (memory_order_relaxed | memory_order_release
67 if (mo == memory_order_relaxed) {
69 } else if (mo == memory_order_release) {
25 atomic_load( const volatile T *a, memory_order mo) argument
sanitizer_atomic_clang.h 47 typename T::Type v, memory_order mo) {
48 (void)mo;
55 typename T::Type v, memory_order mo) {
56 (void)mo;
63 typename T::Type v, memory_order mo) {
65 if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
68 if (mo == memory_order_seq_cst)
76 memory_order mo) {
90 memory_order mo) {
91 return atomic_compare_exchange_strong(a, cmp, xchg, mo);
46 atomic_fetch_add(volatile T *a, typename T::Type v, memory_order mo) argument
54 atomic_fetch_sub(volatile T *a, typename T::Type v, memory_order mo) argument
62 atomic_exchange(volatile T *a, typename T::Type v, memory_order mo) argument
74 atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) argument
87 atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) argument
[all...]
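Lines 90-91 show the weak compare-exchange simply delegating to the strong one. A non-template sketch of that pattern, specialized to 32 bits and built on the __sync builtin this backend relies on (the real code is a C++ template over T):

    #include <stdbool.h>

    static inline bool
    cas_strong_u32(volatile unsigned *a, unsigned *cmp, unsigned xchg, int mo)
    {
            (void)mo;       /* the __sync builtin is always a full barrier */
            unsigned cmpv = *cmp;
            unsigned prev = __sync_val_compare_and_swap(a, cmpv, xchg);
            if (prev == cmpv)
                    return true;
            *cmp = prev;    /* report the value observed, as C11 requires */
            return false;
    }

    static inline bool
    cas_weak_u32(volatile unsigned *a, unsigned *cmp, unsigned xchg, int mo)
    {
            /* Matched lines 90-91: weak CAS just forwards to strong. */
            return cas_strong_u32(a, cmp, xchg, mo);
    }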
sanitizer_atomic_clang_x86.h 28 const volatile T *a, memory_order mo) {
29 DCHECK(mo & (memory_order_relaxed | memory_order_consume
36 if (mo == memory_order_relaxed) {
38 } else if (mo == memory_order_consume) {
44 } else if (mo == memory_order_acquire) {
73 inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { argument
74 DCHECK(mo & (memory_order_relaxed | memory_order_release
80 if (mo == memory_order_relaxed) {
82 } else if (mo == memory_order_release) {
106 if (mo
27 atomic_load( const volatile T *a, memory_order mo) argument
[all...]
sanitizer_atomic_msvc.h 72 const volatile T *a, memory_order mo) {
73 DCHECK(mo & (memory_order_relaxed | memory_order_consume
78 if (mo == memory_order_relaxed) {
89 inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { argument
90 DCHECK(mo & (memory_order_relaxed | memory_order_release
94 if (mo == memory_order_relaxed) {
101 if (mo == memory_order_seq_cst)
106 u32 v, memory_order mo) {
107 (void)mo;
114 uptr v, memory_order mo) {
71 atomic_load( const volatile T *a, memory_order mo) argument
105 atomic_fetch_add(volatile atomic_uint32_t *a, u32 v, memory_order mo) argument
113 atomic_fetch_add(volatile atomic_uintptr_t *a, uptr v, memory_order mo) argument
126 atomic_fetch_sub(volatile atomic_uint32_t *a, u32 v, memory_order mo) argument
134 atomic_fetch_sub(volatile atomic_uintptr_t *a, uptr v, memory_order mo) argument
147 atomic_exchange(volatile atomic_uint8_t *a, u8 v, memory_order mo) argument
154 atomic_exchange(volatile atomic_uint16_t *a, u16 v, memory_order mo) argument
161 atomic_exchange(volatile atomic_uint32_t *a, u32 v, memory_order mo) argument
168 atomic_compare_exchange_strong(volatile atomic_uint8_t *a, u8 *cmp, u8 xchgv, memory_order mo) argument
194 atomic_compare_exchange_strong(volatile atomic_uintptr_t *a, uptr *cmp, uptr xchg, memory_order mo) argument
207 atomic_compare_exchange_strong(volatile atomic_uint16_t *a, u16 *cmp, u16 xchg, memory_order mo) argument
220 atomic_compare_exchange_strong(volatile atomic_uint32_t *a, u32 *cmp, u32 xchg, memory_order mo) argument
233 atomic_compare_exchange_strong(volatile atomic_uint64_t *a, u64 *cmp, u64 xchg, memory_order mo) argument
247 atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) argument
[all...]
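The MSVC backend discards the memory-order argument ("(void)mo") because the Interlocked* intrinsics are full barriers. A sketch of the fetch_add case; the struct layout is an assumption:

    #include <intrin.h>

    typedef struct {
            volatile long val_dont_use;     /* assumed layout */
    } atomic_uint32_t;

    static inline unsigned
    fetch_add_u32(atomic_uint32_t *a, unsigned v, int mo)
    {
            (void)mo;       /* Interlocked* intrinsics are full barriers */
            return (unsigned)_InterlockedExchangeAdd(&a->val_dont_use, (long)v);
    }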
/netbsd-current/lib/libintl/
gettext.c 204 * the code reads in *.mo files generated by GNU gettext. *.mo is a host-
336 int rv = snprintf(buf, len, "%s/%s/%s/%s.mo", dir, p,
455 mohandle->mo.mo_sysdep_segs[str->segs[i].ref].len;
469 memcpy(dst, mohandle->mo.mo_sysdep_segs[str->segs[i].ref].str,
470 mohandle->mo.mo_sysdep_segs[str->segs[i].ref].len);
471 dst += mohandle->mo.mo_sysdep_segs[str->segs[i].ref].len;
494 setup_sysdep_stuffs(struct mo *mo, struct mohandle *mohandle, char *base) argument
503 magic = mo
579 struct mo *mo; local
[all...]
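The match at line 336 gives the catalog path format libintl uses: dir/lang/category/domain.mo, so dir=/usr/share/locale, lang=de, category=LC_MESSAGES, domain=libc yields /usr/share/locale/de/LC_MESSAGES/libc.mo. A sketch with assumed parameter names:

    #include <stdio.h>

    static int
    mo_path(char *buf, size_t len, const char *dir, const char *lang,
        const char *category, const char *domain)
    {
            return snprintf(buf, len, "%s/%s/%s/%s.mo", dir, lang,
                category, domain);
    }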
/netbsd-current/sys/external/bsd/compiler_rt/dist/lib/sanitizer_common/
sanitizer_atomic_clang.h 48 typename T::Type v, memory_order mo) {
49 (void)mo;
56 typename T::Type v, memory_order mo) {
57 (void)mo;
64 typename T::Type v, memory_order mo) {
66 if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
69 if (mo == memory_order_seq_cst)
77 memory_order mo) {
91 memory_order mo) {
92 return atomic_compare_exchange_strong(a, cmp, xchg, mo);
47 atomic_fetch_add(volatile T *a, typename T::Type v, memory_order mo) argument
55 atomic_fetch_sub(volatile T *a, typename T::Type v, memory_order mo) argument
63 atomic_exchange(volatile T *a, typename T::Type v, memory_order mo) argument
75 atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) argument
88 atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) argument
[all...]
sanitizer_atomic_clang_x86.h 29 const volatile T *a, memory_order mo) {
30 DCHECK(mo & (memory_order_relaxed | memory_order_consume
37 if (mo == memory_order_relaxed) {
39 } else if (mo == memory_order_consume) {
45 } else if (mo == memory_order_acquire) {
74 INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { argument
75 DCHECK(mo & (memory_order_relaxed | memory_order_release
81 if (mo == memory_order_relaxed) {
83 } else if (mo == memory_order_release) {
107 if (mo
28 atomic_load( const volatile T *a, memory_order mo) argument
[all...]
sanitizer_atomic_msvc.h 82 const volatile T *a, memory_order mo) {
83 DCHECK(mo & (memory_order_relaxed | memory_order_consume
88 if (mo == memory_order_relaxed) {
99 INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { argument
100 DCHECK(mo & (memory_order_relaxed | memory_order_release
104 if (mo == memory_order_relaxed) {
111 if (mo == memory_order_seq_cst)
116 u32 v, memory_order mo) {
117 (void)mo;
124 uptr v, memory_order mo) {
81 atomic_load( const volatile T *a, memory_order mo) argument
115 atomic_fetch_add(volatile atomic_uint32_t *a, u32 v, memory_order mo) argument
123 atomic_fetch_add(volatile atomic_uintptr_t *a, uptr v, memory_order mo) argument
136 atomic_fetch_sub(volatile atomic_uint32_t *a, u32 v, memory_order mo) argument
144 atomic_fetch_sub(volatile atomic_uintptr_t *a, uptr v, memory_order mo) argument
157 atomic_exchange(volatile atomic_uint8_t *a, u8 v, memory_order mo) argument
164 atomic_exchange(volatile atomic_uint16_t *a, u16 v, memory_order mo) argument
171 atomic_exchange(volatile atomic_uint32_t *a, u32 v, memory_order mo) argument
178 atomic_compare_exchange_strong(volatile atomic_uint8_t *a, u8 *cmp, u8 xchgv, memory_order mo) argument
204 atomic_compare_exchange_strong(volatile atomic_uintptr_t *a, uptr *cmp, uptr xchg, memory_order mo) argument
217 atomic_compare_exchange_strong(volatile atomic_uint16_t *a, u16 *cmp, u16 xchg, memory_order mo) argument
230 atomic_compare_exchange_strong(volatile atomic_uint32_t *a, u32 *cmp, u32 xchg, memory_order mo) argument
243 atomic_compare_exchange_strong(volatile atomic_uint64_t *a, u64 *cmp, u64 xchg, memory_order mo) argument
257 atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) argument
[all...]
sanitizer_atomic_clang_other.h 27 const volatile T *a, memory_order mo) {
28 DCHECK(mo & (memory_order_relaxed | memory_order_consume
35 if (mo == memory_order_relaxed) {
37 } else if (mo == memory_order_consume) {
43 } else if (mo == memory_order_acquire) {
64 INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { argument
65 DCHECK(mo & (memory_order_relaxed | memory_order_release
71 if (mo == memory_order_relaxed) {
73 } else if (mo == memory_order_release) {
26 atomic_load( const volatile T *a, memory_order mo) argument
/netbsd-current/external/gpl3/gcc.old/dist/libsanitizer/sanitizer_common/
sanitizer_atomic_clang_x86.h 27 const volatile T *a, memory_order mo) {
28 DCHECK(mo & (memory_order_relaxed | memory_order_consume
35 if (mo == memory_order_relaxed) {
37 } else if (mo == memory_order_consume) {
43 } else if (mo == memory_order_acquire) {
72 INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { argument
73 DCHECK(mo & (memory_order_relaxed | memory_order_release
79 if (mo == memory_order_relaxed) {
81 } else if (mo == memory_order_release) {
105 if (mo
26 atomic_load( const volatile T *a, memory_order mo) argument
[all...]
sanitizer_atomic_clang.h 46 typename T::Type v, memory_order mo) {
47 (void)mo;
54 typename T::Type v, memory_order mo) {
55 (void)mo;
62 typename T::Type v, memory_order mo) {
64 if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
67 if (mo == memory_order_seq_cst)
75 memory_order mo) {
89 memory_order mo) {
90 return atomic_compare_exchange_strong(a, cmp, xchg, mo);
45 atomic_fetch_add(volatile T *a, typename T::Type v, memory_order mo) argument
53 atomic_fetch_sub(volatile T *a, typename T::Type v, memory_order mo) argument
61 atomic_exchange(volatile T *a, typename T::Type v, memory_order mo) argument
73 atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) argument
86 atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) argument
[all...]
sanitizer_atomic_msvc.h 80 const volatile T *a, memory_order mo) {
81 DCHECK(mo & (memory_order_relaxed | memory_order_consume
86 if (mo == memory_order_relaxed) {
97 INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { argument
98 DCHECK(mo & (memory_order_relaxed | memory_order_release
102 if (mo == memory_order_relaxed) {
109 if (mo == memory_order_seq_cst)
114 u32 v, memory_order mo) {
115 (void)mo;
122 uptr v, memory_order mo) {
79 atomic_load( const volatile T *a, memory_order mo) argument
113 atomic_fetch_add(volatile atomic_uint32_t *a, u32 v, memory_order mo) argument
121 atomic_fetch_add(volatile atomic_uintptr_t *a, uptr v, memory_order mo) argument
134 atomic_fetch_sub(volatile atomic_uint32_t *a, u32 v, memory_order mo) argument
142 atomic_fetch_sub(volatile atomic_uintptr_t *a, uptr v, memory_order mo) argument
155 atomic_exchange(volatile atomic_uint8_t *a, u8 v, memory_order mo) argument
162 atomic_exchange(volatile atomic_uint16_t *a, u16 v, memory_order mo) argument
169 atomic_exchange(volatile atomic_uint32_t *a, u32 v, memory_order mo) argument
176 atomic_compare_exchange_strong(volatile atomic_uint8_t *a, u8 *cmp, u8 xchgv, memory_order mo) argument
202 atomic_compare_exchange_strong(volatile atomic_uintptr_t *a, uptr *cmp, uptr xchg, memory_order mo) argument
215 atomic_compare_exchange_strong(volatile atomic_uint16_t *a, u16 *cmp, u16 xchg, memory_order mo) argument
228 atomic_compare_exchange_strong(volatile atomic_uint32_t *a, u32 *cmp, u32 xchg, memory_order mo) argument
241 atomic_compare_exchange_strong(volatile atomic_uint64_t *a, u64 *cmp, u64 xchg, memory_order mo) argument
255 atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) argument
[all...]
sanitizer_atomic_clang_other.h 25 const volatile T *a, memory_order mo) {
26 DCHECK(mo & (memory_order_relaxed | memory_order_consume
33 if (mo == memory_order_relaxed) {
35 } else if (mo == memory_order_consume) {
41 } else if (mo == memory_order_acquire) {
62 INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { argument
63 DCHECK(mo & (memory_order_relaxed | memory_order_release
69 if (mo == memory_order_relaxed) {
71 } else if (mo == memory_order_release) {
24 atomic_load( const volatile T *a, memory_order mo) argument
/netbsd-current/usr.bin/netstat/
mbuf.c 98 struct mowner_user *mo; local
211 len += 10 * sizeof(*mo); /* add some slop */
226 for (mo = (void *) data, lines = 0; len >= sizeof(*mo);
227 len -= sizeof(*mo), mo++) {
230 mo->mo_counter[MOWNER_COUNTER_CLAIMS] == 0 &&
231 mo->mo_counter[MOWNER_COUNTER_EXT_CLAIMS] == 0 &&
232 mo->mo_counter[MOWNER_COUNTER_CLUSTER_CLAIMS] == 0)
235 mo
[all...]
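The loop matched at lines 226-232 walks an array of struct mowner_user records returned by the kernel and skips owners whose claim counters are all zero. A hedged sketch; the buffer source and print_mowner() are hypothetical stand-ins:

    #include <sys/mbuf.h>   /* struct mowner_user, MOWNER_COUNTER_* */
    #include <stddef.h>

    extern void print_mowner(const struct mowner_user *); /* hypothetical */

    static void
    walk_mowners(void *data, size_t len)
    {
            struct mowner_user *mo;

            for (mo = (void *)data; len >= sizeof(*mo);
                len -= sizeof(*mo), mo++) {
                    if (mo->mo_counter[MOWNER_COUNTER_CLAIMS] == 0 &&
                        mo->mo_counter[MOWNER_COUNTER_EXT_CLAIMS] == 0 &&
                        mo->mo_counter[MOWNER_COUNTER_CLUSTER_CLAIMS] == 0)
                            continue;       /* never claimed anything; skip */
                    print_mowner(mo);
            }
    }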

