// Copyright 2016, 2018 The Fuchsia Authors
// Copyright (c) 2008-2015 Travis Geiselbrecht
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include "tests.h"

#include <assert.h>
#include <debug.h>
#include <err.h>
#include <fbl/algorithm.h>
#include <fbl/mutex.h>
#include <inttypes.h>
#include <kernel/event.h>
#include <kernel/mp.h>
#include <kernel/mutex.h>
#include <kernel/thread.h>
#include <platform.h>
#include <pow2.h>
#include <rand.h>
#include <string.h>
#include <trace.h>
#include <zircon/types.h>

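// Returns a pseudo-random number in the inclusive range [low, high].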
static uint rand_range(uint low, uint high) {
    uint r = rand();
    uint result = ((r ^ (r >> 16)) % (high - low + 1u)) + low;

    return result;
}

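// Sleep test: each sleeper thread loops forever, sleeping for a random
// interval and printing when it wakes; sleep_test() spins up a batch of them.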
static int sleep_thread(void* arg) {
    for (;;) {
        printf("sleeper %p\n", get_current_thread());
        thread_sleep_relative(ZX_MSEC(rand() % 500));
    }
    return 0;
}

static int sleep_test(void) {
    int i;
    for (i = 0; i < 16; i++)
        thread_detach_and_resume(thread_create("sleeper", &sleep_thread, NULL, DEFAULT_PRIORITY));
    return 0;
}

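// Mutex stress test: several threads contend on one mutex. While holding it,
// each thread writes its own identity into a shared variable, optionally
// yields, and panics if another thread modified the data under the lock.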
static int mutex_thread(void* arg) {
    int i;
    const int iterations = 1000000;
    int count = 0;

    static volatile uintptr_t shared = 0;

    mutex_t* m = (mutex_t*)arg;

    printf("mutex tester thread %p starting up, will go for %d iterations\n", get_current_thread(), iterations);

    for (i = 0; i < iterations; i++) {
        mutex_acquire(m);

        if (shared != 0)
            panic("someone else has messed with the shared data\n");

        shared = (uintptr_t)get_current_thread();
        if ((rand() % 5) == 0)
            thread_yield();

        if (++count % 10000 == 0)
            printf("%p: count %d\n", get_current_thread(), count);
        shared = 0;

        mutex_release(m);
        if ((rand() % 5) == 0)
            thread_yield();
    }

    printf("mutex tester %p done\n", get_current_thread());

    return 0;
}

static int mutex_test(void) {
    static mutex_t imutex = MUTEX_INITIAL_VALUE(imutex);
    printf("preinitialized mutex:\n");
    hexdump(&imutex, sizeof(imutex));

    mutex_t m;
    mutex_init(&m);

    thread_t* threads[5];

    for (uint i = 0; i < fbl::count_of(threads); i++) {
        threads[i] = thread_create("mutex tester", &mutex_thread, &m,
                                   get_current_thread()->base_priority);
        thread_resume(threads[i]);
    }

    for (uint i = 0; i < fbl::count_of(threads); i++) {
        thread_join(threads[i], NULL, ZX_TIME_INFINITE);
    }

    thread_sleep_relative(ZX_MSEC(100));

    printf("done with mutex tests\n");

    return 0;
}

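// Priority inheritance stress test: worker threads repeatedly change their own
// priority, acquire a random prefix of a stack of mutexes, block briefly on an
// event, and release the mutexes in reverse order.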
static int mutex_inherit_test() {
    printf("running mutex inheritance test\n");

    constexpr uint inherit_test_mutex_count = 4;
    constexpr uint inherit_test_thread_count = 5;

    // working variables passed to the worker threads
    struct args {
        event_t test_blocker = EVENT_INITIAL_VALUE(test_blocker, false, 0);
        mutex_t test_mutex[inherit_test_mutex_count];
    } args;

    // worker thread to stress the priority inheritance mechanism
    auto inherit_worker = [](void* arg) TA_NO_THREAD_SAFETY_ANALYSIS -> int {
        struct args* args = static_cast<struct args*>(arg);

        for (int count = 0; count < 100000; count++) {
            uint r = rand_range(1, inherit_test_mutex_count);

            // pick a random priority
            thread_set_priority(
                get_current_thread(), rand_range(DEFAULT_PRIORITY - 4, DEFAULT_PRIORITY + 4));

            // grab a random number of mutexes
            for (uint j = 0; j < r; j++) {
                mutex_acquire(&args->test_mutex[j]);
            }

            if (count % 1000 == 0)
                printf("%p: count %d\n", get_current_thread(), count);

            // wait on an event for a period of time, so that other grabber
            // threads are likely to need to tweak our priority via either one
            // of the mutexes we hold or the blocking event
            event_wait_deadline(&args->test_blocker, current_time() + ZX_USEC(rand() % 10u), true);

            // release in reverse order
            for (int j = r - 1; j >= 0; j--) {
                mutex_release(&args->test_mutex[j]);
            }
        }

        return 0;
    };

    // create a stack of mutexes and a few threads
    for (auto& m : args.test_mutex) {
        mutex_init(&m);
    }

    thread_t* test_thread[inherit_test_thread_count];
    for (auto& t : test_thread) {
        t = thread_create("mutex tester", inherit_worker, &args,
                          get_current_thread()->base_priority);
        thread_resume(t);
    }

    for (auto& t : test_thread) {
        thread_join(t, NULL, ZX_TIME_INFINITE);
    }

    for (auto& m : args.test_mutex) {
        mutex_destroy(&m);
    }

    thread_sleep_relative(ZX_MSEC(100));

    printf("done with mutex inheritance test\n");

    return 0;
}

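// Event tests: one signaler thread wakes a pool of waiter threads. The first
// pass uses a regular (sticky) event that should release every waiter; the
// second uses an autounsignal event that should wake exactly one waiter per
// signal.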
static event_t e;

static int event_signaler(void* arg) {
    printf("event signaler pausing\n");
    thread_sleep_relative(ZX_SEC(1));

    //  for (;;) {
    printf("signaling event\n");
    event_signal(&e, true);
    printf("done signaling event\n");
    thread_yield();
    //  }

    return 0;
}

static int event_waiter(void* arg) {
    uintptr_t count = (uintptr_t)arg;

    while (count > 0) {
        printf("thread %p: waiting on event...\n", get_current_thread());
        zx_status_t err = event_wait_deadline(&e, ZX_TIME_INFINITE, true);
        if (err == ZX_ERR_INTERNAL_INTR_KILLED) {
            printf("thread %p: killed\n", get_current_thread());
            return -1;
        } else if (err < 0) {
            printf("thread %p: event_wait() returned error %d\n", get_current_thread(), err);
            return -1;
        }
        printf("thread %p: done waiting on event\n", get_current_thread());
        thread_yield();
        count--;
    }

    return 0;
}

static void event_test(void) {
    thread_t* threads[5];

    static event_t ievent = EVENT_INITIAL_VALUE(ievent, true, 0x1234);
    printf("preinitialized event:\n");
    hexdump(&ievent, sizeof(ievent));

    printf("event tests starting\n");

    /* make sure signaling the event wakes up all the threads and stays signaled */
    printf("creating event, waiting on it with 4 threads, signaling it and making sure all threads fall through twice\n");
    event_init(&e, false, 0);
    threads[0] = thread_create("event signaler", &event_signaler, NULL, DEFAULT_PRIORITY);
    threads[1] = thread_create("event waiter 0", &event_waiter, (void*)2, DEFAULT_PRIORITY);
    threads[2] = thread_create("event waiter 1", &event_waiter, (void*)2, DEFAULT_PRIORITY);
    threads[3] = thread_create("event waiter 2", &event_waiter, (void*)2, DEFAULT_PRIORITY);
    threads[4] = thread_create("event waiter 3", &event_waiter, (void*)2, DEFAULT_PRIORITY);

    for (uint i = 0; i < fbl::count_of(threads); i++)
        thread_resume(threads[i]);

    for (uint i = 0; i < fbl::count_of(threads); i++)
        thread_join(threads[i], NULL, ZX_TIME_INFINITE);

    thread_sleep_relative(ZX_SEC(2));
    printf("destroying event\n");
    event_destroy(&e);

    /* make sure signaling the event wakes up precisely one thread */
    printf("creating event, waiting on it with 4 threads, signaling it and making sure only one thread wakes up\n");
    event_init(&e, false, EVENT_FLAG_AUTOUNSIGNAL);
    threads[0] = thread_create("event signaler", &event_signaler, NULL, DEFAULT_PRIORITY);
    threads[1] = thread_create("event waiter 0", &event_waiter, (void*)99, DEFAULT_PRIORITY);
    threads[2] = thread_create("event waiter 1", &event_waiter, (void*)99, DEFAULT_PRIORITY);
    threads[3] = thread_create("event waiter 2", &event_waiter, (void*)99, DEFAULT_PRIORITY);
    threads[4] = thread_create("event waiter 3", &event_waiter, (void*)99, DEFAULT_PRIORITY);

    for (uint i = 0; i < fbl::count_of(threads); i++)
        thread_resume(threads[i]);

    thread_sleep_relative(ZX_SEC(2));

    for (uint i = 0; i < fbl::count_of(threads); i++) {
        thread_kill(threads[i]);
        thread_join(threads[i], NULL, ZX_TIME_INFINITE);
    }

    event_destroy(&e);

    printf("event tests done\n");
}

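// Quantum test: busy-looping threads print their remaining time slice so the
// scheduler's quantum accounting can be observed.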
static int quantum_tester(void* arg) {
    for (;;) {
        printf("%p: in this thread. rq %" PRIi64 "\n", get_current_thread(), get_current_thread()->remaining_time_slice);
    }
    return 0;
}

static void quantum_test(void) {
    thread_detach_and_resume(thread_create("quantum tester 0", &quantum_tester, NULL, DEFAULT_PRIORITY));
    thread_detach_and_resume(thread_create("quantum tester 1", &quantum_tester, NULL, DEFAULT_PRIORITY));
    thread_detach_and_resume(thread_create("quantum tester 2", &quantum_tester, NULL, DEFAULT_PRIORITY));
    thread_detach_and_resume(thread_create("quantum tester 3", &quantum_tester, NULL, DEFAULT_PRIORITY));
}

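// Rough context switch benchmark: each tester thread waits for a start event,
// yields in a tight loop, and reports the average cycle cost per yield for
// runs of 1, 2, and 4 threads.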
static event_t context_switch_event;
static event_t context_switch_done_event;

static int context_switch_tester(void* arg) {
    int i;
    uint64_t total_count = 0;
    const int iter = 100000;
    uintptr_t thread_count = (uintptr_t)arg;

    event_wait(&context_switch_event);

    uint64_t count = arch_cycle_count();
    for (i = 0; i < iter; i++) {
        thread_yield();
    }
    total_count += arch_cycle_count() - count;
    thread_sleep_relative(ZX_SEC(1));
    printf("took %" PRIu64 " cycles to yield %d times, %" PRIu64 " per yield, %" PRIu64 " per yield per thread\n",
           total_count, iter, total_count / iter, total_count / iter / thread_count);

    event_signal(&context_switch_done_event, true);

    return 0;
}

static void context_switch_test(void) {
    event_init(&context_switch_event, false, 0);
    event_init(&context_switch_done_event, false, 0);

    thread_detach_and_resume(thread_create("context switch idle", &context_switch_tester, (void*)1, DEFAULT_PRIORITY));
    thread_sleep_relative(ZX_MSEC(100));
    event_signal(&context_switch_event, true);
    event_wait(&context_switch_done_event);
    thread_sleep_relative(ZX_MSEC(100));

    event_unsignal(&context_switch_event);
    event_unsignal(&context_switch_done_event);
    thread_detach_and_resume(thread_create("context switch 2a", &context_switch_tester, (void*)2, DEFAULT_PRIORITY));
    thread_detach_and_resume(thread_create("context switch 2b", &context_switch_tester, (void*)2, DEFAULT_PRIORITY));
    thread_sleep_relative(ZX_MSEC(100));
    event_signal(&context_switch_event, true);
    event_wait(&context_switch_done_event);
    thread_sleep_relative(ZX_MSEC(100));

    event_unsignal(&context_switch_event);
    event_unsignal(&context_switch_done_event);
    thread_detach_and_resume(thread_create("context switch 4a", &context_switch_tester, (void*)4, DEFAULT_PRIORITY));
    thread_detach_and_resume(thread_create("context switch 4b", &context_switch_tester, (void*)4, DEFAULT_PRIORITY));
    thread_detach_and_resume(thread_create("context switch 4c", &context_switch_tester, (void*)4, DEFAULT_PRIORITY));
    thread_detach_and_resume(thread_create("context switch 4d", &context_switch_tester, (void*)4, DEFAULT_PRIORITY));
    thread_sleep_relative(ZX_MSEC(100));
    event_signal(&context_switch_event, true);
    event_wait(&context_switch_done_event);
    thread_sleep_relative(ZX_MSEC(100));
}

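// Atomic test: half of the threads increment a shared counter and half
// decrement it the same number of times; the final value should be zero.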
static volatile int atomic;
static volatile int atomic_count;

static int atomic_tester(void* arg) {
    int add = (int)(uintptr_t)arg;
    int i;

    const int iter = 10000000;

    TRACEF("add %d, %d iterations\n", add, iter);

    for (i = 0; i < iter; i++) {
        atomic_add(&atomic, add);
    }

    int old = atomic_add(&atomic_count, -1);
    TRACEF("exiting, old count %d\n", old);

    return 0;
}

static void atomic_test(void) {
    atomic = 0;
    atomic_count = 8;

    printf("testing atomic routines\n");

    thread_t* threads[8];
    threads[0] = thread_create("atomic tester 1", &atomic_tester, (void*)1, LOW_PRIORITY);
    threads[1] = thread_create("atomic tester 1", &atomic_tester, (void*)1, LOW_PRIORITY);
    threads[2] = thread_create("atomic tester 1", &atomic_tester, (void*)1, LOW_PRIORITY);
    threads[3] = thread_create("atomic tester 1", &atomic_tester, (void*)1, LOW_PRIORITY);
    threads[4] = thread_create("atomic tester 2", &atomic_tester, (void*)-1, LOW_PRIORITY);
    threads[5] = thread_create("atomic tester 2", &atomic_tester, (void*)-1, LOW_PRIORITY);
    threads[6] = thread_create("atomic tester 2", &atomic_tester, (void*)-1, LOW_PRIORITY);
    threads[7] = thread_create("atomic tester 2", &atomic_tester, (void*)-1, LOW_PRIORITY);

    /* start all the threads */
    for (uint i = 0; i < fbl::count_of(threads); i++)
        thread_resume(threads[i]);

    /* wait for them to all stop */
    for (uint i = 0; i < fbl::count_of(threads); i++) {
        thread_join(threads[i], NULL, ZX_TIME_INFINITE);
    }

    printf("atomic count == %d (should be zero)\n", atomic);
}

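// Worker for the preemption tests: busy-spin for a while, print a timestamp on
// exit, and decrement the shared countdown.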
static volatile int preempt_count;

static int preempt_tester(void* arg) {
    spin(1000000);

    printf("exiting ts %" PRIi64 " ns\n", current_time());

    atomic_add(&preempt_count, -1);

    return 0;
}

static void preempt_test(void) {
    /* create 5 threads, let them run. If timer-based preemption is working
     * properly, the threads should interleave each other at a fine enough
     * granularity that they complete at roughly the same time. */
    printf("testing preemption\n");

    preempt_count = 5;

    for (int i = 0; i < preempt_count; i++)
        thread_detach_and_resume(thread_create("preempt tester", &preempt_tester, NULL, LOW_PRIORITY));

    while (preempt_count > 0) {
        thread_sleep_relative(ZX_SEC(1));
    }

    printf("done with preempt test, above time stamps should be very close\n");

    /* do the same as above, but mark the threads as real time, which should
     * effectively disable timer based preemption for them. They should
     * complete in order, about a second apart. */
    printf("testing real time preemption\n");

    const int num_threads = 5;
    preempt_count = num_threads;

    for (int i = 0; i < num_threads; i++) {
        thread_t* t = thread_create("preempt tester", &preempt_tester, NULL, LOW_PRIORITY);
        thread_set_real_time(t);
        thread_set_cpu_affinity(t, cpu_num_to_mask(0));
        thread_detach_and_resume(t);
    }

    while (preempt_count > 0) {
        thread_sleep_relative(ZX_SEC(1));
    }

    printf("done with real-time preempt test, above time stamps should be 1 second apart\n");
}

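// thread_join/thread_detach tests: verify that return values propagate through
// thread_join and that the thread structure is torn down (magic cleared) at the
// expected points for joined, detached, and already-dead threads.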
static int join_tester(void* arg) {
    int val = (int)(uintptr_t)arg;

    printf("\t\tjoin tester starting\n");
    thread_sleep_relative(ZX_MSEC(500));
    printf("\t\tjoin tester exiting with result %d\n", val);

    return val;
}

static int join_tester_server(void* arg) {
    int ret;
    zx_status_t err;
    thread_t* t;

    printf("\ttesting thread_join/thread_detach\n");

    printf("\tcreating and waiting on thread to exit with thread_join\n");
    t = thread_create("join tester", &join_tester, (void*)1, DEFAULT_PRIORITY);
    thread_resume(t);
    ret = 99;
    printf("\tthread magic is 0x%x (should be 0x%x)\n", (unsigned)t->magic, (unsigned)THREAD_MAGIC);
    err = thread_join(t, &ret, ZX_TIME_INFINITE);
    printf("\tthread_join returns err %d, retval %d\n", err, ret);
    printf("\tthread magic is 0x%x (should be 0)\n", (unsigned)t->magic);

    printf("\tcreating and waiting on thread to exit with thread_join, after thread has exited\n");
    t = thread_create("join tester", &join_tester, (void*)2, DEFAULT_PRIORITY);
    thread_resume(t);
    thread_sleep_relative(ZX_SEC(1)); // wait until thread is already dead
    ret = 99;
    printf("\tthread magic is 0x%x (should be 0x%x)\n", (unsigned)t->magic, (unsigned)THREAD_MAGIC);
    err = thread_join(t, &ret, ZX_TIME_INFINITE);
    printf("\tthread_join returns err %d, retval %d\n", err, ret);
    printf("\tthread magic is 0x%x (should be 0)\n", (unsigned)t->magic);

    printf("\tcreating a thread, detaching it, let it exit on its own\n");
    t = thread_create("join tester", &join_tester, (void*)3, DEFAULT_PRIORITY);
    thread_detach(t);
    thread_resume(t);
    thread_sleep_relative(ZX_SEC(1)); // wait until the thread should be dead
    printf("\tthread magic is 0x%x (should be 0)\n", (unsigned)t->magic);

    printf("\tcreating a thread, detaching it after it should be dead\n");
    t = thread_create("join tester", &join_tester, (void*)4, DEFAULT_PRIORITY);
    thread_resume(t);
    thread_sleep_relative(ZX_SEC(1)); // wait until thread is already dead
    printf("\tthread magic is 0x%x (should be 0x%x)\n", (unsigned)t->magic, (unsigned)THREAD_MAGIC);
    thread_detach(t);
    printf("\tthread magic is 0x%x\n", (unsigned)t->magic);

    printf("\texiting join tester server\n");

    return 55;
}

static void join_test(void) {
    int ret;
    zx_status_t err;
    thread_t* t;

    printf("testing thread_join/thread_detach\n");

    printf("creating thread join server thread\n");
    t = thread_create("join tester server", &join_tester_server, (void*)1, DEFAULT_PRIORITY);
    thread_resume(t);
    ret = 99;
    err = thread_join(t, &ret, ZX_TIME_INFINITE);
    printf("thread_join returns err %d, retval %d (should be 0 and 55)\n", err, ret);
}

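// Spinlock tests: verify the single-core lock/unlock invariants, then use a
// helper thread on another cpu to check cross-cpu holder queries.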
struct lock_pair_t {
    spin_lock_t first = SPIN_LOCK_INITIAL_VALUE;
    spin_lock_t second = SPIN_LOCK_INITIAL_VALUE;
};

// Acquires lock on "second" and holds it until it sees that "first" is released.
static int hold_and_release(void* arg) {
    lock_pair_t* pair = reinterpret_cast<lock_pair_t*>(arg);
    ASSERT(pair != nullptr);
    spin_lock_saved_state_t state;
    spin_lock_irqsave(&pair->second, state);
    while (spin_lock_holder_cpu(&pair->first) != UINT_MAX) {
        thread_yield();
    }
    spin_unlock_irqrestore(&pair->second, state);
    return 0;
}

static void spinlock_test(void) {
    spin_lock_saved_state_t state;
    spin_lock_t lock;

    spin_lock_init(&lock);

    // Verify basic functionality (single core).
    printf("testing spinlock:\n");
    ASSERT(!spin_lock_held(&lock));
    ASSERT(!arch_ints_disabled());
    spin_lock_irqsave(&lock, state);
    ASSERT(arch_ints_disabled());
    ASSERT(spin_lock_held(&lock));
    ASSERT(spin_lock_holder_cpu(&lock) == arch_curr_cpu_num());
    spin_unlock_irqrestore(&lock, state);
    ASSERT(!spin_lock_held(&lock));
    ASSERT(!arch_ints_disabled());

    // Verify slightly more advanced functionality that requires multiple cores.
    cpu_mask_t online = mp_get_online_mask();
    if (!online || ispow2(online)) {
        printf("skipping rest of spinlock_test, not enough online cpus\n");
        return;
    }

    // Hold the first lock, then create a thread and wait for it to acquire the lock.
    lock_pair_t pair;
    spin_lock_irqsave(&pair.first, state);
    thread_t* holder_thread = thread_create("hold_and_release", &hold_and_release, &pair,
                                            DEFAULT_PRIORITY);
    ASSERT(holder_thread != nullptr);
    thread_resume(holder_thread);
    while (spin_lock_holder_cpu(&pair.second) == UINT_MAX) {
        thread_yield();
    }

    // See that from our perspective "second" is not held.
    ASSERT(!spin_lock_held(&pair.second));
    spin_unlock_irqrestore(&pair.first, state);
    thread_join(holder_thread, NULL, ZX_TIME_INFINITE);

    printf("seems to work\n");
}

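// thread_kill tests: kill threads while they are sleeping, blocked on events
// (with and without deadlines), or not yet resumed, and verify they terminate
// and can be joined.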
static void sleeper_thread_exit(enum thread_user_state_change new_state, void* arg) {
    TRACEF("arg %p\n", arg);
}

static int sleeper_kill_thread(void* arg) {
    thread_sleep_relative(ZX_MSEC(100));

    zx_time_t t = current_time();
    zx_status_t err = thread_sleep_etc(t + ZX_SEC(5), true);
    zx_duration_t duration = (current_time() - t) / ZX_MSEC(1);
    TRACEF("thread_sleep_etc returns %d after %" PRIi64 " msecs\n", err, duration);

    return 0;
}

static void waiter_thread_exit(enum thread_user_state_change new_state, void* arg) {
    TRACEF("arg %p\n", arg);
}

static int waiter_kill_thread_infinite_wait(void* arg) {
    event_t* e = (event_t*)arg;

    thread_sleep_relative(ZX_MSEC(100));

    zx_time_t t = current_time();
    zx_status_t err = event_wait_deadline(e, ZX_TIME_INFINITE, true);
    zx_duration_t duration = (current_time() - t) / ZX_MSEC(1);
    TRACEF("event_wait_deadline returns %d after %" PRIi64 " msecs\n", err, duration);

    return 0;
}

static int waiter_kill_thread(void* arg) {
    event_t* e = (event_t*)arg;

    thread_sleep_relative(ZX_MSEC(100));

    zx_time_t t = current_time();
    zx_status_t err = event_wait_deadline(e, t + ZX_SEC(5), true);
    zx_duration_t duration = (current_time() - t) / ZX_MSEC(1);
    TRACEF("event_wait_deadline with deadline returns %d after %" PRIi64 " msecs\n", err, duration);

    return 0;
}

static void kill_tests(void) {
    thread_t* t;

    printf("starting sleeper thread, then killing it while it sleeps.\n");
    t = thread_create("sleeper", sleeper_kill_thread, 0, LOW_PRIORITY);
    t->user_thread = t;
    thread_set_user_callback(t, &sleeper_thread_exit);
    thread_resume(t);
    thread_sleep_relative(ZX_MSEC(200));
    thread_kill(t);
    thread_join(t, NULL, ZX_TIME_INFINITE);

    printf("starting sleeper thread, then killing it before it wakes up.\n");
    t = thread_create("sleeper", sleeper_kill_thread, 0, LOW_PRIORITY);
    t->user_thread = t;
    thread_set_user_callback(t, &sleeper_thread_exit);
    thread_resume(t);
    thread_kill(t);
    thread_join(t, NULL, ZX_TIME_INFINITE);

    printf("starting sleeper thread, then killing it before it is resumed.\n");
    t = thread_create("sleeper", sleeper_kill_thread, 0, LOW_PRIORITY);
    t->user_thread = t;
    thread_set_user_callback(t, &sleeper_thread_exit);
    thread_kill(t); // kill it before it is resumed
    thread_resume(t);
    thread_join(t, NULL, ZX_TIME_INFINITE);

    event_t e;

    printf("starting waiter thread that waits forever, then killing it while it blocks.\n");
    event_init(&e, false, 0);
    t = thread_create("waiter", waiter_kill_thread_infinite_wait, &e, LOW_PRIORITY);
    t->user_thread = t;
    thread_set_user_callback(t, &waiter_thread_exit);
    thread_resume(t);
    thread_sleep_relative(ZX_MSEC(200));
    thread_kill(t);
    thread_join(t, NULL, ZX_TIME_INFINITE);
    event_destroy(&e);

    printf("starting waiter thread that waits forever, then killing it before it wakes up.\n");
    event_init(&e, false, 0);
    t = thread_create("waiter", waiter_kill_thread_infinite_wait, &e, LOW_PRIORITY);
    t->user_thread = t;
    thread_set_user_callback(t, &waiter_thread_exit);
    thread_resume(t);
    thread_kill(t);
    thread_join(t, NULL, ZX_TIME_INFINITE);
    event_destroy(&e);

    printf("starting waiter thread that waits some time, then killing it while it blocks.\n");
    event_init(&e, false, 0);
    t = thread_create("waiter", waiter_kill_thread, &e, LOW_PRIORITY);
    t->user_thread = t;
    thread_set_user_callback(t, &waiter_thread_exit);
    thread_resume(t);
    thread_sleep_relative(ZX_MSEC(200));
    thread_kill(t);
    thread_join(t, NULL, ZX_TIME_INFINITE);
    event_destroy(&e);

    printf("starting waiter thread that waits some time, then killing it before it wakes up.\n");
    event_init(&e, false, 0);
    t = thread_create("waiter", waiter_kill_thread, &e, LOW_PRIORITY);
    t->user_thread = t;
    thread_set_user_callback(t, &waiter_thread_exit);
    thread_resume(t);
    thread_kill(t);
    thread_join(t, NULL, ZX_TIME_INFINITE);
    event_destroy(&e);
}

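// Shared state for the affinity test: the pool of tester threads and a flag
// used to tell them to shut down.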
struct affinity_test_state {
    thread_t* threads[16] = {};
    volatile bool shutdown = false;
};

template <typename T>
static void spin_while(zx_time_t t, T func) {
    zx_time_t start = current_time();

    while ((current_time() - start) < t) {
        func();
    }
}

static int affinity_test_thread(void* arg) {
    thread_t* t = get_current_thread();
    affinity_test_state* state = static_cast<affinity_test_state*>(arg);

    printf("top of affinity tester %p\n", t);

    while (!state->shutdown) {
        int which = rand() % static_cast<int>(fbl::count_of(state->threads));
        switch (rand() % 5) {
        case 0: // set affinity
            //printf("%p set aff %p\n", t, state->threads[which]);
            thread_set_cpu_affinity(state->threads[which], (cpu_mask_t)rand());
            break;
        case 1: // sleep for a bit
            //printf("%p sleep\n", t);
            thread_sleep_relative(ZX_USEC(rand() % 100));
            break;
        case 2: // spin for a bit
            //printf("%p spin\n", t);
            spin((uint32_t)rand() % 100);
            //printf("%p spin done\n", t);
            break;
        case 3: // yield
            //printf("%p yield\n", t);
            spin_while(ZX_USEC((uint32_t)rand() % 100), thread_yield);
            //printf("%p yield done\n", t);
            break;
        case 4: // reschedule
            //printf("%p reschedule\n", t);
            spin_while(ZX_USEC((uint32_t)rand() % 100), thread_reschedule);
            //printf("%p reschedule done\n", t);
            break;
        }
    }

    printf("affinity tester %p exiting\n", t);

    return 0;
}

// start a bunch of threads that randomly set the affinity of the other threads
// to random masks while doing various work.
// a successful pass is one where it completes the run without tripping over any asserts
// in the scheduler code.
__NO_INLINE static void affinity_test() {
    printf("starting thread affinity test\n");

    cpu_mask_t online = mp_get_online_mask();
    if (!online || ispow2(online)) {
        printf("aborting test, not enough online cpus\n");
        return;
    }

    affinity_test_state state;

    for (auto& t : state.threads) {
        t = thread_create("affinity_tester", &affinity_test_thread, &state,
                          LOW_PRIORITY);
    }

    for (auto& t : state.threads) {
        thread_resume(t);
    }

    static const int duration = 30;
    printf("running tests for %i seconds\n", duration);
    for (int i = 0; i < duration; i++) {
        thread_sleep_relative(ZX_SEC(1));
        printf("%d sec elapsed\n", i + 1);
    }
    state.shutdown = true;
    thread_sleep_relative(ZX_SEC(1));

    for (auto& t : state.threads) {
        printf("joining thread %p\n", t);
        thread_join(t, nullptr, ZX_TIME_INFINITE);
    }

    printf("done with affinity test\n");
}

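// TLS slot tests: a short-lived thread installs a value and a destructor
// callback in two TLS slots; each callback should fire exactly once when the
// thread exits, which is checked via atomic_count.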
#define TLS_TEST_TAGV   ((void*)0x666)

static void tls_test_callback(void* tls) {
    ASSERT(tls == TLS_TEST_TAGV);
    atomic_add(&atomic_count, 1);
}

static int tls_test_thread(void* arg) {
    tls_set(0u, TLS_TEST_TAGV);
    tls_set_callback(0u, &tls_test_callback);
    tls_set(1u, TLS_TEST_TAGV);
    tls_set_callback(1u, &tls_test_callback);
    return 0;
}

static void tls_tests() {
    printf("starting tls tests\n");
    atomic_count = 0;

    thread_t* t = thread_create("tls-test", tls_test_thread, 0, LOW_PRIORITY);
    thread_resume(t);
    thread_sleep_relative(ZX_MSEC(200));
    thread_join(t, nullptr, ZX_TIME_INFINITE);

    ASSERT(atomic_count == 2);
    atomic_count = 0;

    printf("done with tls tests\n");
}

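// Priority tests: adjust the current thread's priority and verify the change
// sticks, then repeatedly bump the priority of a busy-looping thread pinned to
// another cpu and make sure it observes each change.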
static int prio_test_thread(void* arg) {
    thread_t* t = get_current_thread();
    ASSERT(t->base_priority == LOW_PRIORITY);

    auto ev = (event_t*)arg;
    event_signal(ev, false);

    // Busy loop until our priority changes.
    volatile int* v_pri = &t->base_priority;
    int count = 0;
    for (;;) {
        if (*v_pri == DEFAULT_PRIORITY) {
            break;
        }
        ++count;
    }

    event_signal(ev, false);

    // And then when it changes again.
    for (;;) {
        if (*v_pri == HIGH_PRIORITY) {
            break;
        }
        ++count;
    }

    return count;
}

__NO_INLINE static void priority_test() {
    printf("starting priority tests\n");

    thread_t* t = get_current_thread();
    int base_priority = t->base_priority;

    if (base_priority != DEFAULT_PRIORITY) {
        printf("unexpected initial state, aborting test\n");
        return;
    }

    thread_set_priority(t, DEFAULT_PRIORITY + 2);
    thread_sleep_relative(ZX_MSEC(1));
    ASSERT(t->base_priority == DEFAULT_PRIORITY + 2);

    thread_set_priority(t, DEFAULT_PRIORITY - 2);
    thread_sleep_relative(ZX_MSEC(1));
    ASSERT(t->base_priority == DEFAULT_PRIORITY - 2);

    cpu_mask_t online = mp_get_online_mask();
    if (!online || ispow2(online)) {
        printf("skipping rest, not enough online cpus\n");
        return;
    }

    event_t ev;
    event_init(&ev, false, EVENT_FLAG_AUTOUNSIGNAL);

    thread_t* nt = thread_create(
        "prio-test", prio_test_thread, &ev, LOW_PRIORITY);

    cpu_num_t curr = arch_curr_cpu_num();
    cpu_num_t other;
    if (mp_is_cpu_online(curr + 1)) {
        other = curr + 1;
    } else if (mp_is_cpu_online(curr - 1)) {
        other = curr - 1;
    } else {
        ASSERT(false);
    }

    thread_set_cpu_affinity(nt, cpu_num_to_mask(other));
    thread_resume(nt);

    zx_status_t status = event_wait_deadline(&ev, ZX_TIME_INFINITE, true);
    ASSERT(status == ZX_OK);
    thread_set_priority(nt, DEFAULT_PRIORITY);

    status = event_wait_deadline(&ev, ZX_TIME_INFINITE, true);
    ASSERT(status == ZX_OK);
    thread_set_priority(nt, HIGH_PRIORITY);

    int count = 0;
    thread_join(nt, &count, ZX_TIME_INFINITE);
    printf("%d loops\n", count);

    printf("done with priority tests\n");
}

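// Entry point that runs the whole thread test suite.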
int thread_tests(int, const cmd_args*, uint32_t) {
    kill_tests();

    mutex_test();
    event_test();
    mutex_inherit_test();

    spinlock_test();
    atomic_test();

    thread_sleep_relative(ZX_MSEC(200));
    context_switch_test();

    preempt_test();

    join_test();

    affinity_test();

    tls_tests();

    priority_test();

    return 0;
}

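// "spinner" command: create a thread that spins forever at the requested
// priority, optionally marked as real time.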
static int spinner_thread(void* arg) {
    for (;;)
        ;

    return 0;
}

int spinner(int argc, const cmd_args* argv, uint32_t) {
    if (argc < 2) {
        printf("not enough args\n");
        printf("usage: %s <priority> <rt>\n", argv[0].str);
        return -1;
    }

    thread_t* t = thread_create("spinner", spinner_thread, NULL, (int)argv[1].u);
    if (!t)
        return ZX_ERR_NO_MEMORY;

    if (argc >= 3 && !strcmp(argv[2].str, "rt")) {
        thread_set_real_time(t);
    }
    thread_detach_and_resume(t);

    return 0;
}