/*
 * Copyright 2017, Data61
 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
 * ABN 41 687 119 230.
 *
 * This software may be distributed and modified according to the terms of
 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
 * See "LICENSE_GPLv2.txt" for details.
 *
 * @TAG(DATA61_GPL)
 */

#include <config.h>
#include <mode/smp/ipi.h>
#include <smp/ipi.h>
#include <smp/lock.h>

#ifdef ENABLE_SMP_SUPPORT
/* This function switches the core it is called on to the idle thread,
 * in order to avoid IPI storms. If the core is waiting on the lock, the actual
 * switch will not occur until the core attempts to obtain the lock, at which
 * point the core will capture the pending IPI, which is discarded.
 *
 * The core that triggered the stall is responsible for triggering a reschedule,
 * or this call will idle forever. */
void ipiStallCoreCallback(bool_t irqPath)
{
    if (clh_is_self_in_queue() && !irqPath) {
        /* The current thread is Running, since we are about to replace it with
         * the idle thread.
         *
         * The instruction should be re-executed if we are in the kernel handling
         * a syscall. A thread in 'ThreadState_RunningVM' should remain in the
         * same state. Note that 'ThreadState_Restart' does not always result in
         * regenerating the exception if we are in the kernel handling it,
         * e.g. a hardware single-step exception. */
        if (thread_state_ptr_get_tsType(&NODE_STATE(ksCurThread)->tcbState) == ThreadState_Running) {
            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        }

        SCHED_ENQUEUE_CURRENT_TCB;
        switchToIdleThread();
        NODE_STATE(ksSchedulerAction) = SchedulerAction_ResumeCurrentThread;

        /* Let the CPU requesting this IPI continue while we wait on the lock */
        big_kernel_lock.node_owners[getCurrentCPUIndex()].ipi = 0;
        ipi_wait(totalCoreBarrier);

        /* Continue waiting on the lock */
        while (big_kernel_lock.node_owners[getCurrentCPUIndex()].next->value != CLHState_Granted) {
            if (clh_is_ipi_pending(getCurrentCPUIndex())) {
                /* Multiple calls for the same reason could result in stack overflow */
                assert((IpiRemoteCall_t)remoteCall != IpiRemoteCall_Stall);
                handleIPI(irq_remote_call_ipi, irqPath);
            }
            arch_pause();
        }

        /* make sure no resource access passes from this point */
        asm volatile("" ::: "memory");

        /* Start the idle thread to capture the pending IPI */
        activateThread();
        restore_user_context();
    } else {
        /* We get here either from the normal interrupt path without holding the
         * lock, or from inside the lock queue while waiting to acquire the lock
         * to handle a pending interrupt. In the latter case, we return to
         * 'clh_lock_acquire' to grab the lock and handle the pending interrupt.
         * This is valid as interrupts are async events! */
        SCHED_ENQUEUE_CURRENT_TCB;
        switchToIdleThread();

        NODE_STATE(ksSchedulerAction) = SchedulerAction_ResumeCurrentThread;
    }
}
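/* Dispatch an incoming IPI: a remote-call IPI is handed to handleRemoteCall()
 * together with the globally stored call arguments, while a reschedule IPI
 * simply marks a reschedule as required. */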
void handleIPI(irq_t irq, bool_t irqPath)
{
    if (irq == irq_remote_call_ipi) {
        handleRemoteCall(remoteCall, get_ipi_arg(0), get_ipi_arg(1), get_ipi_arg(2), irqPath);
    } else if (irq == irq_reschedule_ipi) {
        rescheduleRequired();
    } else {
        fail("Invalid IPI");
    }
}
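/* Perform a blocking remote call on every core set in 'mask' except the current
 * core: store the call arguments, send the remote-call IPI to each target and
 * wait on 'totalCoreBarrier' until all targets have handled it. */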
void doRemoteMaskOp(IpiRemoteCall_t func, word_t data1, word_t data2, word_t data3, word_t mask)
{
    /* make sure the current core is not set in the mask */
    mask &= ~BIT(getCurrentCPUIndex());

    /* An empty mask can happen, e.g. when the caller tries to map a page table
     * in a newly created PD that has not been run on another core yet. Guard
     * against that case! */
    if (mask != 0) {
        init_ipi_args(func, data1, data2, data3, mask);

        /* make sure no resource access passes from this point */
        asm volatile("" ::: "memory");
        ipi_send_mask(irq_remote_call_ipi, mask, true);
        ipi_wait(totalCoreBarrier);
    }
}
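/* Send a non-blocking reschedule IPI to every core set in 'mask' except the
 * current core. */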
void doMaskReschedule(word_t mask)
{
    /* make sure the current core is not set in the mask */
    mask &= ~BIT(getCurrentCPUIndex());
    if (mask != 0) {
        ipi_send_mask(irq_reschedule_ipi, mask, false);
    }
}
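/* Send 'ipi' to every core set in 'mask'. For a blocking send, the pending-IPI
 * flag in each target's big kernel lock node is set first, so that a core
 * spinning for the lock notices the remote call; the actual IPIs are sent only
 * after a memory barrier. */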
void generic_ipi_send_mask(irq_t ipi, word_t mask, bool_t isBlocking)
{
    word_t nr_target_cores = 0;
    uint16_t target_cores[CONFIG_MAX_NUM_NODES];

    while (mask) {
        int index = wordBits - 1 - clzl(mask);
        if (isBlocking) {
            /* Mark the IPI as pending in the target's lock node; the IPI itself
             * is only sent after the memory barrier below */
            big_kernel_lock.node_owners[index].ipi = 1;
            target_cores[nr_target_cores] = index;
            nr_target_cores++;
        } else {
            ipi_send_target(ipi, cpuIndexToID(index));
        }
        mask &= ~BIT(index);
    }

    if (nr_target_cores > 0) {
        /* sending IPIs... */
        IPI_MEM_BARRIER;
        for (int i = 0; i < nr_target_cores; i++) {
            ipi_send_target(ipi, cpuIndexToID(target_cores[i]));
        }
    }
}
#endif /* ENABLE_SMP_SUPPORT */