/*
 * Copyright 2017, Data61
 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
 * ABN 41 687 119 230.
 *
 * This software may be distributed and modified according to the terms of
 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
 * See "LICENSE_GPLv2.txt" for details.
 *
 * @TAG(DATA61_GPL)
 */

#include <config.h>
#include <mode/smp/ipi.h>
#include <smp/ipi.h>
#include <smp/lock.h>

#ifdef ENABLE_SMP_SUPPORT

static IpiModeRemoteCall_t remoteCall;   /* the remote call being requested */

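/* Record the requested remote call and its arguments in the shared IPI slots
 * before the IPI is sent; remote cores read them back in handleRemoteCall().
 * 'mask' is the set of cores involved, and its population count sets the size
 * of the completion barrier.
 *
 * The sender side lives in the generic SMP code; the pattern is roughly
 * (a sketch, see doRemoteMaskOp() and friends):
 *
 *     init_ipi_args(func, data1, data2, data3, mask);
 *     ipi_send_mask(irq_remote_call_ipi, mask, true);
 *     ipi_wait(totalCoreBarrier);
 */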
static inline void init_ipi_args(IpiModeRemoteCall_t func,
                                 word_t data1, word_t data2, word_t data3,
                                 word_t mask)
{
    remoteCall = func;
    ipi_args[0] = data1;
    ipi_args[1] = data2;
    ipi_args[2] = data3;

    /* get number of cores involved in this IPI */
    totalCoreBarrier = popcountl(mask);
}

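/* Executed on a remote core in response to irq_remote_call_ipi: dispatch the
 * call recorded by init_ipi_args(), then acknowledge the IPI and join the
 * completion barrier so the initiating core can continue. */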
static void handleRemoteCall(IpiModeRemoteCall_t call, word_t arg0,
                             word_t arg1, word_t arg2, bool_t irqPath)
{
    /* we can get spurious irq_remote_call_ipi calls, e.g. when handling an IPI
     * in the lock while a hardware IPI is still pending. Guard against spurious IPIs! */
    if (clh_is_ipi_pending(getCurrentCPUIndex())) {
        switch ((IpiRemoteCall_t)call) {
        case IpiRemoteCall_Stall:
            ipiStallCoreCallback(irqPath);
            break;

        case IpiRemoteCall_InvalidatePageStructureCacheASID:
            invalidateLocalPageStructureCacheASID(arg0, arg1);
            break;

        case IpiRemoteCall_InvalidateTranslationSingle:
            invalidateLocalTranslationSingle(arg0);
            break;

        case IpiRemoteCall_InvalidateTranslationSingleASID:
            invalidateLocalTranslationSingleASID(arg0, arg1);
            break;

        case IpiRemoteCall_InvalidateTranslationAll:
            invalidateLocalTranslationAll();
            break;

        case IpiRemoteCall_switchFpuOwner:
            switchLocalFpuOwner((user_fpu_state_t *)arg0);
            break;

#ifdef CONFIG_VTX
        case IpiRemoteCall_ClearCurrentVCPU:
            clearCurrentVCPU();
            break;
        case IpiRemoteCall_VMCheckBoundNotification:
            VMCheckBoundNotification((tcb_t *)arg0);
            break;
#endif
        default:
            Mode_handleRemoteCall(call, arg0, arg1, arg2);
            break;
        }

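        /* the call has been handled: clear this core's pending-IPI flag and
         * rendezvous with the other participating cores */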
        big_kernel_lock.node_owners[getCurrentCPUIndex()].ipi = 0;
        ipi_wait(totalCoreBarrier);
    }
}

/* make sure the CPU IDs of all cores fit in a single word-sized bitmask */
compile_assert(invalid_number_of_supported_nodes, CONFIG_MAX_NUM_NODES <= wordBits);

#ifdef CONFIG_USE_LOGICAL_IDS
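/* Send 'ipi' to every core in 'mask' using logical destination mode: the
 * logical IDs of cores sharing an APIC cluster are ORed together, so each
 * cluster needs only a single ICR write. If 'isBlocking', each target core's
 * pending-IPI flag in the big kernel lock is set; handleRemoteCall() guards on
 * this flag and clears it once the call has been handled. */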
static void x86_ipi_send_mask(interrupt_t ipi, word_t mask, bool_t isBlocking)
{
    word_t nr_target_clusters = 0;
    word_t target_clusters[CONFIG_MAX_NUM_NODES];

    do {
        int core = wordBits - 1 - clzl(mask);
        target_clusters[nr_target_clusters] = 0;

        /* get the mask of all other cores in 'mask' which are in the same cluster as 'core' */
        word_t sub_mask = mask & cpu_mapping.other_indexes_in_cluster[core];
        target_clusters[nr_target_clusters] |= cpu_mapping.index_to_logical_id[core];
        if (isBlocking) {
            big_kernel_lock.node_owners[core].ipi = 1;
        }

        /* fold any other cores of this cluster into the logical destination */
        while (sub_mask) {
            int index = wordBits - 1 - clzl(sub_mask);
            target_clusters[nr_target_clusters] |= cpu_mapping.index_to_logical_id[index];
            if (isBlocking) {
                big_kernel_lock.node_owners[index].ipi = 1;
            }
            sub_mask &= ~BIT(index);
        }

        mask &= ~(cpu_mapping.other_indexes_in_cluster[core] | BIT(core));
        nr_target_clusters++;
    } while (mask != 0);

    /* broadcast IPIs to clusters... */
    IPI_ICR_BARRIER;
    for (int i = 0; i < nr_target_clusters; i++) {
        apic_send_ipi_cluster(ipi, target_clusters[i]);
    }
}
#endif /* CONFIG_USE_LOGICAL_IDS */

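/* Deliver the IPI with irq number 'ipi' to every core in 'mask'. The irq is
 * first translated into its interrupt vector; with CONFIG_USE_LOGICAL_IDS the
 * IPIs go out per cluster, otherwise one core at a time via the generic path. */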
void ipi_send_mask(irq_t ipi, word_t mask, bool_t isBlocking)
{
    interrupt_t interrupt_ipi = ipi + IRQ_INT_OFFSET;

#ifdef CONFIG_USE_LOGICAL_IDS
    x86_ipi_send_mask(interrupt_ipi, mask, isBlocking);
#else
    generic_ipi_send_mask(interrupt_ipi, mask, isBlocking);
#endif /* CONFIG_USE_LOGICAL_IDS */
}
#endif /* ENABLE_SMP_SUPPORT */