1/*
2 * Copyright 2013, winocm. <winocm@icloud.com>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without modification,
6 * are permitted provided that the following conditions are met:
7 *
8 *   Redistributions of source code must retain the above copyright notice, this
9 *   list of conditions and the following disclaimer.
10 *
11 *   Redistributions in binary form must reproduce the above copyright notice, this
12 *   list of conditions and the following disclaimer in the documentation and/or
13 *   other materials provided with the distribution.
14 *
15 *   If you are going to use this software in any form that does not involve
16 *   releasing the source to this project or improving it, let me know beforehand.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
25 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
27 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/*
31 * BSD stuff for ARM.
32 */
33
34#include <mach_rt.h>
35#include <mach_debug.h>
36#include <mach_ldebug.h>
37
38#include <mach/kern_return.h>
39#include <mach/mach_traps.h>
40#include <mach/thread_status.h>
41#include <mach/vm_param.h>
42
43#include <kern/counters.h>
44#include <kern/cpu_data.h>
45#include <kern/mach_param.h>
46#include <kern/task.h>
47#include <kern/thread.h>
48#include <kern/sched_prim.h>
49#include <kern/misc_protos.h>
50#include <kern/assert.h>
51#include <kern/debug.h>
52#include <kern/spl.h>
53#include <kern/syscall_sw.h>
54#include <ipc/ipc_port.h>
55#include <vm/vm_kern.h>
56#include <vm/pmap.h>
57
58#define USRSTACK    0x2FE00000
59
60/*
61 * thread_userstack:
62 *
63 * Return the user stack pointer from the machine
64 * dependent thread state info.
65 */
66kern_return_t thread_userstack(thread_t thread, int flavor,
67                               thread_state_t tstate, unsigned int count,
68                               mach_vm_offset_t * user_stack, int *customstack)
69{
70    struct arm_thread_state *state;
71
72    if (!(*user_stack))
73        *user_stack = USRSTACK;
74
75    switch (flavor) {
76    case ARM_THREAD_STATE:
77        if (count < ARM_THREAD_STATE_COUNT)
78            return KERN_INVALID_ARGUMENT;
79
80        state = (struct arm_thread_state *) tstate;
81        *user_stack = state->sp ? state->sp : USRSTACK;
82        break;
83    default:
84        return KERN_INVALID_ARGUMENT;
85    }
86
87    return KERN_SUCCESS;
88}
89
/**
 * sanitise_cpsr
 *
 * Clear the bad bits off the CPSR and set ModeBits to 0x10 (user).
 */
uint32_t sanitise_cpsr(uint32_t cpsr)
{
    /* Strip the data-endianness (E, bit 9), IRQ-mask (I, bit 7) and
     * FIQ-mask (F, bit 6) bits in one pass. */
    uint32_t cleaned = cpsr & ~((1U << 9) | (1U << 7) | (1U << 6));

    /* Force the low five mode bits to 0b10000 (user mode). */
    return (cleaned & ~0x1FU) | 0x10U;
}
107
108/**
109 * thread_entrypoint
110 *
111 * Set the current thread entry point.
112 */
113kern_return_t thread_entrypoint(thread_t thread, int flavor,
114                                thread_state_t tstate, unsigned int count,
115                                mach_vm_offset_t * entry_point)
116{
117    struct arm_thread_state *state;
118
119    /*
120     * Set a default.
121     */
122    if (*entry_point == 0)
123        *entry_point = VM_MIN_ADDRESS;
124
125    switch (flavor) {
126    case ARM_THREAD_STATE:
127        if (count < ARM_THREAD_STATE_COUNT)
128            return (KERN_INVALID_ARGUMENT);
129        state = (struct arm_thread_state *) tstate;
130        *entry_point = state->pc ? state->pc : VM_MIN_ADDRESS;
131        break;
132    default:
133        return (KERN_INVALID_ARGUMENT);
134    }
135
136    return (KERN_SUCCESS);
137}
138
139/**
140 * thread_userstackdefault
141 *
142 * Set the default user stack.
143 */
144kern_return_t thread_userstackdefault(thread_t thread,
145                                      mach_vm_offset_t * default_user_stack)
146{
147    *default_user_stack = USRSTACK;
148    return (KERN_SUCCESS);
149}
150
151/*
152 *  thread_getstatus:
153 *
154 *  Get the status of the specified thread.
155 */
156kern_return_t machine_thread_get_state(thread_t thr_act, thread_flavor_t flavor,
157                                       thread_state_t tstate,
158                                       mach_msg_type_number_t * count)
159{
160    switch (flavor) {
161    case THREAD_STATE_FLAVOR_LIST:
162        {
163            if (*count < 3)
164                return (KERN_INVALID_ARGUMENT);
165
166            tstate[0] = ARM_THREAD_STATE;
167            tstate[1] = ARM_VFP_STATE;
168            tstate[2] = ARM_EXCEPTION_STATE;
169            *count = 3;
170            break;
171        }
172
173    case THREAD_STATE_FLAVOR_LIST_NEW:
174        {
175            if (*count < 4)
176                return (KERN_INVALID_ARGUMENT);
177
178            tstate[0] = ARM_THREAD_STATE;
179            tstate[1] = ARM_VFP_STATE;
180            tstate[2] = ARM_EXCEPTION_STATE;
181            tstate[3] = ARM_DEBUG_STATE;
182
183            *count = 4;
184            break;
185        }
186
187    case ARM_THREAD_STATE:
188        {
189            struct arm_thread_state *state;
190            struct arm_thread_state *saved_state;
191
192            if (*count < ARM_THREAD_STATE_COUNT)
193                return (KERN_INVALID_ARGUMENT);
194
195            state = (struct arm_thread_state *) tstate;
196            saved_state = (struct arm_thread_state *) thr_act->machine.uss;
197
198            /*
199             * First, copy everything:
200             */
201            ovbcopy((void*)saved_state, (void*)state, sizeof(struct arm_thread_state));
202
203            *count = ARM_THREAD_STATE_COUNT;
204            break;
205        }
206    case ARM_VFP_STATE:
207        {
208            struct arm_vfp_state *state;
209            struct arm_vfp_state *saved_state;
210
211            if (*count < ARM_VFP_STATE_COUNT)
212                return (KERN_INVALID_ARGUMENT);
213
214            state = (struct arm_vfp_state *) tstate;
215            saved_state = (struct arm_vfp_state *) &thr_act->machine.vfp_regs;
216
217            /*
218             * First, copy everything:
219             */
220            ovbcopy((void*)saved_state, (void*)state, sizeof(struct arm_vfp_state)); /* *state = *saved_state; */
221
222            *count = ARM_VFP_STATE_COUNT;
223            break;
224        }
225    case ARM_EXCEPTION_STATE:
226        {
227            struct arm_exception_state *state;
228            struct arm_exception_state *saved_state;
229
230            if (*count < ARM_EXCEPTION_STATE_COUNT)
231                return (KERN_INVALID_ARGUMENT);
232
233            state = (struct arm_exception_state *)tstate;
234            saved_state = (struct arm_exception_state *) &thr_act->machine.es;
235
236            /*
237             * First, copy everything:
238             */
239            ovbcopy((void*)saved_state, (void*)state, sizeof(struct arm_exception_state));
240
241            *count = ARM_EXCEPTION_STATE_COUNT;
242            break;
243        }
244    default:
245        return (KERN_INVALID_ARGUMENT);
246    }
247
248    return (KERN_SUCCESS);
249}
250
251/**
252 * machine_thread_set_state
253 *
254 * Set the current thread state.
255 */
256kern_return_t machine_thread_set_state(thread_t thread, thread_flavor_t flavor,
257                                       thread_state_t tstate,
258                                       mach_msg_type_number_t count)
259{
260
261    switch (flavor) {           /* Validate the count before we do anything else */
262    case ARM_THREAD_STATE:
263        if (count < ARM_THREAD_STATE_COUNT) {   /* Is it too short? */
264            return KERN_INVALID_ARGUMENT;   /* Yeah, just leave... */
265        }
266        break;
267    case ARM_DEBUG_STATE:
268        if (count < ARM_DEBUG_STATE_COUNT) {    /* Is it too short? */
269            return KERN_INVALID_ARGUMENT;   /* Yeah, just leave... */
270        }
271    case ARM_VFP_STATE:
272        if (count < ARM_VFP_STATE_COUNT) {  /* Is it too short? */
273            return KERN_INVALID_ARGUMENT;   /* Yeah, just leave... */
274        }
275        break;
276    case ARM_EXCEPTION_STATE:
277        if (count < ARM_EXCEPTION_STATE_COUNT) {    /* Is it too short? */
278            return KERN_INVALID_ARGUMENT;   /* Yeah, just leave... */
279        }
280        break;
281    default:
282        return KERN_INVALID_ARGUMENT;
283    }
284
285    /*
286     * Now set user registers.
287     */
288    assert(thread != NULL);
289    assert(tstate);
290
291    switch (flavor) {
292    case ARM_THREAD_STATE:
293        {
294        struct arm_thread_state *ts;
295
296        ts = (struct arm_thread_state *) tstate;
297
298        thread->machine.user_regs.r[0] = ts->r[0];
299        thread->machine.user_regs.r[1] = ts->r[1];
300        thread->machine.user_regs.r[2] = ts->r[2];
301        thread->machine.user_regs.r[3] = ts->r[3];
302        thread->machine.user_regs.r[4] = ts->r[4];
303        thread->machine.user_regs.r[5] = ts->r[5];
304        thread->machine.user_regs.r[6] = ts->r[6];
305        thread->machine.user_regs.r[7] = ts->r[7];
306        thread->machine.user_regs.r[8] = ts->r[8];
307        thread->machine.user_regs.r[9] = ts->r[9];
308        thread->machine.user_regs.r[10] = ts->r[10];
309        thread->machine.user_regs.r[11] = ts->r[11];
310        thread->machine.user_regs.r[12] = ts->r[12];
311        thread->machine.user_regs.sp = ts->sp;
312        thread->machine.user_regs.lr = ts->lr;
313        thread->machine.user_regs.pc = ts->pc;
314        thread->machine.user_regs.cpsr = sanitise_cpsr(ts->cpsr);
315
316        return KERN_SUCCESS;
317        }
318    case ARM_VFP_STATE:
319        {
320        int i;
321        struct arm_vfp_state *fs;
322
323        fs = (struct arm_vfp_state *) tstate;
324
325        for (i = 0; i <= 64; i++)
326            thread->machine.vfp_regs.r[i] = fs->r[i];
327        thread->machine.vfp_regs.fpscr = fs->fpscr;
328
329        return KERN_SUCCESS;
330        }
331    case ARM_EXCEPTION_STATE:
332        {
333        struct arm_exception_state *es;
334
335        es = (struct arm_exception_state *) tstate;
336
337        thread->machine.es.fsr = es->fsr;
338        thread->machine.es.far = es->far;
339        thread->machine.es.exception = es->exception;
340
341        return KERN_SUCCESS;
342        }
343    default:
344        return KERN_INVALID_ARGUMENT;
345    }
346
347    return KERN_INVALID_ARGUMENT;
348}
349
350/**
351 * thread_setuserstack
352 *
353 * Set a sepcified user stack for sp.
354 */
355void thread_setuserstack(thread_t thread, mach_vm_address_t user_stack)
356{
357    assert(thread);
358    thread->machine.user_regs.sp = CAST_DOWN(unsigned int, user_stack);
359}
360
361/**
362 * thread_adjuserstack
363 *
364 * Decrement/increment user stack sp by adj amount.
365 */
366uint64_t thread_adjuserstack(thread_t thread, int adj)
367{
368    assert(thread);
369    thread->machine.user_regs.sp += adj;
370    return thread->machine.user_regs.sp;
371}
372
373/**
374 * thread_setentrypoint
375 *
376 * Set the user pc/entrypoint.
377 */
378void thread_setentrypoint(thread_t thread, uint32_t entry)
379{
380    assert(thread);
381    thread->machine.user_regs.pc = entry;
382}
383
384/**
385 * thread_set_parent
386 *
387 * Set the parent owner of the specified thread.
388 */
389void thread_set_parent(thread_t parent, int pid)
390{
391    struct arm_thread_state *iss;
392    iss = parent->machine.uss;
393    assert(iss);
394
395    iss->r[0] = pid;
396    iss->r[1] = 0;
397    return;
398}
399
/*
 * Machine context snapshot used by act_thread_csave()/act_thread_catt():
 * the integer register state plus the VFP register state.
 */
struct arm_act_context {
    arm_saved_state_t ss;
    arm_vfp_state_t fs;
};
404
405/**
406 * act_thread_csave
407 *
408 * Save the current thread context, used for the internal uthread structure.
409 */
410void *act_thread_csave(void)
411{
412    kern_return_t kret;
413    mach_msg_type_number_t val;
414    thread_t thr_act = current_thread();
415
416    struct arm_act_context *ic;
417
418    ic = (struct arm_act_context *)kalloc(sizeof(struct arm_act_context));
419
420    if (ic == (struct arm_act_context *)NULL)
421        return((void *)0);
422
423    val = ARM_THREAD_STATE_COUNT;
424    if (machine_thread_get_state(thr_act, ARM_THREAD_STATE, (thread_state_t) &ic->ss, &val) != KERN_SUCCESS) {
425        kfree(ic, sizeof(struct arm_act_context));
426        return((void *)0);
427    }
428
429    val = ARM_VFP_STATE_COUNT;
430    if (machine_thread_get_state(thr_act, ARM_THREAD_STATE, (thread_state_t) &ic->fs, &val) != KERN_SUCCESS) {
431        kfree(ic, sizeof(struct arm_act_context));
432        return((void *)0);
433    }
434
435    return (ic);
436}
437
438/**
439 * act_thread_catt
440 *
441 * Restore the current thread context, used for the internal uthread structure.
442 */
443void act_thread_catt(void *ctx)
444{
445    kern_return_t kret;
446    thread_t thr_act = current_thread();
447
448    struct arm_act_context *ic;
449
450    if (ctx == (void *)NULL)
451        return;
452
453    ic = (struct arm_act_context *)ctx;
454
455    if (machine_thread_set_state(thr_act, ARM_THREAD_STATE, (thread_state_t) &ic->ss, ARM_THREAD_STATE_COUNT) == KERN_SUCCESS)
456        (void)machine_thread_set_state(thr_act, ARM_VFP_STATE, (thread_state_t) &ic->fs, ARM_VFP_STATE_COUNT);
457
458    kfree(ic, sizeof(struct arm_act_context));
459}
460
461
462/**
463 * thread_set_child
464 *
465 * Set the child thread, used in forking.
466 */
467void thread_set_child(thread_t child, int pid)
468{
469    assert(child->machine.uss == &child->machine.user_regs);
470    child->machine.uss->r[0] = pid;
471    child->machine.uss->r[1] = 1;
472    return;
473}
474
475/**
476 * thread_set_wq_state32
477 *
478 * Set the thread state (used for psynch support).
479 */
480void thread_set_wq_state32(thread_t thread, thread_state_t tstate)
481{
482    arm_thread_state_t *state;
483    arm_saved_state_t *saved_state;
484    thread_t curth = current_thread();
485    spl_t s = 0;
486
487    saved_state = thread->machine.uss;
488    assert(thread->machine.uss == &thread->machine.user_regs);
489
490    state = (arm_thread_state_t *) tstate;
491
492    if (curth != thread) {
493        s = splsched();
494        thread_lock(thread);
495    }
496
497    bzero(saved_state, sizeof(arm_thread_state_t));
498    saved_state->r[0] = state->r[0];
499    saved_state->r[1] = state->r[1];
500    saved_state->r[2] = state->r[2];
501    saved_state->r[3] = state->r[3];
502    saved_state->r[4] = state->r[4];
503    saved_state->r[5] = state->r[5];
504
505    saved_state->sp = state->sp;
506    saved_state->lr = state->lr;
507    saved_state->pc = state->pc;
508    saved_state->cpsr = sanitise_cpsr(state->cpsr);
509
510    if (curth != thread) {
511        thread_unlock(thread);
512        splx(s);
513    }
514}
515