1/*
2 * Copyright (c) 2013, 2014, University of Washington.
3 * All rights reserved.
4 *
5 * This file is distributed under the terms in the attached LICENSE file.
6 * If you do not find this file, copies can be found by writing to:
7 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
8 */
9
10#include <pthread.h>
11#include <pthread_np.h>
12#include <assert.h>
13#include <barrelfish/barrelfish.h>
14#include <errno.h>
15#include <string.h>
16#include <signal.h>
17
18#include <posixcompat.h> // for pthread_placement stuff
19
20#include "posixcompat.h"
21#include "pthreads_private.h"
22
// Function-pointer types for TLS key destructors and thread entry points.
typedef void (*destructor_fn_t)(void *);
typedef void *(*start_fn_t)(void *);

// Attributes attached to a mutex at init time (see pthread_mutexattr_*).
struct   pthread_mutex_attr
{
  int pshared;     // PTHREAD_PROCESS_PRIVATE or PTHREAD_PROCESS_SHARED
  int kind;        // PTHREAD_MUTEX_NORMAL / RECURSIVE / ERRORCHECK
  int robustness;  // set to 0 by pthread_mutex_init(); never read here
};
32
33
// A pthread mutex: a Barrelfish thread_mutex plus bookkeeping.
struct pthread_mutex {
    struct thread_mutex mutex;       // underlying Barrelfish mutex
    int locked;                      // lock bookkeeping counter, guarded by the global mutex_mutex
    struct pthread_mutex_attr attrs; // attributes captured at init time
};
39
40
41
42
// A pthread condition variable wrapping a Barrelfish thread_cond.
struct pthread_cond {
    struct thread_cond cond;
};
46
// Magic value marking a validly initialized rwlock; checked on every
// operation to catch use of uninitialized or corrupted locks.
#define PTHREADS_RWLOCK_MAGIC 0xdeadbeef

// Read-write lock modeled on the pthreads-win32 (ptw32) design:
// writers serialize on mtxExclusiveAccess and wait on
// cndSharedAccessCompleted until all admitted readers have finished.
struct pthread_rwlock
{
  pthread_mutex_t mtxExclusiveAccess;       // serializes writers (and reader admission)
  pthread_mutex_t mtxSharedAccessCompleted; // guards nCompletedSharedAccessCount
  pthread_cond_t cndSharedAccessCompleted;  // signaled when the last reader leaves
  int nSharedAccessCount;                   // readers admitted so far
  int nExclusiveAccessCount;                // writers currently holding the lock
  int nCompletedSharedAccessCount;          // readers finished; negative while a writer waits
  int nMagic;                               // PTHREADS_RWLOCK_MAGIC when valid
};
59
60
// Per-thread pthread descriptor; stored in TLS slot 0 (see pthread_self()).
struct pthread {
    struct thread *thread;  // underlying Barrelfish thread handle
    int core; //< for spanned domains core on which thread is running
    const void *keys[PTHREAD_KEYS_MAX]; // thread-specific data values, indexed by key
    start_fn_t start_fn;    // user-supplied entry point
    void *arg;              // argument passed to start_fn
    void *retval;           // result of start_fn, reported by pthread_join()
};

// Next unallocated TLS key index. Keys are handed out monotonically and
// never reused after pthread_key_delete().
static pthread_key_t key_index = 0;
// Protects key_index and destructors[].
static struct thread_mutex key_mutex = THREAD_MUTEX_INITIALIZER;
// Per-key destructor callback; NULL when none was registered.
static destructor_fn_t destructors[PTHREAD_KEYS_MAX];
73
/**
 * \brief Trampoline run as the entry point of every pthread.
 *
 * \param arg The struct pthread describing this thread.
 *
 * Publishes the descriptor in TLS slot 0 (so pthread_self() finds it),
 * runs the user's start routine, then invokes the destructors of all
 * thread-specific keys that still hold a non-NULL value.
 */
static int start_pthread(void *arg)
{
    struct pthread *myself = arg;

    // Initialize TLS
    thread_set_tls_key(0, myself);

    // Run the thread
    myself->retval = myself->start_fn(myself->arg);

    // Call all key destructors
    // NOTE(review): POSIX allows up to PTHREAD_DESTRUCTOR_ITERATIONS
    // repeated rounds if destructors re-set key values; this makes a
    // single pass only — confirm that is acceptable for callers.
    for(pthread_key_t i = 0; i < key_index; i++) {
        if ((destructors[i] != NULL) && (myself->keys[i] != NULL)) {
            void *value = (void *) myself->keys[i];
            myself->keys[i] = NULL;
            destructors[i](value);
        }
    }

    // 'myself' data structure is freed when joined with this thread
    return 0;
}
96
97/*
98 * Optional pthread placement policy for spanned domains
99 */
100static pthread_placement_fn pthread_placement = NULL;
101errval_t posixcompat_pthread_set_placement_fn(pthread_placement_fn fn)
102{
103    pthread_placement = fn;
104    return SYS_ERR_OK;
105}
106
107int pthread_create(pthread_t *pthread, const pthread_attr_t *attr,
108                   void *(*start_routine) (void *), void *arg)
109{
110    size_t stacksize = THREADS_DEFAULT_STACK_BYTES;
111
112    if(attr != NULL) {
113        stacksize = (*attr)->stacksize;
114    }
115
116    *pthread = malloc(sizeof(struct pthread));
117    assert(*pthread != NULL);
118    memset(*pthread, 0, sizeof(struct pthread));
119
120    // XXX: attributes are ignored.
121    (*pthread)->start_fn = start_routine;
122    (*pthread)->arg = arg;
123
124    // Start the thread
125    (*pthread)->core = disp_get_core_id();
126    if (attr && (*attr)->affinity_set) {
127        // Simple allocation policy: Pick the first core that is in the set
128        for (size_t i = 0; i < MAX_COREID; i++) {
129            if (CPU_ISSET(i, &(*attr)->affinity)) {
130                POSIXCOMPAT_DEBUG("pthread affinity: spawn new thread on core %zu\n", i);
131                (*pthread)->core = i;
132                break;
133            }
134        }
135    }
136    else if (pthread_placement) {
137        (*pthread)->core = pthread_placement(PTHREAD_ACTION_CREATE, 0);
138    }
139
140    struct thread *nt;
141    errval_t err = domain_thread_create_on_varstack(
142                     (*pthread)->core, start_pthread, *pthread, stacksize, &nt);
143    if (err_is_fail(err)) {
144        DEBUG_ERR(err, "pthread_create");
145        return 1;
146    }
147
148    (*pthread)->thread = nt;
149    POSIXCOMPAT_DEBUG("%s: %p -> %"PRIuPTR"\n", __FUNCTION__, *pthread,
150            thread_get_id((*pthread)->thread));
151    return 0;
152}
153
154pthread_t pthread_self(void)
155{
156    pthread_t self = thread_get_tls_key(0);
157
158    // If NULL, we're the first thread, not created via pthreads.
159    // Create a pthread structure.
160    if(self == NULL) {
161        struct pthread *myself = malloc(sizeof(struct pthread));
162        assert(myself != NULL);
163        memset(myself, 0, sizeof(struct pthread));
164        myself->thread = thread_self();
165        thread_set_tls_key(0, myself);
166        self = myself;
167    }
168
169    return self;
170}
171
172void *pthread_getspecific(pthread_key_t key)
173{
174    if(key >= PTHREAD_KEYS_MAX) {
175        return NULL;
176    }
177
178    return (void *)pthread_self()->keys[key];
179}
180
181int pthread_setspecific(pthread_key_t key, const void *val)
182{
183    if(key >= PTHREAD_KEYS_MAX) {
184        return EINVAL;
185    }
186
187    pthread_self()->keys[key] = val;
188    return 0;
189}
190
191int pthread_attr_init(pthread_attr_t *attr)
192{
193    *attr = malloc(sizeof(struct pthread_attr));
194    (*attr)->stacksize = THREADS_DEFAULT_STACK_BYTES;
195    CPU_ZERO(&(*attr)->affinity);
196    (*attr)->affinity_set = 0;
197    return 0;
198}
199
200int pthread_attr_destroy(pthread_attr_t *attr)
201{
202    if (!attr) {
203        return EINVAL;
204    }
205
206    if (*attr == NULL) {
207        return EINVAL;
208    }
209    free(*attr);
210
211    (*attr) = NULL;
212
213    return 0;
214}
215
// Global lock serializing lazy initialization of statically-initialized
// mutexes and condition variables (PTHREAD_MUTEX/COND_INITIALIZER), and
// guarding the per-mutex 'locked' bookkeeping counter.
static struct thread_mutex mutex_mutex = THREAD_MUTEX_INITIALIZER;
217
218int pthread_mutex_init(pthread_mutex_t *mutex,
219                       const pthread_mutexattr_t *attr)
220{
221    // XXX: Attributes ignored.
222    *mutex = malloc(sizeof(struct pthread_mutex));
223    if(*mutex == NULL) {
224        return -1;
225    }
226
227    thread_mutex_init(&(*mutex)->mutex);
228    (*mutex)->locked = 0;
229    if (attr && *attr) {
230        POSIXCOMPAT_DEBUG("kind = %u\n", (*attr)->kind);
231        memcpy(&(*mutex)->attrs, *attr, sizeof(struct pthread_mutex_attr));
232    } else {
233        (*mutex)->attrs.kind = PTHREAD_MUTEX_NORMAL;
234        (*mutex)->attrs.robustness = 0;
235        (*mutex)->attrs.pshared = PTHREAD_PROCESS_PRIVATE;
236    }
237    return  0;
238}
239
240int pthread_mutex_destroy(pthread_mutex_t *mutex)
241{
242    if(*mutex != PTHREAD_MUTEX_INITIALIZER) {
243        free(*mutex);
244    }
245
246    return 0;
247}
248
/**
 * \brief Lock a mutex, lazily materializing statically-initialized ones.
 *
 * The global mutex_mutex guards both the lazy init and the 'locked'
 * bookkeeping counter; the actual blocking happens on the underlying
 * thread_mutex. Recursive mutexes use the nested-lock variant.
 *
 * NOTE(review): 'locked' is incremented *before* the mutex is acquired,
 * so it counts holders plus waiters rather than only holders — confirm
 * this is intentional before relying on its value.
 */
int pthread_mutex_lock(pthread_mutex_t *mutex)
{
    thread_mutex_lock(&mutex_mutex);

    if(*mutex == PTHREAD_MUTEX_INITIALIZER) {
        pthread_mutex_init(mutex, NULL);
    }

    (*mutex)->locked++;
    thread_mutex_unlock(&mutex_mutex);
    if ((*mutex)->attrs.kind == PTHREAD_MUTEX_RECURSIVE) {
        thread_mutex_lock_nested(&(*mutex)->mutex);
    } else {
        thread_mutex_lock(&(*mutex)->mutex);
    }
    return 0;
}
266
/**
 * \brief Unlock a mutex, lazily materializing statically-initialized ones.
 *
 * Unlocking a mutex whose bookkeeping counter is already zero is a
 * silent no-op (returns 0) rather than an EPERM error.
 */
int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
    thread_mutex_lock(&mutex_mutex);

    if(*mutex == PTHREAD_MUTEX_INITIALIZER) {
        pthread_mutex_init(mutex, NULL);
    }

    // Not locked (per the bookkeeping counter): nothing to release
    if((*mutex)->locked == 0) {
        thread_mutex_unlock(&mutex_mutex);
        return 0;
    }

    (*mutex)->locked--;
    thread_mutex_unlock(&mutex_mutex);
    thread_mutex_unlock(&(*mutex)->mutex);
    return 0;
}
285
/**
 * \brief Try to lock a mutex without blocking.
 *
 * \return 0 if the lock was acquired, EBUSY if it is already held.
 *
 * NOTE(review): assumes thread_mutex_trylock() returns a truthy value
 * on successful acquisition — confirm against the Barrelfish API.
 */
int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
    thread_mutex_lock(&mutex_mutex);

    if(*mutex == PTHREAD_MUTEX_INITIALIZER) {
        pthread_mutex_init(mutex, NULL);
    }

    thread_mutex_unlock(&mutex_mutex);

    int retval = (thread_mutex_trylock(&(*mutex)->mutex) ? 0 : EBUSY);

    // Only bump the bookkeeping counter if we actually got the lock
    if(retval != EBUSY) {
        thread_mutex_lock(&mutex_mutex);
        (*mutex)->locked++;
        thread_mutex_unlock(&mutex_mutex);
    }

    return retval;
}
306
307int pthread_cond_init(pthread_cond_t *cond,
308			const pthread_condattr_t *attr)
309{
310    *cond = malloc(sizeof(struct pthread_cond));
311    if(*cond == NULL) {
312        return -1;
313    }
314
315    thread_cond_init(&(*cond)->cond);
316    return 0;
317}
318
319int pthread_cond_signal(pthread_cond_t *cond)
320{
321    thread_mutex_lock(&mutex_mutex);
322    if(*cond == PTHREAD_COND_INITIALIZER) {
323        pthread_cond_init(cond, NULL);
324    }
325    thread_mutex_unlock(&mutex_mutex);
326
327    thread_cond_signal(&(*cond)->cond);
328    return 0;
329}
330
/**
 * \brief Timed wait on a condition variable — NOT YET IMPLEMENTED.
 *
 * Lazily materializes statically-initialized cond/mutex objects, then
 * aborts unconditionally: the underlying thread_cond API used here has
 * no timeout-capable wait. The 'timeout' parameter is never read.
 */
int pthread_cond_timedwait(pthread_cond_t *cond,
                           pthread_mutex_t *mutex,
                           const struct timespec *timeout)
{
    thread_mutex_lock(&mutex_mutex);
    if(*cond == PTHREAD_COND_INITIALIZER) {
        pthread_cond_init(cond, NULL);
    }
    if(*mutex == PTHREAD_MUTEX_INITIALIZER) {
        pthread_mutex_init(mutex, NULL);
    }
    thread_mutex_unlock(&mutex_mutex);

    assert(!"NYI");
    return -1;
}
347
348int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
349{
350    thread_mutex_lock(&mutex_mutex);
351    if(*cond == PTHREAD_COND_INITIALIZER) {
352        pthread_cond_init(cond, NULL);
353    }
354    if(*mutex == PTHREAD_MUTEX_INITIALIZER) {
355        pthread_mutex_init(mutex, NULL);
356    }
357    thread_mutex_unlock(&mutex_mutex);
358
359    thread_cond_wait(&(*cond)->cond, &(*mutex)->mutex);
360    return 0;
361}
362
363int pthread_cond_broadcast(pthread_cond_t *cond)
364{
365    thread_mutex_lock(&mutex_mutex);
366
367    if(*cond == PTHREAD_COND_INITIALIZER) {
368         pthread_cond_init(cond, NULL);
369    }
370    thread_mutex_unlock(&mutex_mutex);
371
372    thread_cond_broadcast(&(*cond)->cond);
373
374    return 0;
375}
376
377int pthread_cond_destroy(pthread_cond_t *cond)
378{
379    if (cond != NULL) {
380        free(*cond);
381    }
382    return 0;
383}
384
/**
 * \brief Wait for 'thread' to terminate and reap its resources.
 *
 * The user return value is read from thread->retval (stored there by
 * start_pthread) rather than from domain_thread_join. The pthread
 * descriptor is freed here, so the handle is invalid afterwards.
 */
int pthread_join(pthread_t thread, void **retval)
{
    POSIXCOMPAT_DEBUG("%s: %p\n", __FUNCTION__, thread);
    errval_t err = domain_thread_join(thread->thread, NULL);
    assert(err_is_ok(err));

    // Tell the placement policy a thread left its core
    if (pthread_placement) {
        pthread_placement(PTHREAD_ACTION_DESTROY, thread->core);
    }

    if (retval != NULL) {
        *retval = thread->retval;
    }
    free(thread);
    return 0;
}
401
402int pthread_key_create(pthread_key_t *key,
403                       void (*callback) (void *))
404{
405    int retval = 0;
406
407    thread_mutex_lock(&key_mutex);
408
409    if(key_index == PTHREAD_KEYS_MAX) {
410        retval = EAGAIN;
411        goto out;
412    }
413
414    *key = key_index;
415    destructors[key_index] = callback;
416    key_index++;
417
418 out:
419    thread_mutex_unlock(&key_mutex);
420    return retval;
421}
422
423int pthread_key_delete(pthread_key_t key)
424{
425    thread_mutex_lock(&key_mutex);
426
427    int result = EINVAL;
428    if ((key < PTHREAD_KEYS_MAX) && (destructors[key] != NULL)) {
429        destructors[key] = NULL;
430        result = 0;
431    }
432
433    thread_mutex_unlock(&key_mutex);
434    return result;
435}
436
437int pthread_mutexattr_init(pthread_mutexattr_t *attr)
438{
439    int result = 0;
440    pthread_mutexattr_t ma;
441
442    ma = (pthread_mutexattr_t) calloc (1, sizeof (*ma));
443
444    if (ma == NULL) {
445        result = ENOMEM;
446    }
447    else {
448        ma->pshared = PTHREAD_PROCESS_PRIVATE;
449        ma->kind = PTHREAD_MUTEX_DEFAULT;
450    }
451
452    *attr = ma;
453
454    return result;
455}
456
457int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
458{
459    int result = 0;
460
461    if (attr == NULL || *attr == NULL) {
462        result = EINVAL;
463    } else {
464        pthread_mutexattr_t ma = *attr;
465
466        *attr = NULL;
467        free (ma);
468    }
469
470    return result;
471}
472
473int pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr,
474                                 int *pshared)
475{
476    int result;
477
478    if ((attr != NULL && *attr != NULL) && (pshared != NULL)) {
479        *pshared = (*attr)->pshared;
480        result = 0;
481    } else {
482        result = EINVAL;
483    }
484
485    return result;
486}
487
488int pthread_mutexattr_gettype(pthread_mutexattr_t *attr, int *type)
489{
490    int result = 0;
491
492    if (attr != NULL && *attr != NULL && type != NULL) {
493        *type = (*attr)->kind;
494    } else {
495        result = EINVAL;
496    }
497
498    return result;
499}
500
501int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
502{
503    int result = 0;
504
505    if ((attr != NULL && *attr != NULL)) {
506        switch (type) {
507            case PTHREAD_MUTEX_NORMAL:
508            case PTHREAD_MUTEX_RECURSIVE:
509            case PTHREAD_MUTEX_ERRORCHECK:
510                (*attr)->kind = type;
511                break;
512            default:
513                result = EINVAL;
514                break;
515        }
516    } else {
517        result = EINVAL;
518    }
519
520    return result;
521}
522
/**
 * \brief Set the process-shared attribute of a mutex attribute object.
 *
 * When _POSIX_THREAD_PROCESS_SHARED is not defined, requesting
 * PTHREAD_PROCESS_SHARED returns ENOSYS and the attribute is silently
 * downgraded to PTHREAD_PROCESS_PRIVATE (and still stored).
 *
 * \return 0 on success, EINVAL for bad arguments, ENOSYS as above.
 */
int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
    int result;

    if ((attr != NULL && *attr != NULL)
         && ((pshared == PTHREAD_PROCESS_SHARED)
                  || (pshared == PTHREAD_PROCESS_PRIVATE))) {

        if (pshared == PTHREAD_PROCESS_SHARED) {
#if !defined( _POSIX_THREAD_PROCESS_SHARED )
            // Process-shared mutexes unsupported: report ENOSYS but fall
            // back to a private mutex rather than leaving attr untouched.
            result = ENOSYS;
            pshared = PTHREAD_PROCESS_PRIVATE;
#else
            result = 0;
#endif /* _POSIX_THREAD_PROCESS_SHARED */
        } else {
            result = 0;
        }

        (*attr)->pshared = pshared;
    } else {
        result = EINVAL;
    }

    return (result);
}
549
550int pthread_barrier_init(pthread_barrier_t *barrier,
551			const pthread_barrierattr_t *attr,
552			unsigned max_count)
553{
554	barrier->count = 0;
555	barrier->max_count = max_count;
556
557	thread_sem_init(&barrier->mutex, 1);
558	thread_sem_init(&barrier->barrier, 0);
559	thread_sem_init(&barrier->reset, 1);
560
561	return 0;
562}
563
/**
 * \brief Wait until max_count threads have arrived at the barrier.
 *
 * Classic two-phase (double-turnstile) semaphore barrier:
 *  - Arrival: the last thread to arrive closes 'reset' and opens
 *    'barrier'; every thread then pulses through 'barrier' (wait+post).
 *  - Departure: the last thread to leave closes 'barrier' again and
 *    opens 'reset'; every thread pulses through 'reset'. This makes the
 *    barrier safely reusable for the next round.
 *
 * \return 0 for every caller (PTHREAD_BARRIER_SERIAL_THREAD is not used).
 */
int pthread_barrier_wait(pthread_barrier_t *barrier)
{
	// waiting at the barrier
	thread_sem_wait(&barrier->mutex);
	barrier->count++;
	if (barrier->count == barrier->max_count) {
		// last arrival: block reuse, release the waiters
		thread_sem_wait(&barrier->reset);
		thread_sem_post(&barrier->barrier);
	}
	thread_sem_post(&barrier->mutex);

	// turnstile 1: each thread passes the token along
	thread_sem_wait(&barrier->barrier);
	thread_sem_post(&barrier->barrier);

	// reseting the barrier to be reused further
	thread_sem_wait(&barrier->mutex);
	barrier->count--;
	if (barrier->count == 0) {
		// last departure: re-close the barrier, allow reuse
		thread_sem_wait(&barrier->barrier);
		thread_sem_post(&barrier->reset);
	}
	thread_sem_post(&barrier->mutex);

	// turnstile 2: drain through 'reset' before the next round
	thread_sem_wait(&barrier->reset);
	thread_sem_post(&barrier->reset);

	return 0;
}
592
593
594int pthread_barrier_destroy(pthread_barrier_t *barrier)
595{
596	// no dynamically allocated objects to be freed
597	return 0;
598}
599
600int pthread_equal(pthread_t pt1, pthread_t pt2)
601{
602    if (pt1 == NULL && pt2 == NULL) {
603        return 1;
604    }
605    return pt1->thread == pt2->thread;
606}
607
608int pthread_rwlock_init(pthread_rwlock_t *rwlock,
609            const pthread_rwlockattr_t *attr)
610{
611    pthread_rwlock_t rwl;
612
613    rwl = calloc(1, sizeof(struct pthread_rwlock));
614    if (rwl == NULL) {
615        return ENOMEM;
616    }
617
618    rwl->nMagic = PTHREADS_RWLOCK_MAGIC;
619    rwl->mtxExclusiveAccess = PTHREAD_MUTEX_INITIALIZER;
620    rwl->mtxSharedAccessCompleted = PTHREAD_MUTEX_INITIALIZER;
621
622    pthread_cond_init (&rwl->cndSharedAccessCompleted, NULL);
623    *rwlock = rwl;
624
625    return 0;
626}
627
/**
 * \brief Release a read-write lock (reader or writer).
 *
 * Reader release: bump nCompletedSharedAccessCount under its mutex; a
 * writer waiting in pthread_rwlock_wrlock() biased the counter negative,
 * so it reaching zero means "all admitted readers done" and is signaled.
 *
 * Writer release: drop both internal mutexes, which wrlock()
 * intentionally left held for the duration of the write lock.
 */
int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
    int result, result1;
    pthread_rwlock_t rwl;

    if (rwlock == NULL || *rwlock == NULL) {
        return (EINVAL);
    }

    // Lazily materialize a statically-initialized rwlock
    if (*rwlock == PTHREAD_RWLOCK_INITIALIZER) {
        result = pthread_rwlock_init(rwlock, NULL);
        if (result) {
            return result;
        }
    }

    rwl = *rwlock;

    if (rwl->nMagic != PTHREADS_RWLOCK_MAGIC) {
        return EINVAL;
    }

    if (rwl->nExclusiveAccessCount == 0) {
        // Reader path
        if ((result = pthread_mutex_lock (&(rwl->mtxSharedAccessCompleted))) != 0) {
            return result;
        }

        // Counter crossing zero: the waiting writer's quota is satisfied
        if (++rwl->nCompletedSharedAccessCount == 0) {
            result = pthread_cond_signal (&(rwl->cndSharedAccessCompleted));
        }

        result1 = pthread_mutex_unlock (&(rwl->mtxSharedAccessCompleted));
    } else {
        // Writer path: release both mutexes held since wrlock()
        rwl->nExclusiveAccessCount--;
        result = pthread_mutex_unlock (&(rwl->mtxSharedAccessCompleted));
        result1 = pthread_mutex_unlock (&(rwl->mtxExclusiveAccess));
    }

    // First failure wins
    return ((result != 0) ? result : result1);
}
668
/**
 * \brief Acquire a read-write lock for writing (ptw32-style).
 *
 * Takes mtxExclusiveAccess (serializing against other writers and new
 * readers), then mtxSharedAccessCompleted, then waits until all readers
 * admitted so far have finished: the reader quota is expressed by
 * setting nCompletedSharedAccessCount to -nSharedAccessCount, and
 * pthread_rwlock_unlock() signals when it climbs back to zero.
 *
 * On success BOTH internal mutexes remain held; they are released by
 * the writer path of pthread_rwlock_unlock().
 *
 * NOTE(review): the original ptw32 cancellation cleanup handlers are
 * commented out, so a failed/cancelled cond wait can leave the internal
 * mutexes held — confirm cancellation is not used with these locks.
 */
int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
    int result;
    pthread_rwlock_t rwl;

    if (rwlock == NULL || *rwlock == NULL) {
        return (EINVAL);
    }

    // Lazily materialize a statically-initialized rwlock
    if (*rwlock == PTHREAD_RWLOCK_INITIALIZER) {
        result = pthread_rwlock_init(rwlock, NULL);
        if (result) {
            return result;
        }
    }

    rwl = *rwlock;

    if (rwl->nMagic != PTHREADS_RWLOCK_MAGIC) {
        return EINVAL;
    }

    if ((result = pthread_mutex_lock (&(rwl->mtxExclusiveAccess))) != 0) {
        return result;
    }

    if ((result = pthread_mutex_lock (&(rwl->mtxSharedAccessCompleted))) != 0) {
        (void) pthread_mutex_unlock (&(rwl->mtxExclusiveAccess));
        return result;
    }

    if (rwl->nExclusiveAccessCount == 0) {
        // Fold already-completed readers into the admitted count
        if (rwl->nCompletedSharedAccessCount > 0) {
            rwl->nSharedAccessCount -= rwl->nCompletedSharedAccessCount;
            rwl->nCompletedSharedAccessCount = 0;
        }

        if (rwl->nSharedAccessCount > 0) {
            // Bias negative: unlock() signals when this reaches zero
            rwl->nCompletedSharedAccessCount = -rwl->nSharedAccessCount;

            /*
             * This routine may be a cancelation point
             * according to POSIX 1003.1j section 18.1.2.
             */
           // pthread_cleanup_push (ptw32_rwlock_cancelwrwait, (void *) rwl);

            do {
                result = pthread_cond_wait (&(rwl->cndSharedAccessCompleted),
                                            &(rwl->mtxSharedAccessCompleted));
            } while (result == 0 && rwl->nCompletedSharedAccessCount < 0);

            //pthread_cleanup_pop ((result != 0) ? 1 : 0);

            if (result == 0) {
                rwl->nSharedAccessCount = 0;
            }
        }
    }

    if (result == 0) {
        rwl->nExclusiveAccessCount++;
    }

    return result;
}
734
/**
 * \brief Acquire a read-write lock for reading (ptw32-style).
 *
 * Readers are admitted under mtxExclusiveAccess (so they queue behind a
 * writer holding it) and merely counted in nSharedAccessCount. When the
 * admitted-reader counter is about to overflow (hits 0xFFFFFFFF) the
 * completed-reader count is folded back in under the second mutex.
 */
int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
  int result;
  pthread_rwlock_t rwl;

  if (rwlock == NULL || *rwlock == NULL) {
      return EINVAL;
  }

  /*
   * We do a quick check to see if we need to do more work
   * to initialise a static rwlock. We check
   * again inside the guarded section of ptw32_rwlock_check_need_init()
   * to avoid race conditions.
   */
   if (*rwlock == PTHREAD_RWLOCK_INITIALIZER) {
        result = pthread_rwlock_init(rwlock, NULL);
        if (result) {
            return result;
        }
    }

  rwl = *rwlock;

  if (rwl->nMagic != PTHREADS_RWLOCK_MAGIC) {
      return EINVAL;
  }

  if ((result = pthread_mutex_lock (&(rwl->mtxExclusiveAccess))) != 0) {
      return result;
  }

  // Counter saturating: fold completed readers back into the admitted
  // count so both counters stay small
  if (++rwl->nSharedAccessCount == 0xFFFFFFFF) {
      if ((result = pthread_mutex_lock (&(rwl->mtxSharedAccessCompleted))) != 0) {
          (void) pthread_mutex_unlock (&(rwl->mtxExclusiveAccess));
          return result;
      }

      rwl->nSharedAccessCount -= rwl->nCompletedSharedAccessCount;
      rwl->nCompletedSharedAccessCount = 0;

      if ((result = pthread_mutex_unlock (&(rwl->mtxSharedAccessCompleted))) != 0) {
          (void) pthread_mutex_unlock (&(rwl->mtxExclusiveAccess));
          return result;
      }
  }

  return (pthread_mutex_unlock (&(rwl->mtxExclusiveAccess)));
}
784
785
786int pthread_once(pthread_once_t *ctrl, void (*init) (void))
787{
788    if (ctrl == NULL || init == NULL) {
789        return EINVAL;
790    }
791    thread_once(&ctrl->state, init);
792    return 0;
793}
794
795/**
796 * The function sets the CPU affinity mask of the thread
797 * to the CPU set pointed to by cpuset.
798 * If the call is successful, and the thread is not currently
799 * running on one of the CPUs in cpuset, then it is migrated to one of
800 * those CPUs.
801 *
802 * \retval EFAULT A supplied memory address was invalid.
803 * \retval EINVAL The affinity bit mask mask contains no processors that are
804 * currently physically on the system (TODO).
805 *
806 **/
807int pthread_attr_setaffinity_np(pthread_attr_t *attr,
808                   size_t cpusetsize, const cpuset_t *cpuset)
809{
810    if (attr == NULL || cpuset == NULL) {
811        return EFAULT;
812    }
813
814    // TODO: Query octopus to check that the affinity mask is sane (EINVAL)!
815    memcpy(&(*attr)->affinity, cpuset, cpusetsize);
816    (*attr)->affinity_set = true;
817    return 0;
818}
819
820int pthread_setcancelstate(int state, int *oldstate)
821{
822    // XXX: Not supported
823    if(oldstate != NULL) {
824        *oldstate = PTHREAD_CANCEL_ENABLE;
825    }
826    return 0;
827}
828
829int pthread_setcanceltype(int type, int *oldtype)
830{
831    // XXX: Not supported
832    if(oldtype != NULL) {
833        *oldtype = PTHREAD_CANCEL_DEFERRED;
834    }
835    return 0;
836}
837
838int pthread_sigmask(int how, const sigset_t *set, sigset_t *oldset)
839{
840    return sigprocmask(how, set, oldset);
841}
842
843int pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
844{
845    *stacksize = (*attr)->stacksize;
846    return 0;
847}
848
849int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
850{
851    (*attr)->stacksize = stacksize;
852    return 0;
853}
854
855int pthread_cancel(pthread_t thread)
856{
857    assert(!"NYI");
858    return -1;
859}
860