/*
 * Copyright 2013-2015 Olivier Houchard
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_SPINLOCK_HCLH_H
#define CK_SPINLOCK_HCLH_H

#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

#ifndef CK_F_SPINLOCK_HCLH
#define CK_F_SPINLOCK_HCLH
struct ck_spinlock_hclh {
        unsigned int wait;
        unsigned int splice;
        int cluster_id;
        struct ck_spinlock_hclh *previous;
};
typedef struct ck_spinlock_hclh ck_spinlock_hclh_t;

CK_CC_INLINE static void
ck_spinlock_hclh_init(struct ck_spinlock_hclh **lock,
    struct ck_spinlock_hclh *unowned,
    int cluster_id)
{

        unowned->previous = NULL;
        unowned->wait = false;
        unowned->splice = false;
        unowned->cluster_id = cluster_id;
        *lock = unowned;
        ck_pr_barrier();
        return;
}

CK_CC_INLINE static bool
ck_spinlock_hclh_locked(struct ck_spinlock_hclh **queue)
{
        struct ck_spinlock_hclh *head;
        bool r;

        head = ck_pr_load_ptr(queue);
        r = ck_pr_load_uint(&head->wait);
        ck_pr_fence_acquire();
        return r;
}
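
/*
 * Descriptive note on the hierarchical CLH acquisition implemented below:
 * a thread appends its node to its cluster's local queue. If the local
 * predecessor belongs to the same cluster and has not yet been spliced onto
 * the global queue, ownership is handed off entirely within the cluster.
 * Otherwise the tail of the local queue is spliced onto the global queue and
 * the thread waits on its global predecessor instead, which keeps most lock
 * hand-offs local to a cluster.
 */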
CK_CC_INLINE static void
ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_queue,
    struct ck_spinlock_hclh **local_queue,
    struct ck_spinlock_hclh *thread)
{
        struct ck_spinlock_hclh *previous, *local_tail;

        /* Indicate to the next thread on queue that they will have to block. */
        thread->wait = true;
        thread->splice = false;
        thread->cluster_id = (*local_queue)->cluster_id;
        /* Make sure previous->previous doesn't appear to be NULL */
        thread->previous = *local_queue;

        /* Serialize with respect to update of local queue. */
        ck_pr_fence_store_atomic();

        /* Mark current request as last request. Save reference to previous request. */
        previous = ck_pr_fas_ptr(local_queue, thread);
        thread->previous = previous;

        /* Wait until previous thread from the local queue is done with lock. */
        ck_pr_fence_load();
        if (previous->previous != NULL) {
                while (ck_pr_load_uint(&previous->wait) == true &&
                    ck_pr_load_int(&previous->cluster_id) == thread->cluster_id &&
                    ck_pr_load_uint(&previous->splice) == false)
                        ck_pr_stall();

                /* We're head of the global queue, we're done */
                if (ck_pr_load_int(&previous->cluster_id) == thread->cluster_id &&
                    ck_pr_load_uint(&previous->splice) == false)
                        return;
        }

        /* Now we need to splice the local queue into the global queue. */
        local_tail = ck_pr_load_ptr(local_queue);
        previous = ck_pr_fas_ptr(glob_queue, local_tail);

        ck_pr_store_uint(&local_tail->splice, true);

        /* Wait until previous thread from the global queue is done with lock. */
        while (ck_pr_load_uint(&previous->wait) == true)
                ck_pr_stall();

        ck_pr_fence_lock();
        return;
}
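
/*
 * Descriptive note on release: the lock is handed to the successor by
 * clearing the wait flag, and nodes are then recycled CLH-style. The caller
 * passes the address of its node pointer, which is redirected to the
 * predecessor's node; the caller owns that node for its next acquisition.
 */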
CK_CC_INLINE static void
ck_spinlock_hclh_unlock(struct ck_spinlock_hclh **thread)
{
        struct ck_spinlock_hclh *previous;

        /*
         * If there are waiters, they are spinning on the current node wait
         * flag. The flag is cleared so that the successor may complete an
         * acquisition. If the caller is pre-empted then the predecessor field
         * may be updated by a successor's lock operation. In order to avoid
         * this, save a copy of the predecessor before setting the flag.
         */
        previous = thread[0]->previous;

        /* We have to pay this cost anyways, use it as a compiler barrier too. */
        ck_pr_fence_unlock();
        ck_pr_store_uint(&(*thread)->wait, false);

        /*
         * Predecessor is guaranteed not to be spinning on previous request,
         * so update caller to use previous structure. This allows successor
         * all the time in the world to successfully read updated wait flag.
         */
        *thread = previous;
        return;
}
#endif /* CK_F_SPINLOCK_HCLH */
#endif /* CK_SPINLOCK_HCLH_H */
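
/*
 * Illustrative usage sketch, compile-guarded out of the header: one queue
 * head per cluster plus a single global queue head, each seeded with an
 * unowned node through ck_spinlock_hclh_init(). Every thread owns a node,
 * passes it to ck_spinlock_hclh_lock() and releases with the address of its
 * node pointer, after which that pointer refers to the recycled predecessor
 * node. The cluster count, allocation scheme and all example_* names below
 * are assumptions made only for this sketch.
 */
#if 0
#include <ck_spinlock.h>
#include <stdlib.h>

#define EXAMPLE_CLUSTERS 2

static ck_spinlock_hclh_t *example_glob_queue;
static ck_spinlock_hclh_t *example_local_queue[EXAMPLE_CLUSTERS];

static void
example_setup(void)
{
        int i;

        /* Error handling and cache-line alignment of nodes are omitted here. */
        ck_spinlock_hclh_init(&example_glob_queue,
            malloc(sizeof(ck_spinlock_hclh_t)), -1);
        for (i = 0; i < EXAMPLE_CLUSTERS; i++) {
                ck_spinlock_hclh_init(&example_local_queue[i],
                    malloc(sizeof(ck_spinlock_hclh_t)), i);
        }
        return;
}

static void
example_critical_section(int cluster, ck_spinlock_hclh_t **node)
{

        /* *node is this thread's queue node; unlock swaps it for the predecessor's. */
        ck_spinlock_hclh_lock(&example_glob_queue, &example_local_queue[cluster], *node);
        /* ... critical section ... */
        ck_spinlock_hclh_unlock(node);
        return;
}
#endif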