/***********************license start***************
 * Copyright (c) 2003-2010  Cavium Inc. (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Inc. nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export
 * control laws, including the U.S. Export Administration Act and its
 * associated regulations, and may be subject to export or import
 * regulations in other countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/


/**
 * @file
 *
 * Implementation of spinlocks.
 *
 * <hr>$Revision: 70030 $<hr>
 */

#ifndef __CVMX_SPINLOCK_H__
#define __CVMX_SPINLOCK_H__

#include "cvmx-asm.h"

#ifdef	__cplusplus
extern "C" {
#endif

/* Spinlocks for Octeon */


// define this to enable recursive spinlock debugging
//#define CVMX_SPINLOCK_DEBUG

/**
 * Spinlocks for Octeon
 */
typedef struct {
    volatile uint32_t value;
} cvmx_spinlock_t;

// note: macros are not expanded inside the inline asm below, so these
// values are hardcoded there
#define  CVMX_SPINLOCK_UNLOCKED_VAL  0
#define  CVMX_SPINLOCK_LOCKED_VAL    1

#define CVMX_SPINLOCK_UNLOCKED_INITIALIZER  {CVMX_SPINLOCK_UNLOCKED_VAL}

/**
 * Initialize a spinlock
 *
 * @param lock   Lock to initialize
 */
static inline void cvmx_spinlock_init(cvmx_spinlock_t *lock)
{
    lock->value = CVMX_SPINLOCK_UNLOCKED_VAL;
}
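
/*
 * Usage sketch (illustrative only; "my_lock" and "setup" are hypothetical
 * names): a lock can be initialized statically with the initializer macro
 * or at runtime with cvmx_spinlock_init().
 *
 *     static cvmx_spinlock_t my_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
 *
 *     void setup(void)
 *     {
 *         cvmx_spinlock_init(&my_lock);  // equivalent to the macro form
 *     }
 */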

/**
 * Return non-zero if the spinlock is currently locked
 *
 * @param lock   Lock to check
 * @return Non-zero if locked
 */
static inline int cvmx_spinlock_locked(cvmx_spinlock_t *lock)
{
    return (lock->value != CVMX_SPINLOCK_UNLOCKED_VAL);
}

/**
 * Releases the lock
 *
 * @param lock   pointer to lock structure
 */
static inline void cvmx_spinlock_unlock(cvmx_spinlock_t *lock)
{
    CVMX_SYNCWS;
    lock->value = 0;
    CVMX_SYNCWS;
}

/**
 * Attempts to take the lock, but does not spin if the lock is not
 * available.  May take some time to acquire the lock even if it is
 * available, due to the ll/sc not succeeding.
 *
 * @param lock   pointer to lock structure
 *
 * @return 0: lock successfully taken
 *         1: lock not taken, held by someone else
 * These return values match the Linux semantics.
 */
static inline unsigned int cvmx_spinlock_trylock(cvmx_spinlock_t *lock)
{
    unsigned int tmp;

    __asm__ __volatile__(
    ".set noreorder         \n"
    "1: ll   %[tmp], %[val] \n"
    "   bnez %[tmp], 2f     \n"  // if lock held, fail immediately
    "   li   %[tmp], 1      \n"
    "   sc   %[tmp], %[val] \n"
    "   beqz %[tmp], 1b     \n"  // retry if the sc failed
    "   li   %[tmp], 0      \n"
    "2:                     \n"
    ".set reorder           \n"
    : [val] "+m" (lock->value), [tmp] "=&r" (tmp)
    :
    : "memory");

    return (!!tmp);  /* normalize to 0 or 1 */
}
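
/*
 * Usage sketch (illustrative only; the lock, counter, and fallback shown
 * are hypothetical): since trylock returns 0 on success, the result can
 * be tested directly, much like pthread_mutex_trylock().
 *
 *     static cvmx_spinlock_t stats_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
 *     static uint64_t stats_drops;
 *
 *     if (cvmx_spinlock_trylock(&stats_lock) == 0)
 *     {
 *         stats_drops++;                      // got the lock
 *         cvmx_spinlock_unlock(&stats_lock);
 *     }
 *     else
 *     {
 *         // lock busy; do other work instead of spinning
 *     }
 */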

/**
 * Gets the lock, spinning until the lock is acquired
 *
 * @param lock   pointer to lock structure
 */
static inline void cvmx_spinlock_lock(cvmx_spinlock_t *lock)
{
    unsigned int tmp;

    __asm__ __volatile__(
    ".set noreorder         \n"
    "1: ll   %[tmp], %[val] \n"
    "   bnez %[tmp], 1b     \n"  // spin while the lock is held
    "   li   %[tmp], 1      \n"
    "   sc   %[tmp], %[val] \n"
    "   beqz %[tmp], 1b     \n"  // retry if the sc failed
    "   nop                 \n"
    ".set reorder           \n"
    : [val] "+m" (lock->value), [tmp] "=&r" (tmp)
    :
    : "memory");
}
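
/*
 * Usage sketch (illustrative only; the lock and counter are hypothetical):
 * the classic critical-section pattern built from lock/unlock.
 *
 *     static cvmx_spinlock_t counter_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
 *     static uint64_t shared_counter;
 *
 *     static void counter_increment(void)
 *     {
 *         cvmx_spinlock_lock(&counter_lock);    // spins until acquired
 *         shared_counter++;                     // protected update
 *         cvmx_spinlock_unlock(&counter_lock);  // SYNCWS, then store of 0
 *     }
 */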


/** ********************************************************************
 * Bit spinlocks
 * These spinlocks use a single bit (bit 31) of a 32-bit word for locking.
 * The rest of the bits in the word are left undisturbed.  This enables
 * more compact data structures, as only one bit is consumed for the lock.
 *
 */

/**
 * Gets the lock, spinning until the lock is acquired.
 * Preserves the low 31 bits of the 32-bit
 * word used for the lock.
 *
 * @param word  word to lock bit 31 of
 */
static inline void cvmx_spinlock_bit_lock(uint32_t *word)
{
    unsigned int tmp;

    __asm__ __volatile__(
    ".set noreorder               \n"
    ".set noat                    \n"
    "1: ll    %[tmp], %[val]      \n"
    "   bbit1 %[tmp], 31, 1b      \n"  // spin while bit 31 is set
    "   li    $at, 1              \n"
    "   ins   %[tmp], $at, 31, 1  \n"  // set bit 31, preserve the rest
    "   sc    %[tmp], %[val]      \n"
    "   beqz  %[tmp], 1b          \n"  // retry if the sc failed
    "   nop                       \n"
    ".set at                      \n"
    ".set reorder                 \n"
    : [val] "+m" (*word), [tmp] "=&r" (tmp)
    :
    : "memory");
}

/**
 * Attempts to get the lock, returning immediately with success/failure.
 * Preserves the low 31 bits of the 32-bit
 * word used for the lock.
 *
 * @param word  word to lock bit 31 of
 * @return 0: lock successfully taken
 *         1: lock not taken, held by someone else
 * These return values match the Linux semantics.
 */
static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
{
    unsigned int tmp;

    __asm__ __volatile__(
    ".set noreorder               \n"
    ".set noat                    \n"
    "1: ll    %[tmp], %[val]      \n"
    "   bbit1 %[tmp], 31, 2f      \n"  // if lock held, fail immediately
    "   li    $at, 1              \n"
    "   ins   %[tmp], $at, 31, 1  \n"  // set bit 31, preserve the rest
    "   sc    %[tmp], %[val]      \n"
    "   beqz  %[tmp], 1b          \n"  // retry if the sc failed
    "   li    %[tmp], 0           \n"
    "2:                           \n"
    ".set at                      \n"
    ".set reorder                 \n"
    : [val] "+m" (*word), [tmp] "=&r" (tmp)
    :
    : "memory");

    return (!!tmp);  /* normalize to 0 or 1 */
}

/**
 * Releases the bit lock
 *
 * Unconditionally clears bit 31 of the lock word.  Note that this is
 * done non-atomically, as this implementation assumes that the rest
 * of the bits in the word are protected by the lock.
 *
 * @param word  word to unlock bit 31 in
 */
static inline void cvmx_spinlock_bit_unlock(uint32_t *word)
{
    CVMX_SYNCWS;
    *word &= ~(1UL << 31);
    CVMX_SYNCWS;
}
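
/*
 * Usage sketch (illustrative only; the packing shown is hypothetical):
 * a bit spinlock lets one 32-bit word carry both the lock (bit 31) and
 * 31 bits of data, here a queue head index.
 *
 *     static uint32_t queue_word;  // bit 31 = lock, bits 0..30 = head index
 *
 *     static void queue_set_head(uint32_t head)
 *     {
 *         cvmx_spinlock_bit_lock(&queue_word);
 *         // keep bit 31 set while updating the low 31 bits
 *         queue_word = (1U << 31) | (head & 0x7FFFFFFF);
 *         cvmx_spinlock_bit_unlock(&queue_word);  // clears only bit 31
 *     }
 */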


/** ********************************************************************
 * Recursive spinlocks
 */
typedef struct {
	volatile unsigned int value;
	volatile unsigned int core_num;
} cvmx_spinlock_rec_t;


/**
 * Initialize a recursive spinlock
 *
 * @param lock   Lock to initialize
 */
static inline void cvmx_spinlock_rec_init(cvmx_spinlock_rec_t *lock)
{
    lock->value = CVMX_SPINLOCK_UNLOCKED_VAL;
}


/**
 * Return non-zero if the recursive spinlock is currently locked
 *
 * @param lock   Lock to check
 * @return Non-zero if locked
 */
static inline int cvmx_spinlock_rec_locked(cvmx_spinlock_rec_t *lock)
{
    return (lock->value != CVMX_SPINLOCK_UNLOCKED_VAL);
}


/**
 * Unlocks one level of recursive spinlock.  The lock is not released
 * unless this is the final unlock call for that spinlock.
 *
 * @param lock   pointer to recursive spinlock structure
 */
static inline void cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock);

#ifdef CVMX_SPINLOCK_DEBUG
#define cvmx_spinlock_rec_unlock(x)  _int_cvmx_spinlock_rec_unlock((x), __FILE__, __LINE__)
static inline void _int_cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock, char *filename, int linenum)
#else
static inline void cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock)
#endif
{
    unsigned int temp, result;
    int core_num;
    core_num = cvmx_get_core_num();

#ifdef CVMX_SPINLOCK_DEBUG
    if (lock->core_num != core_num)
    {
        cvmx_dprintf("ERROR: Recursive spinlock release attempted by non-owner! file: %s, line: %d\n", filename, linenum);
        return;
    }
#endif

	__asm__ __volatile__(
		".set  noreorder                 \n"
		"     addi  %[tmp], %[pid], 0x80 \n"
		"     sw    %[tmp], %[lid]       # set lid to invalid value\n"
                CVMX_SYNCWS_STR
		"1:   ll    %[tmp], %[val]       \n"
		"     addu  %[res], %[tmp], -1   # decrement lock count\n"
		"     sc    %[res], %[val]       \n"
		"     beqz  %[res], 1b           # retry if the sc failed\n"
		"     nop                        \n"
		"     beq   %[tmp], %[res], 2f   # old count was 1: lock now free\n"
		"     nop                        \n"
		"     sw    %[pid], %[lid]       # restore lid to pid; lock still held\n"
		"2:                              \n"
                CVMX_SYNCWS_STR
		".set  reorder                   \n"
		: [res] "=&r" (result), [tmp] "=&r" (temp), [val] "+m" (lock->value), [lid] "+m" (lock->core_num)
		: [pid] "r" (core_num)
		: "memory");

#ifdef CVMX_SPINLOCK_DEBUG
    if (lock->value == ~0U)
    {
        cvmx_dprintf("ERROR: Recursive spinlock released too many times! file: %s, line: %d\n", filename, linenum);
    }
#endif
}

/**
 * Takes a recursive spinlock for a given core.  A core can take the lock
 * multiple times, and the lock is released only when the corresponding
 * number of unlocks have taken place.
 *
 * NOTE: This assumes only one thread per core, and that the core ID is
 * used as the lock 'key'.  This implementation cannot be generalized to
 * allow multiple threads to share the same key (core ID).
 *
 * @param lock   address of recursive spinlock structure.  Note that this is
 *               distinct from the standard spinlock.
 */
static inline void cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock);

#ifdef CVMX_SPINLOCK_DEBUG
#define cvmx_spinlock_rec_lock(x)  _int_cvmx_spinlock_rec_lock((x), __FILE__, __LINE__)
static inline void _int_cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock, char *filename, int linenum)
#else
static inline void cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock)
#endif
{
    unsigned int tmp;
    int core_num;

    core_num = cvmx_get_core_num();

    /* The recursive path (label 2) updates the count with plain lw/sw;
       this is safe because only the owning core reaches it while the
       lock is held. */
	__asm__ __volatile__(
		".set  noreorder              \n"
		"1: ll   %[tmp], %[val]       # load the count\n"
		"   bnez %[tmp], 2f           # if count != zero, check the owner\n"
		"   addu %[tmp], %[tmp], 1    \n"
		"   sc   %[tmp], %[val]       \n"
		"   beqz %[tmp], 1b           # retry if the sc failed\n"
		"   nop                       \n"
		"   j    3f                   # go write core_num\n"
		"2: lw   %[tmp], %[lid]       # load the core_num\n"
		"   bne  %[tmp], %[pid], 1b   # core_num does not match, restart\n"
		"   nop                       \n"
		"   lw   %[tmp], %[val]       \n"
		"   addu %[tmp], %[tmp], 1    \n"
		"   sw   %[tmp], %[val]       # update the count\n"
		"3: sw   %[pid], %[lid]       # store the core_num\n"
                CVMX_SYNCWS_STR
		".set  reorder                \n"
		: [tmp] "=&r" (tmp), [val] "+m" (lock->value), [lid] "+m" (lock->core_num)
		: [pid] "r" (core_num)
		: "memory");

#ifdef CVMX_SPINLOCK_DEBUG
    if (lock->core_num != core_num)
    {
        cvmx_dprintf("cvmx_spinlock_rec_lock: lock taken, but core_num is incorrect. file: %s, line: %d\n", filename, linenum);
    }
#endif
}
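
/*
 * Usage sketch (illustrative only; the lock and helpers are hypothetical):
 * a core may re-enter code that takes the same recursive lock, as long as
 * every lock call is balanced by an unlock.
 *
 *     static cvmx_spinlock_rec_t tree_lock;  // cvmx_spinlock_rec_init() at boot
 *
 *     static void tree_insert(int key)
 *     {
 *         cvmx_spinlock_rec_lock(&tree_lock);    // count 0->1, or n->n+1
 *         if (tree_needs_rebalance())
 *             tree_rebalance();                  // may itself take tree_lock
 *         cvmx_spinlock_rec_unlock(&tree_lock);  // released only at count 0
 *     }
 */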

#ifdef	__cplusplus
}
#endif

#endif /* __CVMX_SPINLOCK_H__ */