1/*
2 * Linux-specific abstractions to gain some independence from linux kernel versions.
3 * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
4 *
5 * Copyright (C) 2015, Broadcom Corporation. All Rights Reserved.
6 *
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
14 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
16 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
17 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 *
19 * $Id: linuxver.h 444765 2013-12-20 22:37:15Z $
20 */
21
22#ifndef _linuxver_h_
23#define _linuxver_h_
24
25#include <linux/version.h>
26#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
27#include <linux/config.h>
28#else
29#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
30#include <generated/autoconf.h>
31#else
32#include <linux/autoconf.h>
33#endif
34#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
35
36#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
37#include <linux/kconfig.h>
38#endif
39
40#include <linux/module.h>
41
42#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
43/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
44#ifdef __UNDEF_NO_VERSION__
45#undef __NO_VERSION__
46#else
47#define __NO_VERSION__
48#endif
49#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */
50
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
/* 2.4 compat: _type_ and _perm_ are ignored because the old MODULE_PARM()
 * interface only understands format strings; scalar params are always
 * registered as int ("i") and strings as a bounded char array ("c<N>").
 */
#define module_param(_name_, _type_, _perm_)	MODULE_PARM(_name_, "i")
#define module_param_string(_name_, _string_, _size_, _perm_) \
		MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
#endif
56
57/* linux/malloc.h is deprecated, use linux/slab.h instead. */
58#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
59#include <linux/malloc.h>
60#else
61#include <linux/slab.h>
62#endif
63
64#include <linux/types.h>
65#include <linux/init.h>
66#include <linux/mm.h>
67#include <linux/string.h>
68#include <linux/pci.h>
69#include <linux/interrupt.h>
70#include <linux/kthread.h>
71#include <linux/netdevice.h>
72#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
73#include <linux/semaphore.h>
74#else
75#include <asm/semaphore.h>
76#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
77#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
78#undef IP_TOS
79#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */
80#include <asm/io.h>
81
82#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
83#include <linux/workqueue.h>
84#else
85#include <linux/tqueue.h>
86#ifndef work_struct
87#define work_struct tq_struct
88#endif
89#ifndef INIT_WORK
90#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
91#endif
92#ifndef schedule_work
93#define schedule_work(_work) schedule_task((_work))
94#endif
95#ifndef flush_scheduled_work
96#define flush_scheduled_work() flush_scheduled_tasks()
97#endif
98#endif	/* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */
99
100#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
101#define DAEMONIZE(a)	do { \
102		allow_signal(SIGKILL);	\
103		allow_signal(SIGTERM);	\
104	} while (0)
105#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
106	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
107#define DAEMONIZE(a) daemonize(a); \
108	allow_signal(SIGKILL); \
109	allow_signal(SIGTERM);
110#else /* Linux 2.4 (w/o preemption patch) */
111#define RAISE_RX_SOFTIRQ() \
112	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
113#define DAEMONIZE(a) daemonize(); \
114	do { if (a) \
115		strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \
116	} while (0);
117#endif /* LINUX_VERSION_CODE  */
118
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func)
#else
/* Pre-2.6.19 INIT_WORK() took a third "data" argument; pass the work struct
 * itself so the handler can recover its context from its void* parameter.
 */
#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func, _work)
#if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \
	(RHEL_MAJOR == 5))
/* Exclude RHEL 5, whose 2.6.18 kernel already provides work_func_t */
typedef void (*work_func_t)(void *work);
#endif
#endif	/* >= 2.6.19 */
129
130#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
131/* Some distributions have their own 2.6.x compatibility layers */
132#ifndef IRQ_NONE
133typedef void irqreturn_t;
134#define IRQ_NONE
135#define IRQ_HANDLED
136#define IRQ_RETVAL(x)
137#endif
138#else
139typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
140#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
141
142#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
143#define IRQF_SHARED	SA_SHIRQ
144#endif /* < 2.6.18 */
145
146#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
147#ifdef	CONFIG_NET_RADIO
148#define	CONFIG_WIRELESS_EXT
149#endif
150#endif	/* < 2.6.17 */
151
152#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
153#define MOD_INC_USE_COUNT
154#define MOD_DEC_USE_COUNT
155#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */
156
157#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
158#include <linux/sched.h>
159#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
160
161#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
162#include <linux/sched/rt.h>
163#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
164
165#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
166#include <net/lib80211.h>
167#endif
168#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
169#include <linux/ieee80211.h>
170#else
171#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
172#include <net/ieee80211.h>
173#endif
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
175
176#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
177
178#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
179#include <pcmcia/version.h>
180#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) */
181
182#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
183#include <pcmcia/cs_types.h>
184#include <pcmcia/cs.h>
185#endif
186#include <pcmcia/cistpl.h>
187#include <pcmcia/cisreg.h>
188#include <pcmcia/ds.h>
189
190#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 69))
191/* In 2.5 (as of 2.5.69 at least) there is a cs_error exported which
192 * does this, but it's not in 2.4 so we do our own for now.
193 */
/* Report a Card Services error for 2.4 kernels (2.5.69+ export cs_error()).
 * func: the CS function code that failed; ret: its return status.
 */
static inline void
cs_error(client_handle_t handle, int func, int ret)
{
	error_info_t err = { func, ret };
	CardServices(ReportError, handle, &err);
}
200#endif
201
202#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 16))
203
204typedef	struct pcmcia_device dev_link_t;
205
206#endif
207
208#endif /* CONFIG_PCMCIA */
209
210#ifndef __exit
211#define __exit
212#endif
213#ifndef __devexit
214#define __devexit
215#endif
216#ifndef __devinit
217#  if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
218#    define __devinit	__init
219#  else
220/* All devices are hotpluggable since linux 3.8.0 */
221#    define __devinit
222#  endif
223#endif /* !__devinit */
224#ifndef __devinitdata
225#define __devinitdata
226#endif
227#ifndef __devexit_p
228#define __devexit_p(x)	x
229#endif
230
231#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
232
233#define pci_get_drvdata(dev)		(dev)->sysdata
234#define pci_set_drvdata(dev, value)	(dev)->sysdata = (value)
235
236/*
237 * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration
238 */
239
/* Minimal 2.4-style PCI device-match table entry, provided for 2.2 kernels. */
struct pci_device_id {
	unsigned int vendor, device;		/* Vendor and device ID or PCI_ANY_ID */
	unsigned int subvendor, subdevice;	/* Subsystem ID's or PCI_ANY_ID */
	unsigned int class, class_mask;		/* (class,subclass,prog-if) triplet */
	unsigned long driver_data;		/* Data private to the driver */
};
246
/* Minimal 2.4-style PCI driver descriptor, provided for 2.2 kernels;
 * registered via the compat pci_register_driver() declared below.
 */
struct pci_driver {
	struct list_head node;
	char *name;
	const struct pci_device_id *id_table;	/* NULL if wants all devices */
	int (*probe)(struct pci_dev *dev,
	             const struct pci_device_id *id); /* New device inserted */
	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug
						 * capable driver)
						 */
	void (*suspend)(struct pci_dev *dev);	/* Device suspended */
	void (*resume)(struct pci_dev *dev);	/* Device woken up */
};
259
260#define MODULE_DEVICE_TABLE(type, name)
261#define PCI_ANY_ID (~0)
262
263/* compatpci.c */
264#define pci_module_init pci_register_driver
265extern int pci_register_driver(struct pci_driver *drv);
266extern void pci_unregister_driver(struct pci_driver *drv);
267
268#endif /* PCI registration */
269
270#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
271#define pci_module_init pci_register_driver
272#endif
273
274#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
275#ifdef MODULE
276#define module_init(x) int init_module(void) { return x(); }
277#define module_exit(x) void cleanup_module(void) { x(); }
278#else
279#define module_init(x)	__initcall(x);
280#define module_exit(x)	__exitcall(x);
281#endif
282#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */
283
284#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
285#define WL_USE_NETDEV_OPS
286#else
287#undef WL_USE_NETDEV_OPS
288#endif
289
290#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
291#define WL_CONFIG_RFKILL
292#else
293#undef WL_CONFIG_RFKILL
294#endif
295
296#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
297#define list_for_each(pos, head) \
298	for (pos = (head)->next; pos != (head); pos = pos->next)
299#endif
300
301#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
302#define pci_resource_start(dev, bar)	((dev)->base_address[(bar)])
303#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
304#define pci_resource_start(dev, bar)	((dev)->resource[(bar)].start)
305#endif
306
307#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
308#define pci_enable_device(dev) do { } while (0)
309#endif
310
311#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
312#define net_device device
313#endif
314
315#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
316
317/*
318 * DMA mapping
319 *
320 * See linux/Documentation/DMA-mapping.txt
321 */
322
323#ifndef PCI_DMA_TODEVICE
324#define	PCI_DMA_TODEVICE	1
325#define	PCI_DMA_FROMDEVICE	2
326#endif
327
328typedef u32 dma_addr_t;
329
330/* Pure 2^n version of get_order */
331static inline int get_order(unsigned long size)
332{
333	int order;
334
335	size = (size-1) >> (PAGE_SHIFT-1);
336	order = -1;
337	do {
338		size >>= 1;
339		order++;
340	} while (size);
341	return order;
342}
343
344static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
345                                         dma_addr_t *dma_handle)
346{
347	void *ret;
348	int gfp = GFP_ATOMIC | GFP_DMA;
349
350	ret = (void *)__get_free_pages(gfp, get_order(size));
351
352	if (ret != NULL) {
353		memset(ret, 0, size);
354		*dma_handle = virt_to_bus(ret);
355	}
356	return ret;
357}
/* Free a buffer from the compat pci_alloc_consistent(); hwdev and dma_handle
 * are unused because the allocator is just __get_free_pages()+virt_to_bus().
 */
static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
                                       void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
363#ifdef ILSIM
364extern uint pci_map_single(void *dev, void *va, uint size, int direction);
365extern void pci_unmap_single(void *dev, uint pa, uint size, int direction);
366#else
367#define pci_map_single(cookie, address, size, dir)	virt_to_bus(address)
368#define pci_unmap_single(cookie, address, size, dir)
369#endif
370
371#endif /* DMA mapping */
372
373#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
374
375#define dev_kfree_skb_any(a)		dev_kfree_skb(a)
376#define netif_down(dev)			do { (dev)->start = 0; } while (0)
377
378/* pcmcia-cs provides its own netdevice compatibility layer */
379#ifndef _COMPAT_NETDEVICE_H
380
381/*
382 * SoftNet
383 *
384 * For pre-softnet kernels we need to tell the upper layer not to
385 * re-enter start_xmit() while we are in there. However softnet
386 * guarantees not to enter while we are in there so there is no need
387 * to do the netif_stop_queue() dance unless the transmit queue really
388 * gets stuck. This should also improve performance according to tests
389 * done by Aman Singla.
390 */
391
392#define dev_kfree_skb_irq(a)	dev_kfree_skb(a)
393#define netif_wake_queue(dev) \
394		do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
395#define netif_stop_queue(dev)	set_bit(0, &(dev)->tbusy)
396
/* Pre-softnet netif_start_queue(): clear the busy/interrupt flags and mark
 * the interface started so the stack may call hard_start_xmit() again.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;
}
403
404#define netif_queue_stopped(dev)	(dev)->tbusy
405#define netif_running(dev)		(dev)->start
406
407#endif /* _COMPAT_NETDEVICE_H */
408
409#define netif_device_attach(dev)	netif_start_queue(dev)
410#define netif_device_detach(dev)	netif_stop_queue(dev)
411
412/* 2.4.x renamed bottom halves to tasklets */
413#define tasklet_struct				tq_struct
/* Pre-2.4 tasklet_schedule(): queue on the immediate task queue, then mark
 * the bottom half so it runs at the next BH execution point.
 */
static inline void tasklet_schedule(struct tasklet_struct *tasklet)
{
	queue_task(tasklet, &tq_immediate);
	mark_bh(IMMEDIATE_BH);
}
419
420static inline void tasklet_init(struct tasklet_struct *tasklet,
421                                void (*func)(unsigned long),
422                                unsigned long data)
423{
424	tasklet->next = NULL;
425	tasklet->sync = 0;
426	tasklet->routine = (void (*)(void *))func;
427	tasklet->data = (void *)data;
428}
/* No-op: 2.2 task queues need no explicit kill.  Plain do/while(0) (the old
 * "{ do {} while (0); }" form broke compilation when used as the sole
 * statement of an unbraced if followed by else).
 */
#define tasklet_kill(tasklet)	do {} while (0)

/* 2.4.x introduced del_timer_sync(); del_timer is the pre-SMP equivalent */
#define del_timer_sync(timer) del_timer(timer)
433
434#else
435
436#define netif_down(dev)
437
438#endif /* SoftNet */
439
440#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
441
/*
 * Emit code to initialise a tq_struct's routine and data pointers
 * (the task-queue analogue of the later PREPARE_WORK())
 */
#define PREPARE_TQUEUE(_tq, _routine, _data)			\
	do {							\
		(_tq)->routine = _routine;			\
		(_tq)->data = _data;				\
	} while (0)

/*
 * Emit code to initialise all of a tq_struct: list head and sync flag,
 * then the routine/data pair via PREPARE_TQUEUE
 */
#define INIT_TQUEUE(_tq, _routine, _data)			\
	do {							\
		INIT_LIST_HEAD(&(_tq)->list);			\
		(_tq)->sync = 0;				\
		PREPARE_TQUEUE((_tq), (_routine), (_data));	\
	} while (0)
460
461#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */
462
463/* Power management related macro & routines */
464#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
465#define	PCI_SAVE_STATE(a, b)	pci_save_state(a)
466#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a)
467#else
468#define	PCI_SAVE_STATE(a, b)	pci_save_state(a, b)
469#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a, b)
470#endif
471
472#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
473static inline int
474pci_save_state(struct pci_dev *dev, u32 *buffer)
475{
476	int i;
477	if (buffer) {
478		for (i = 0; i < 16; i++)
479			pci_read_config_dword(dev, i * 4, &buffer[i]);
480	}
481	return 0;
482}
483
/* Pre-2.4.6 pci_restore_state(): write back the header saved by
 * pci_save_state(); with a NULL buffer, re-program only the six BARs and
 * the interrupt line from the kernel's resource records.  Returns 0.
 */
static inline int
pci_restore_state(struct pci_dev *dev, u32 *buffer)
{
	int i;

	if (buffer) {
		for (i = 0; i < 16; i++)
			pci_write_config_dword(dev, i * 4, buffer[i]);
	}
	/*
	 * otherwise, write the context information we know from bootup.
	 * This works around a problem where warm-booting from Windows
	 * combined with a D3(hot)->D0 transition causes PCI config
	 * header data to be forgotten.
	 */
	else {
		for (i = 0; i < 6; i ++)
			pci_write_config_dword(dev,
			                       PCI_BASE_ADDRESS_0 + (i * 4),
			                       pci_resource_start(dev, i));
		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
	}
	return 0;
}
508#endif /* PCI power management */
509
510/* Old cp0 access macros deprecated in 2.4.19 */
511#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
512#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
513#endif
514
515/* Module refcount handled internally in 2.6.x */
516#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
517#ifndef SET_MODULE_OWNER
518#define SET_MODULE_OWNER(dev)		do {} while (0)
519#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
520#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
521#else
522#define OLD_MOD_INC_USE_COUNT		do {} while (0)
523#define OLD_MOD_DEC_USE_COUNT		do {} while (0)
524#endif
525#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
526#ifndef SET_MODULE_OWNER
527#define SET_MODULE_OWNER(dev)		do {} while (0)
528#endif
529#ifndef MOD_INC_USE_COUNT
530#define MOD_INC_USE_COUNT			do {} while (0)
531#endif
532#ifndef MOD_DEC_USE_COUNT
533#define MOD_DEC_USE_COUNT			do {} while (0)
534#endif
535#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
536#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
537#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
538
539#ifndef SET_NETDEV_DEV
540#define SET_NETDEV_DEV(net, pdev)	do {} while (0)
541#endif
542
543#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
544#ifndef HAVE_FREE_NETDEV
545#define free_netdev(dev)		kfree(dev)
546#endif
547#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */
548
549#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
550/* struct packet_type redefined in 2.6.x */
551#define af_packet_priv			data
552#endif
553
554/* suspend args */
555#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
556#define DRV_SUSPEND_STATE_TYPE pm_message_t
557#else
558#define DRV_SUSPEND_STATE_TYPE uint32
559#endif
560
561#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
562#define CHECKSUM_HW	CHECKSUM_PARTIAL
563#endif
564
/* Control block for a driver-owned kernel thread; managed with the
 * PROC_START/PROC_STOP macros and the binary_sema_* helpers below.
 */
typedef struct {
	void	*parent;  /* some external entity that the thread supposed to work for */
	char	*proc_name;	/* thread name handed to kthread_run() */
	struct	task_struct *p_task;	/* task returned by kthread_run() */
	long	thr_pid;	/* pid of the thread; -1 after PROC_STOP */
	int		prio; /* priority */
	struct	semaphore sema;	/* binary wakeup semaphore (see binary_sema_*) */
	int	terminated;	/* set TRUE by PROC_STOP to request thread exit */
	struct	completion completed;	/* signalled by the thread on its way out */
	spinlock_t	spinlock;	/* protects up_cnt */
	int		up_cnt;	/* 0 or 1: whether sema is currently raised */
} tsk_ctl_t;
577
578
579/* requires  tsk_ctl_t tsk  argument, the caller's priv data is passed in owner ptr */
580/* note this macro assumes there may be only one context waiting on thread's completion */
581#ifdef DHD_DEBUG
582#define DBG_THR(x) printk x
583#else
584#define DBG_THR(x)
585#endif
586
/* Take the thread's binary wakeup semaphore.
 * Returns false when the semaphore was acquired normally and true when the
 * sleep was interrupted by a signal -- note the INVERTED sense relative to
 * down_interruptible()'s 0-on-success convention.
 */
static inline bool binary_sema_down(tsk_ctl_t *tsk)
{
	if (down_interruptible(&tsk->sema) == 0) {
		unsigned long flags = 0;
		spin_lock_irqsave(&tsk->spinlock, flags);
		/* Consume the single "up" token; anything else is a bug. */
		if (tsk->up_cnt == 1)
			tsk->up_cnt--;
		else {
			DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt));
		}
		spin_unlock_irqrestore(&tsk->spinlock, flags);
		return false;
	} else
		return true;
}
602
/* Raise the thread's binary wakeup semaphore.
 * up() is only issued when up_cnt was 0, so the semaphore count can never
 * exceed 1 (repeated wakeups coalesce).  Returns true if it was raised.
 */
static inline bool binary_sema_up(tsk_ctl_t *tsk)
{
	bool sem_up = false;
	unsigned long flags = 0;

	spin_lock_irqsave(&tsk->spinlock, flags);
	if (tsk->up_cnt == 0) {
		tsk->up_cnt++;
		sem_up = true;
	} else if (tsk->up_cnt == 1) {
		/* dhd_sched_dpc: dpc is already up! */
	} else
		DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt));

	spin_unlock_irqrestore(&tsk->spinlock, flags);

	/* Do the actual up() outside the spinlock. */
	if (sem_up)
		up(&tsk->sema);

	return sem_up;
}
624
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
#else
/* NOTE(review): smp_rmb is a zero-argument macro on 2.4, so callers must
 * invoke SMP_RD_BARRIER_DEPENDS() with an empty argument list -- confirm.
 */
#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
#endif
630
/* PROC_START: initialise a tsk_ctl_t and spawn its kernel thread.
 * The spinlock is initialised BEFORE kthread_run() because the new thread
 * may immediately call binary_sema_down()/binary_sema_up(), which take it
 * (the original initialised it after the thread was already running).
 * On kthread_run() failure p_task is NULLed and thr_pid stays -1 so
 * callers can detect the failure instead of oopsing on an ERR_PTR.
 */
#define PROC_START(thread_func, owner, tsk_ctl, flags, name) \
{ \
	sema_init(&((tsk_ctl)->sema), 0); \
	init_completion(&((tsk_ctl)->completed)); \
	spin_lock_init(&((tsk_ctl)->spinlock)); \
	(tsk_ctl)->parent = owner; \
	(tsk_ctl)->proc_name = name;  \
	(tsk_ctl)->terminated = FALSE; \
	(tsk_ctl)->thr_pid = -1; \
	(tsk_ctl)->p_task  = kthread_run(thread_func, tsk_ctl, (char*)name); \
	if (IS_ERR((tsk_ctl)->p_task)) { \
		(tsk_ctl)->p_task = NULL; \
		DBG_THR(("%s(): thread:%s create failed\n", __FUNCTION__, \
			(tsk_ctl)->proc_name)); \
	} else { \
		(tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
		DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \
			(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	} \
}

/* PROC_STOP: request thread exit (terminated flag, then a semaphore kick so
 * a sleeping thread wakes) and wait for it to signal completion.
 * Assumes a single context waits on the thread's completion.
 */
#define PROC_STOP(tsk_ctl) \
{ \
	(tsk_ctl)->terminated = TRUE; \
	smp_wmb(); \
	up(&((tsk_ctl)->sema));	\
	wait_for_completion(&((tsk_ctl)->completed)); \
	DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	(tsk_ctl)->thr_pid = -1; \
}
655
656/*  ----------------------- */
657
658#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
659#define KILL_PROC(nr, sig) \
660{ \
661struct task_struct *tsk; \
662struct pid *pid;    \
663pid = find_get_pid((pid_t)nr);    \
664tsk = pid_task(pid, PIDTYPE_PID);    \
665if (tsk) send_sig(sig, tsk, 1); \
666}
667#else
668#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
669	KERNEL_VERSION(2, 6, 30))
670#define KILL_PROC(pid, sig) \
671{ \
672	struct task_struct *tsk; \
673	tsk = find_task_by_vpid(pid); \
674	if (tsk) send_sig(sig, tsk, 1); \
675}
676#else
677#define KILL_PROC(pid, sig) \
678{ \
679	kill_proc(pid, sig, 1); \
680}
681#endif
682#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */
683
684#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
685#include <linux/time.h>
686#include <linux/wait.h>
687#else
688#include <linux/sched.h>
689
/* Back-port of __wait_event_interruptible_timeout() for 2.4 kernels.
 * "ret" is in/out: the remaining timeout in jiffies on entry; on exit it is
 * the remaining jiffies, 0 on timeout, or -ERESTARTSYS if a signal arrived.
 */
#define __wait_event_interruptible_timeout(wq, condition, ret)		\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			ret = schedule_timeout(ret);			\
			if (!ret)					\
				break;					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

/* Evaluates to the remaining jiffies (> 0) if the condition became true,
 * 0 on timeout, or -ERESTARTSYS when interrupted by a signal.  The fast
 * path skips the wait entirely when the condition already holds.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})
720
721#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
722
723/*
724For < 2.6.24, wl creates its own netdev but doesn't
725align the priv area like the genuine alloc_netdev().
726Since netdev_priv() always gives us the aligned address, it will
727not match our unaligned address for < 2.6.24
728*/
729#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
730#define DEV_PRIV(dev)	(dev->priv)
731#else
732#define DEV_PRIV(dev)	netdev_priv(dev)
733#endif
734
735#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
736#define WL_ISR(i, d, p)         wl_isr((i), (d))
737#else
738#define WL_ISR(i, d, p)         wl_isr((i), (d), (p))
739#endif  /* < 2.6.20 */
740
741#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
742#define netdev_priv(dev) dev->priv
743#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
744
745#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
746#define RANDOM32	prandom_u32
747#else
748#define RANDOM32	random32
749#endif
750
751#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
752#define SRANDOM32(entropy)	prandom_seed(entropy)
753#else
754#define SRANDOM32(entropy)	srandom32(entropy)
755#endif
756
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
/* TRUE when blocking is safe: process context with interrupts enabled. */
#define CAN_SLEEP()	((!in_atomic() && !irqs_disabled()))
#else
/* Pre-2.6.25: conservatively assume atomic context, never sleep. */
#define CAN_SLEEP()	(FALSE)
#endif

/* Allocation flags matching the current context's ability to sleep. */
#define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC)
764
/*
 * Override the latest kfifo functions with
 * older versions so the code works on older kernels
 */
769#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
770#define kfifo_in_spinlocked(a, b, c, d)		kfifo_put(a, (u8 *)b, c)
771#define kfifo_out_spinlocked(a, b, c, d)	kfifo_get(a, (u8 *)b, c)
772#define kfifo_esize(a)				1
773#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \
774	(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36))
775#define kfifo_in_spinlocked(a, b, c, d)		kfifo_in_locked(a, b, c, d)
776#define kfifo_out_spinlocked(a, b, c, d)	kfifo_out_locked(a, b, c, d)
777#define kfifo_esize(a)				1
778#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
779
780#endif /* _linuxver_h_ */
781