1/*
2 * Linux OS Independent Layer
3 *
4 * Copyright (C) 2010, Broadcom Corporation. All Rights Reserved.
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
13 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
15 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
16 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 *
18 * $Id: linux_osl.c,v 1.172.2.21 2011-01-27 17:03:39 Exp $
19 */
20
21#define LINUX_PORT
22
23#include <typedefs.h>
24#include <bcmendian.h>
25#include <linuxver.h>
26#include <bcmdefs.h>
27#include <osl.h>
28#include <bcmutils.h>
29#include <linux/delay.h>
30#ifdef mips
31#include <asm/paccess.h>
32#endif /* mips */
33#include <pcicfg.h>
34
35
36
37#include <linux/fs.h>
38
39#define PCI_CFG_RETRY 		10
40
41#define OS_HANDLE_MAGIC		0x1234abcd	/* Magic # to recognise osh */
42#define BCM_MEM_FILENAME_LEN 	24		/* Mem. filename length */
43
/* Header node prepended to every allocation made through osl_debug_malloc
 * (BCMDBG_MEM builds).  Nodes form a doubly-linked list rooted at
 * osl_info.dbgmem_list so leaks can be dumped by osl_debug_memdump.
 */
typedef struct bcm_mem_link {
	struct bcm_mem_link *prev;	/* previous tracked allocation */
	struct bcm_mem_link *next;	/* next tracked allocation */
	uint	size;			/* user-requested size; 0 marks a freed block */
	int	line;			/* source line of the allocation */
	void 	*osh;			/* osl handle the block was allocated against */
	char	file[BCM_MEM_FILENAME_LEN];	/* basename of allocating file (truncated) */
} bcm_mem_link_t;
52
#if defined(DSLCPE_DELAY_NOT_YET)
/* State shared with the DSLCPE long-delay support (see osl_long_delay). */
struct shared_osl {
	int long_delay;		/* nesting count of in-progress long delays */
	spinlock_t *lock;	/* lock dropped/reacquired while yielding */
	void *wl;		/* NOTE(review): opaque here; owner not visible in this file */
	unsigned long MIPS;	/* CPU speed estimate set by osl_oshsh_init */
};
#endif
61
/* Private definition behind the opaque osl_t handle.  The public part
 * (osl_pubinfo_t) must remain the first member: other code casts
 * osl_t* to osl_pubinfo_t* (see the OSLREGOPS accessors below).
 */
struct osl_info {
	osl_pubinfo_t pub;	/* public state; must stay first */
#ifdef CTFPOOL
	ctfpool_t *ctfpool;	/* fast skb recycle pool */
#endif /* CTFPOOL */
	uint magic;		/* OS_HANDLE_MAGIC while the handle is valid */
	void *pdev;		/* bus device (struct pci_dev * for PCI) */
	atomic_t malloced;	/* bytes currently allocated via osl_malloc */
	uint failed;		/* number of failed osl_malloc calls */
	uint bustype;		/* PCI_BUS / SI_BUS / ... from osl_attach */
	bcm_mem_link_t *dbgmem_list;	/* live allocations (BCMDBG_MEM) */
	spinlock_t dbgmem_lock;		/* protects dbgmem_list */
#if defined(DSLCPE_DELAY)
	shared_osl_t *oshsh; /* osh shared */
#endif
#ifdef BCMDBG_PKT      /* pkt logging for debugging */
	spinlock_t pktlist_lock;	/* protects pktlist */
	pktlist_info_t pktlist;		/* log of outstanding packets */
#endif  /* BCMDBG_PKT */
	spinlock_t pktalloc_lock;	/* protects pub.pktalloced counter */
};
83
/* PCMCIA attribute space access macros */
#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
/* Per-card state kept while a PCMCIA device is attached; 'node' went
 * away with the 2.6.35 PCMCIA API rework, hence the version guard.
 */
struct pcmcia_dev {
	dev_link_t link;	/* PCMCIA device pointer */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
	dev_node_t node;	/* PCMCIA node structure */
#endif
	void *base;		/* Mapped attribute memory window */
	size_t size;		/* Size of window */
	void *drv;		/* Driver data */
};
#endif /* defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) */
96
/* Global ASSERT type flag: selects what osl_assert() does on failure
 * (0 -> panic, 2 -> BUG(), anything else -> log only; see osl_assert).
 */
uint32 g_assert_type = FALSE;
99
/* BCME_xxx -> -errno translation table, indexed by -bcmerror.  Its size
 * must track bcmutils.h: entry count is checked against BCME_LAST by an
 * ASSERT in osl_attach and by the #error below.
 */
static int16 linuxbcmerrormap[] =
{	0, 			/* 0 */
	-EINVAL,		/* BCME_ERROR */
	-EINVAL,		/* BCME_BADARG */
	-EINVAL,		/* BCME_BADOPTION */
	-EINVAL,		/* BCME_NOTUP */
	-EINVAL,		/* BCME_NOTDOWN */
	-EINVAL,		/* BCME_NOTAP */
	-EINVAL,		/* BCME_NOTSTA */
	-EINVAL,		/* BCME_BADKEYIDX */
	-EINVAL,		/* BCME_RADIOOFF */
	-EINVAL,		/* BCME_NOTBANDLOCKED */
	-EINVAL, 		/* BCME_NOCLK */
	-EINVAL, 		/* BCME_BADRATESET */
	-EINVAL, 		/* BCME_BADBAND */
	-E2BIG,			/* BCME_BUFTOOSHORT */
	-E2BIG,			/* BCME_BUFTOOLONG */
	-EBUSY, 		/* BCME_BUSY */
	-EINVAL, 		/* BCME_NOTASSOCIATED */
	-EINVAL, 		/* BCME_BADSSIDLEN */
	-EINVAL, 		/* BCME_OUTOFRANGECHAN */
	-EINVAL, 		/* BCME_BADCHAN */
	-EFAULT, 		/* BCME_BADADDR */
	-ENOMEM, 		/* BCME_NORESOURCE */
	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
	-EMSGSIZE,		/* BCME_BADLENGTH */
	-EINVAL,		/* BCME_NOTREADY */
	-EPERM,			/* BCME_NOTPERMITTED */
	-ENOMEM, 		/* BCME_NOMEM */
	-EINVAL, 		/* BCME_ASSOCIATED */
	-ERANGE, 		/* BCME_RANGE */
	-EINVAL, 		/* BCME_NOTFOUND */
	-EINVAL, 		/* BCME_WME_NOT_ENABLED */
	-EINVAL, 		/* BCME_TSPEC_NOTFOUND */
	-EINVAL, 		/* BCME_ACM_NOTSUPPORTED */
	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
	-EIO,			/* BCME_SDIO_ERROR */
	-ENODEV,		/* BCME_DONGLE_DOWN */
	-EINVAL,		/* BCME_VERSION */
	-EIO,			/* BCME_TXFAIL */
	-EIO,			/* BCME_RXFAIL */
	-EINVAL,		/* BCME_NODEVICE */
	-EINVAL,		/* BCME_NMODE_DISABLED */
	-ENODATA,			/* BCME_NONRESIDENT */

/* When an new error code is added to bcmutils.h, add os
 * spcecific error translation here as well
 */
/* check if BCME_LAST changed since the last time this function was updated */
#if BCME_LAST != -42
#error "You need to add a OS error translation in the linuxbcmerrormap \
	for new error code defined in bcmutils.h"
#endif
};
154
155/* translate bcmerrors into linux errors */
156int
157osl_error(int bcmerror)
158{
159	if (bcmerror > 0)
160		bcmerror = 0;
161	else if (bcmerror < BCME_LAST)
162		bcmerror = BCME_ERROR;
163
164	/* Array bounds covered by ASSERT in osl_attach */
165	return linuxbcmerrormap[-bcmerror];
166}
167
168osl_t *
169osl_attach(void *pdev, uint bustype, bool pkttag)
170{
171	osl_t *osh;
172
173	osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
174	ASSERT(osh);
175
176	bzero(osh, sizeof(osl_t));
177
178	/* Check that error map has the right number of entries in it */
179	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
180
181	osh->magic = OS_HANDLE_MAGIC;
182	atomic_set(&osh->malloced, 0);
183	osh->failed = 0;
184	osh->dbgmem_list = NULL;
185	spin_lock_init(&(osh->dbgmem_lock));
186	osh->pdev = pdev;
187	osh->pub.pkttag = pkttag;
188	osh->bustype = bustype;
189
190	switch (bustype) {
191		case PCI_BUS:
192		case SI_BUS:
193		case PCMCIA_BUS:
194			osh->pub.mmbus = TRUE;
195			break;
196		case JTAG_BUS:
197		case SDIO_BUS:
198		case USB_BUS:
199		case SPI_BUS:
200		case RPC_BUS:
201			osh->pub.mmbus = FALSE;
202			break;
203		default:
204			ASSERT(FALSE);
205			break;
206	}
207
208#ifdef BCMDBG_PKT
209	spin_lock_init(&(osh->pktlist_lock));
210#endif
211	spin_lock_init(&(osh->pktalloc_lock));
212	return osh;
213}
214
215void
216osl_detach(osl_t *osh)
217{
218	if (osh == NULL)
219		return;
220
221	ASSERT(osh->magic == OS_HANDLE_MAGIC);
222	kfree(osh);
223}
224
#ifdef CTFPOOL

#ifdef CTFPOOL_SPINLOCK
#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_irqsave(&(ctfpool)->lock, flags)
#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_irqrestore(&(ctfpool)->lock, flags)
#else
/* bottom-half variants: 'flags' is accepted but unused here */
#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_bh(&(ctfpool)->lock)
#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_bh(&(ctfpool)->lock)
#endif /* CTFPOOL_SPINLOCK */
/*
 * Allocate and add an object to packet pool.
 * Returns the new skb (already linked at the pool head) or NULL when
 * the pool is absent, already full, or the skb allocation fails.
 */
void *
osl_ctfpool_add(osl_t *osh)
{
	struct sk_buff *skb;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

	if ((osh == NULL) || (osh->ctfpool == NULL))
		return NULL;

	CTFPOOL_LOCK(osh->ctfpool, flags);
	ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);

	/* No need to allocate more objects */
	if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	/* Allocate a new skb and add it to the ctfpool.  NOTE: the skb
	 * allocation happens with the pool lock held.
	 */
	skb = dev_alloc_skb(osh->ctfpool->obj_size);
	if (skb == NULL) {
		printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
		       osh->ctfpool->obj_size);
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	/* Add to ctfpool (LIFO free list threaded through skb->next) */
	skb->next = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = skb;
	osh->ctfpool->fast_frees++;
	osh->ctfpool->curr_obj++;

	/* Hijack a skb member to store ptr to ctfpool */
	CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;

	/* Use bit flag to indicate skb from fast ctfpool */
	PKTFAST(osh, skb) = FASTBUF;

	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	return skb;
}
282
283/*
284 * Add new objects to the pool.
285 */
286void
287osl_ctfpool_replenish(osl_t *osh, uint thresh)
288{
289	if ((osh == NULL) || (osh->ctfpool == NULL))
290		return;
291
292	/* Do nothing if no refills are required */
293	while ((osh->ctfpool->refills > 0) && (thresh--)) {
294		osl_ctfpool_add(osh);
295		osh->ctfpool->refills--;
296	}
297}
298
299/*
300 * Initialize the packet pool with specified number of objects.
301 */
302int32
303osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
304{
305	osh->ctfpool = kmalloc(sizeof(ctfpool_t), GFP_ATOMIC);
306	ASSERT(osh->ctfpool);
307	bzero(osh->ctfpool, sizeof(ctfpool_t));
308
309	osh->ctfpool->max_obj = numobj;
310	osh->ctfpool->obj_size = size;
311
312	spin_lock_init(&osh->ctfpool->lock);
313
314	while (numobj--) {
315		if (!osl_ctfpool_add(osh))
316			return -1;
317		osh->ctfpool->fast_frees--;
318	}
319
320	return 0;
321}
322
323/*
324 * Cleanup the packet pool objects.
325 */
326void
327osl_ctfpool_cleanup(osl_t *osh)
328{
329	struct sk_buff *skb, *nskb;
330#ifdef CTFPOOL_SPINLOCK
331	unsigned long flags;
332#endif /* CTFPOOL_SPINLOCK */
333
334	if ((osh == NULL) || (osh->ctfpool == NULL))
335		return;
336
337	CTFPOOL_LOCK(osh->ctfpool, flags);
338
339	skb = osh->ctfpool->head;
340
341	while (skb != NULL) {
342		nskb = skb->next;
343		dev_kfree_skb(skb);
344		skb = nskb;
345		osh->ctfpool->curr_obj--;
346	}
347
348	ASSERT(osh->ctfpool->curr_obj == 0);
349	osh->ctfpool->head = NULL;
350	CTFPOOL_UNLOCK(osh->ctfpool, flags);
351
352	kfree(osh->ctfpool);
353	osh->ctfpool = NULL;
354}
355
356void
357osl_ctfpool_stats(osl_t *osh, void *b)
358{
359	struct bcmstrbuf *bb;
360
361	if ((osh == NULL) || (osh->ctfpool == NULL))
362		return;
363
364	bb = b;
365
366	ASSERT((osh != NULL) && (bb != NULL));
367
368	bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
369	            osh->ctfpool->max_obj, osh->ctfpool->obj_size,
370	            osh->ctfpool->curr_obj, osh->ctfpool->refills);
371	bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
372	            osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
373	            osh->ctfpool->slow_allocs);
374}
375
/* Fast-path allocation: pop a recycled skb from the CTF pool.  Returns
 * NULL when the pool is disabled or empty, so the caller (osl_pktget)
 * can fall back to dev_alloc_skb().
 */
static inline struct sk_buff *
osl_pktfastget(osl_t *osh, uint len)
{
	struct sk_buff *skb;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

	/* Try to do fast allocate. Return null if ctfpool is not in use
	 * or if there are no items in the ctfpool.
	 */
	if (osh->ctfpool == NULL)
		return NULL;

	CTFPOOL_LOCK(osh->ctfpool, flags);
	if (osh->ctfpool->head == NULL) {
		ASSERT(osh->ctfpool->curr_obj == 0);
		osh->ctfpool->slow_allocs++;	/* caller will take the slow path */
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	ASSERT(len <= osh->ctfpool->obj_size);

	/* Get an object from ctfpool */
	skb = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = (void *)skb->next;

	osh->ctfpool->fast_allocs++;
	osh->ctfpool->curr_obj--;
	ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	/* Re-init the fields a recycled buffer may have dirty.  The 16-byte
	 * offset re-creates dev_alloc_skb()'s reserved headroom — assumes
	 * that headroom is 16 on this kernel; TODO confirm.
	 */
	skb->next = skb->prev = NULL;
	skb->data = skb->head + 16;
	skb->tail = skb->head + 16;

	skb->len = 0;
	skb->cloned = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
	skb->list = NULL;
#endif
	atomic_set(&skb->users, 1);

	return skb;
}
423#endif /* CTFPOOL */
/* Convert a driver packet to native(OS) packet
 * In the process, packettag is zeroed out before sending up
 * IP code depends on skb->cb to be setup correctly with various options
 * In our case, that means it should be 0
 */
struct sk_buff * BCMFASTPATH
osl_pkt_tonative(osl_t *osh, void *pkt)
{
#ifndef WL_UMK
	struct sk_buff *nskb;
	unsigned long flags;
#endif

	/* clear the packet tag so the stack sees pristine skb->cb */
	if (osh->pub.pkttag)
		bzero((void*)((struct sk_buff *)pkt)->cb, OSL_PKTTAG_SZ);

#ifndef WL_UMK
	/* Decrement the packet counter: ownership of each chained skb
	 * passes to the network stack, so they stop counting against us.
	 */
	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
#ifdef BCMDBG_PKT
		spin_lock_irqsave(&osh->pktlist_lock, flags);
		pktlist_remove(&(osh->pktlist), (void *) nskb);
		spin_unlock_irqrestore(&osh->pktlist_lock, flags);
#endif  /* BCMDBG_PKT */
		spin_lock_irqsave(&osh->pktalloc_lock, flags);
		osh->pub.pktalloced--;
		spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
	}
#endif /* WL_UMK */
	return (struct sk_buff *)pkt;
}
455
/* Convert a native(OS) packet to driver packet.
 * In the process, native packet is destroyed, there is no copying
 * Also, a packettag is zeroed out
 * (BCMDBG_PKT builds take the caller's file/line for packet logging.)
 */
#ifdef BCMDBG_PKT
void *
osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file)
#else /* BCMDBG_PKT pkt logging for debugging */
void * BCMFASTPATH
osl_pkt_frmnative(osl_t *osh, void *pkt)
#endif /* BCMDBG_PKT */
{
#ifndef WL_UMK
	struct sk_buff *nskb;
	unsigned long flags;
#endif

	/* start with a clean packet tag in skb->cb */
	if (osh->pub.pkttag)
		bzero((void*)((struct sk_buff *)pkt)->cb, OSL_PKTTAG_SZ);

#ifndef WL_UMK
	/* Increment the packet counter: each chained skb is now owned by
	 * the driver and counted in pub.pktalloced.
	 */
	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
#ifdef BCMDBG_PKT
		spin_lock_irqsave(&osh->pktlist_lock, flags);
		pktlist_add(&(osh->pktlist), (void *) nskb, line, file);
		spin_unlock_irqrestore(&osh->pktlist_lock, flags);
#endif  /* BCMDBG_PKT */
		spin_lock_irqsave(&osh->pktalloc_lock, flags);
		osh->pub.pktalloced++;
		spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
	}
#endif /* WL_UMK */
	return (void *)pkt;
}
491
/* Return a new packet. zero out pkttag
 * Allocates an skb of 'len' data bytes — from the CTF pool when enabled,
 * otherwise (or when the pool is empty) via dev_alloc_skb().  Returns
 * NULL on allocation failure.
 */
#ifdef BCMDBG_PKT
void * BCMFASTPATH
osl_pktget(osl_t *osh, uint len, int line, char *file)
#else /* BCMDBG_PKT */
void * BCMFASTPATH
osl_pktget(osl_t *osh, uint len)
#endif /* BCMDBG_PKT */
{
	struct sk_buff *skb;
	unsigned long flags;

#ifdef CTFPOOL
	/* Allocate from local pool */
	skb = osl_pktfastget(osh, len);
	if ((skb != NULL) || ((skb = dev_alloc_skb(len)) != NULL)) {
#else /* CTFPOOL */
	if ((skb = dev_alloc_skb(len))) {
#endif /* CTFPOOL */
		skb_put(skb, len);	/* expose all 'len' bytes as data */
		skb->priority = 0;

#ifdef BCMDBG_PKT
		spin_lock_irqsave(&osh->pktlist_lock, flags);
		pktlist_add(&(osh->pktlist), (void *) skb, line, file);
		spin_unlock_irqrestore(&osh->pktlist_lock, flags);
#endif

		spin_lock_irqsave(&osh->pktalloc_lock, flags);
		osh->pub.pktalloced++;
		spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
	}

	return ((void*) skb);
}
527
528#ifdef CTFPOOL
/* Return an skb to its CTF pool instead of freeing it, resetting the
 * fields this driver may have modified so it can be reused as-new.
 */
static inline void
osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
{
	ctfpool_t *ctfpool;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
	skb->tstamp.tv.sec = 0;
#else
	skb->stamp.tv_sec = 0;
#endif

	/* We only need to init the fields that we change */
	skb->dev = NULL;
	skb->dst = NULL;
	memset(skb->cb, 0, sizeof(skb->cb));
	skb->ip_summed = 0;
	skb->destructor = NULL;

	/* pool pointer was stashed in the skb by osl_ctfpool_add */
	ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
	ASSERT(ctfpool != NULL);

	/* Add object to the ctfpool (LIFO via skb->next) */
	CTFPOOL_LOCK(ctfpool, flags);
	skb->next = (struct sk_buff *)ctfpool->head;
	ctfpool->head = (void *)skb;

	ctfpool->fast_frees++;
	ctfpool->curr_obj++;

	ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
	CTFPOOL_UNLOCK(ctfpool, flags);
}
564#endif /* CTFPOOL */
565
/* Free the driver packet. Free the tag if present.
 * 'send' marks a tx-path free: the registered tx_fn hook (if any) gets
 * to observe the packet first.  Multi-skb packets chained via skb->next
 * are freed one by one; pooled (CTF) skbs are recycled instead.
 */
void BCMFASTPATH
osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;
	unsigned long flags;

	skb = (struct sk_buff*) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

	/* perversion: we use skb->next to chain multi-skb packets */
	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef BCMDBG_PKT
		spin_lock_irqsave(&osh->pktlist_lock, flags);
		pktlist_remove(&(osh->pktlist), (void *) skb);
		spin_unlock_irqrestore(&osh->pktlist_lock, flags);
#endif

#ifdef CTFPOOL
        /* foxconn wklin modified, 07/07/2011 */
        /* add CTFPOOLPTR() check because..
         * skb->mac_len bit 4 may be set (PKTISFAST()==true) in the stack...*/
		if (PKTISFAST(osh, skb) && CTFPOOLPTR(osh, skb))
			osl_pktfastfree(osh, skb);
		else {
#else /* CTFPOOL */
		{
#endif /* CTFPOOL */

			if (skb->destructor)
				/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
				 * destructor exists
				 */
				dev_kfree_skb_any(skb);
			else
				/* can free immediately (even in_irq()) if destructor
				 * does not exist
				 */
				dev_kfree_skb(skb);
		}
	spin_lock_irqsave(&osh->pktalloc_lock, flags);
		osh->pub.pktalloced--;
	spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
		skb = nskb;
	}
}
619
620uint32
621osl_pci_read_config(osl_t *osh, uint offset, uint size)
622{
623	uint val = 0;
624	uint retry = PCI_CFG_RETRY;
625
626	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
627
628	/* only 4byte access supported */
629	ASSERT(size == 4);
630
631	do {
632		pci_read_config_dword(osh->pdev, offset, &val);
633		if (val != 0xffffffff)
634			break;
635	} while (retry--);
636
637
638	return (val);
639}
640
641void
642osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
643{
644	uint retry = PCI_CFG_RETRY;
645
646	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
647
648	/* only 4byte access supported */
649	ASSERT(size == 4);
650
651	do {
652		pci_write_config_dword(osh->pdev, offset, val);
653		if (offset != PCI_BAR0_WIN)
654			break;
655		if (osl_pci_read_config(osh, offset, size) == val)
656			break;
657	} while (retry--);
658
659}
660
661/* return bus # for the pci device pointed by osh->pdev */
662uint
663osl_pci_bus(osl_t *osh)
664{
665	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
666
667	return ((struct pci_dev *)osh->pdev)->bus->number;
668}
669
670/* return slot # for the pci device pointed by osh->pdev */
671uint
672osl_pci_slot(osl_t *osh)
673{
674	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
675
676	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
677}
678
/* PCMCIA attribute-space accessor: intentionally a no-op stub in this
 * build — reads leave 'buf' untouched and writes are discarded.
 */
static void
osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
{
}
683
684void
685osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
686{
687	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
688}
689
690void
691osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
692{
693	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
694}
695
696#ifdef BCMDBG_MEM
697/* In BCMDBG_MEM configurations osl_malloc is only used internally in
698 * the implementation of osl_debug_malloc.  Because we are using the GCC
699 * -Wstrict-prototypes compile option, we must always have a prototype
700 * for a global/external function.  So make osl_malloc static in
701 * the BCMDBG_MEM case.
702 */
703static
704#endif
705void *
706osl_malloc(osl_t *osh, uint size)
707{
708	void *addr;
709
710	/* only ASSERT if osh is defined */
711	if (osh)
712		ASSERT(osh->magic == OS_HANDLE_MAGIC);
713
714	if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
715		if (osh)
716			osh->failed++;
717		return (NULL);
718	}
719	if (osh)
720		atomic_add(size, &osh->malloced);
721
722	return (addr);
723}
724
725#ifdef BCMDBG_MEM
726/* In BCMDBG_MEM configurations osl_mfree is only used internally in
727 * the implementation of osl_debug_mfree.  Because we are using the GCC
728 * -Wstrict-prototypes compile option, we must always have a prototype
729 * for a global/external function.  So make osl_mfree static in
730 * the BCMDBG_MEM case.
731 */
732static
733#endif
734void
735osl_mfree(osl_t *osh, void *addr, uint size)
736{
737	if (osh) {
738		ASSERT(osh->magic == OS_HANDLE_MAGIC);
739		atomic_sub(size, &osh->malloced);
740	}
741	kfree(addr);
742}
743
744uint
745osl_malloced(osl_t *osh)
746{
747	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
748	return (atomic_read(&osh->malloced));
749}
750
751uint
752osl_malloc_failed(osl_t *osh)
753{
754	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
755	return (osh->failed);
756}
757
#ifdef BCMDBG_MEM
/* guard the per-osh allocation list; IRQ-safe because allocs happen in
 * atomic context too
 */
#define MEMLIST_LOCK(osh, flags)	spin_lock_irqsave(&(osh)->dbgmem_lock, flags)
#define MEMLIST_UNLOCK(osh, flags)	spin_unlock_irqrestore(&(osh)->dbgmem_lock, flags)

/* Tracked allocation: prepends a bcm_mem_link_t header recording size
 * and file:line, links it into osh->dbgmem_list, and returns a pointer
 * just past the header.  NOTE(review): the underlying osl_malloc runs
 * with dbgmem_lock held — acceptable only because it uses GFP_ATOMIC.
 */
void*
osl_debug_malloc(osl_t *osh, uint size, int line, char* file)
{
	bcm_mem_link_t *p;
	char* basename;
	unsigned long flags = 0;

	if (!size) {
		printk("%s: allocating zero sized mem at %s line %d\n", __FUNCTION__, file, line);
		ASSERT(0);
	}

	if (osh) {
		MEMLIST_LOCK(osh, flags);
	}
	if ((p = (bcm_mem_link_t*)osl_malloc(osh, sizeof(bcm_mem_link_t) + size)) == NULL) {
		if (osh) {
			MEMLIST_UNLOCK(osh, flags);
		}
		return (NULL);
	}

	p->size = size;
	p->line = line;
	p->osh = (void *)osh;

	/* record only the basename of the allocating file */
	basename = strrchr(file, '/');
	/* skip the '/' */
	if (basename)
		basename++;

	if (!basename)
		basename = file;

	strncpy(p->file, basename, BCM_MEM_FILENAME_LEN);
	p->file[BCM_MEM_FILENAME_LEN - 1] = '\0';	/* strncpy may not terminate */

	/* link this block at the head of the tracking list */
	if (osh) {
		p->prev = NULL;
		p->next = osh->dbgmem_list;
		if (p->next)
			p->next->prev = p;
		osh->dbgmem_list = p;
		MEMLIST_UNLOCK(osh, flags);
	}

	/* caller's buffer starts right after the header */
	return p + 1;
}
811
/* Free a buffer obtained from osl_debug_malloc, validating its header
 * (double free, size mismatch, osh mismatch) before unlinking it.
 * NOTE(review): the header checks run before dbgmem_lock is taken —
 * racy against a concurrent walker; confirm whether callers serialize.
 */
void
osl_debug_mfree(osl_t *osh, void *addr, uint size, int line, char* file)
{
	/* step back from the user pointer to the tracking header */
	bcm_mem_link_t *p = (bcm_mem_link_t *)((int8*)addr - sizeof(bcm_mem_link_t));
	unsigned long flags = 0;

	ASSERT(osh == NULL || osh->magic == OS_HANDLE_MAGIC);

	/* size == 0 marks an already-freed block (set below) */
	if (p->size == 0) {
		printk("osl_debug_mfree: double free on addr %p size %d at line %d file %s\n",
			addr, size, line, file);
		ASSERT(p->size);
		return;
	}

	if (p->size != size) {
		printk("osl_debug_mfree: dealloc size %d does not match alloc size %d on addr %p"
		       " at line %d file %s\n",
		       size, p->size, addr, line, file);
		ASSERT(p->size == size);
		return;
	}

	if (p->osh != (void *)osh) {
		printk("osl_debug_mfree: alloc osh %p does not match dealloc osh %p\n",
			p->osh, osh);
		printk("Dealloc addr %p size %d at line %d file %s\n", addr, size, line, file);
		printk("Alloc size %d line %d file %s\n", p->size, p->line, p->file);
		ASSERT(p->osh == (void *)osh);
		return;
	}

	/* unlink this block from the tracking list */
	if (osh) {
		MEMLIST_LOCK(osh, flags);
		if (p->prev)
			p->prev->next = p->next;
		if (p->next)
			p->next->prev = p->prev;
		if (osh->dbgmem_list == p)
			osh->dbgmem_list = p->next;
		p->next = p->prev = NULL;
	}
	p->size = 0;	/* mark freed so a second free is detected above */

	osl_mfree(osh, p, size + sizeof(bcm_mem_link_t));
	if (osh) {
		MEMLIST_UNLOCK(osh, flags);
	}
}
862
/* Dump every live tracked allocation (address, size, file:line), either
 * into strbuf 'b' or — when b is NULL — to the console.  Returns 0.
 */
int
osl_debug_memdump(osl_t *osh, struct bcmstrbuf *b)
{
	bcm_mem_link_t *p;
	unsigned long flags = 0;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	MEMLIST_LOCK(osh, flags);
	if (osh->dbgmem_list) {
		if (b != NULL)
			bcm_bprintf(b, "   Address   Size File:line\n");
		else
			printf("   Address   Size File:line\n");

		for (p = osh->dbgmem_list; p; p = p->next) {
			if (b != NULL)
				bcm_bprintf(b, "%p %6d %s:%d\n", (char*)p + sizeof(bcm_mem_link_t),
					p->size, p->file, p->line);
			else
				printf("%p %6d %s:%d\n", (char*)p + sizeof(bcm_mem_link_t),
					p->size, p->file, p->line);

			/* Detects loop-to-self so we don't enter infinite loop */
			if (p == p->next) {
				if (b != NULL)
					bcm_bprintf(b, "WARNING: loop-to-self "
						"p %p p->next %p\n", p, p->next);
				else
					printf("WARNING: loop-to-self "
						"p %p p->next %p\n", p, p->next);

				break;
			}
		}
	}
	MEMLIST_UNLOCK(osh, flags);

	return 0;
}
903
904#endif	/* BCMDBG_MEM */
905
906uint
907osl_dma_consistent_align(void)
908{
909	return (PAGE_SIZE);
910}
911
/* Allocate DMA-consistent memory.  The allocator only guarantees
 * DMA_CONSISTENT_ALIGN alignment, so when the caller asks for more
 * (align_bits) the request is padded by one alignment unit and the
 * caller aligns within the block; *alloced reports the padded size.
 */
void*
osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap)
{
	uint16 align = (1 << align_bits);	/* requested alignment in bytes */
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
		size += align;
	*alloced = size;

	return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap));
}
924
925void
926osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
927{
928	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
929
930	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
931}
932
933uint BCMFASTPATH
934osl_dma_map(osl_t *osh, void *va, uint size, int direction)
935{
936	int dir;
937
938	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
939	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
940	return (pci_map_single(osh->pdev, va, size, dir));
941}
942
943void BCMFASTPATH
944osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
945{
946	int dir;
947
948	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
949	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
950	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
951}
952
953#if defined(BCMDBG_ASSERT)
/* Assertion-failure handler.  Formats "file:line" info, gives syslog a
 * few seconds to flush when not in interrupt context, then acts per
 * g_assert_type: 0 -> panic, 2 -> BUG(), anything else -> log only.
 */
void
osl_assert(const char *exp, const char *file, int line)
{
	char tempbuf[256];
	const char *basename;

	/* report only the basename of the failing file */
	basename = strrchr(file, '/');
	/* skip the '/' */
	if (basename)
		basename++;

	if (!basename)
		basename = file;

/* NOTE(review): redundant with the enclosing #if defined(BCMDBG_ASSERT) */
#ifdef BCMDBG_ASSERT
	snprintf(tempbuf, 256, "assertion \"%s\" failed: file \"%s\", line %d\n",
		exp, basename, line);

	/* Print assert message and give it time to be written to /var/log/messages */
	if (!in_interrupt()) {
		const int delay = 3;
		printk("%s", tempbuf);
		printk("panic in %d seconds\n", delay);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(delay * HZ);
	}

	switch (g_assert_type) {
		case 0:
			panic("%s", tempbuf);
#ifdef __COVERITY__
			/* Inform Coverity that execution will not continue past this point */
			__coverity_panic__();
#endif /* __COVERITY__ */
			break;
		case 2:
			printk("%s", tempbuf);
			BUG();
#ifdef __COVERITY__
			/* Inform Coverity that execution will not continue past this point */
			__coverity_panic__();
#endif /* __COVERITY__ */
			break;
		default:
			break;
	}
#endif /* BCMDBG_ASSERT */

}
1003#endif
1004
1005void
1006osl_delay(uint usec)
1007{
1008	uint d;
1009
1010	while (usec > 0) {
1011		d = MIN(usec, 1000);
1012		udelay(d);
1013		usec -= d;
1014	}
1015}
1016
1017#if defined(DSLCPE_DELAY)
1018
1019void
1020osl_oshsh_init(osl_t *osh, shared_osl_t* oshsh)
1021{
1022	extern unsigned long loops_per_jiffy;
1023	osh->oshsh = oshsh;
1024	osh->oshsh->MIPS = loops_per_jiffy / (500000/HZ);
1025}
1026
1027int
1028in_long_delay(osl_t *osh)
1029{
1030	return osh->oshsh->long_delay;
1031}
1032
/* Delay at least 'usec' microseconds.  When 'yield' is set and we are
 * in process context, the shared lock is dropped and the CPU yielded
 * via schedule(); elapsed scheduled time (measured in cycles, scaled by
 * the MIPS estimate) is credited against the remaining delay.  Falls
 * back to udelay() bursts when yielding is not possible.
 */
void
osl_long_delay(osl_t *osh, uint usec, bool yield)
{
	uint d;
	bool yielded = TRUE;
	int usec_to_delay = usec;
	unsigned long tick1, tick2, tick_diff = 0;

	/* delay at least requested usec */
	while (usec_to_delay > 0) {
		/* busy-wait in small bursts when we cannot (or did not) yield */
		if (!yield || !yielded) {
			d = MIN(usec_to_delay, 10);
			udelay(d);
			usec_to_delay -= d;
		}
		if (usec_to_delay > 0) {
			osh->oshsh->long_delay++;
			OSL_GETCYCLES(tick1);
			/* drop the shared lock across the potential sleep */
			spin_unlock_bh(osh->oshsh->lock);
			if (usec_to_delay > 0 && !in_irq() && !in_softirq() && !in_interrupt()) {
				schedule();
				yielded = TRUE;
			} else {
				yielded = FALSE;
			}
			spin_lock_bh(osh->oshsh->lock);
			OSL_GETCYCLES(tick2);

			if (yielded) {
				/* convert cycles slept into microseconds credit */
				tick_diff = TICKDIFF(tick2, tick1);
				tick_diff = (tick_diff * 2)/(osh->oshsh->MIPS);
				if (tick_diff) {
					usec_to_delay -= tick_diff;
				} else
					yielded = 0;
			}
			osh->oshsh->long_delay--;
			ASSERT(osh->oshsh->long_delay >= 0);
		}
	}
}
1074#endif /* DSLCPE_DELAY */
1075
/* Clone a packet.
 * The pkttag contents are NOT cloned.
 * Returns the clone, or NULL if skb_clone() fails.
 */
#ifdef BCMDBG_PKT
void *
osl_pktdup(osl_t *osh, void *skb, int line, char *file)
#else /* BCMDBG_PKT */
void *
osl_pktdup(osl_t *osh, void *skb)
#endif /* BCMDBG_PKT */
{
	void * p;
	unsigned long flags;

	if ((p = skb_clone((struct sk_buff*)skb, GFP_ATOMIC)) == NULL)
		return NULL;

#ifdef CTFPOOL
	if (PKTISFAST(osh, skb)) {
		ctfpool_t *ctfpool;

		/* if the buffer allocated from ctfpool is cloned then
		 * we can't be sure when it will be freed. since there
		 * is a chance that we will be losing a buffer
		 * from our pool, we increment the refill count for the
		 * object to be alloced later.
		 */
		ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
		ASSERT(ctfpool != NULL);
		/* neither original nor clone may go back to the pool now */
		PKTCLRFAST(osh, p);
		PKTCLRFAST(osh, skb);
		ctfpool->refills++;
	}
#endif /* CTFPOOL */

	/* skb_clone copies skb->cb.. we don't want that */
	if (osh->pub.pkttag)
		bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ);

	/* Increment the packet counter */
	spin_lock_irqsave(&osh->pktalloc_lock, flags);
	osh->pub.pktalloced++;
	spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
#ifdef BCMDBG_PKT
	spin_lock_irqsave(&osh->pktlist_lock, flags);
	pktlist_add(&(osh->pktlist), (void *) p, line, file);
	spin_unlock_irqrestore(&osh->pktlist_lock, flags);
#endif
	return (p);
}
1126
1127#ifdef BCMDBG_PKT
1128#ifdef BCMDBG_PTRACE
1129void
1130osl_pkttrace(osl_t *osh, void *pkt, uint16 bit)
1131{
1132	pktlist_trace(&(osh->pktlist), pkt, bit);
1133}
1134#endif /* BCMDBG_PTRACE */
1135
1136char *
1137osl_pktlist_dump(osl_t *osh, char *buf)
1138{
1139	pktlist_dump(&(osh->pktlist), buf);
1140	return buf;
1141}
1142
1143void
1144osl_pktlist_add(osl_t *osh, void *p, int line, char *file)
1145{
1146	unsigned long flags;
1147	spin_lock_irqsave(&osh->pktlist_lock, flags);
1148	pktlist_add(&(osh->pktlist), p, line, file);
1149	spin_unlock_irqrestore(&osh->pktlist_lock, flags);
1150}
1151
1152void
1153osl_pktlist_remove(osl_t *osh, void *p)
1154{
1155	unsigned long flags;
1156	spin_lock_irqsave(&osh->pktlist_lock, flags);
1157	pktlist_remove(&(osh->pktlist), p);
1158	spin_unlock_irqrestore(&osh->pktlist_lock, flags);
1159}
1160#endif /* BCMDBG_PKT */
1161
1162/*
1163 * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
1164 */
1165#if defined(OSLREGOPS) || (defined(WLC_HIGH) && !defined(WLC_LOW))
1166uint8
1167osl_readb(osl_t *osh, volatile uint8 *r)
1168{
1169	osl_rreg_fn_t rreg	= ((osl_pubinfo_t*)osh)->rreg_fn;
1170	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1171
1172	return (uint8)((rreg)(ctx, (void*)r, sizeof(uint8)));
1173}
1174
1175
1176uint16
1177osl_readw(osl_t *osh, volatile uint16 *r)
1178{
1179	osl_rreg_fn_t rreg	= ((osl_pubinfo_t*)osh)->rreg_fn;
1180	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1181
1182	return (uint16)((rreg)(ctx, (void*)r, sizeof(uint16)));
1183}
1184
1185uint32
1186osl_readl(osl_t *osh, volatile uint32 *r)
1187{
1188	osl_rreg_fn_t rreg	= ((osl_pubinfo_t*)osh)->rreg_fn;
1189	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1190
1191	return (uint32)((rreg)(ctx, (void*)r, sizeof(uint32)));
1192}
1193
1194void
1195osl_writeb(osl_t *osh, volatile uint8 *r, uint8 v)
1196{
1197	osl_wreg_fn_t wreg	= ((osl_pubinfo_t*)osh)->wreg_fn;
1198	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1199
1200	((wreg)(ctx, (void*)r, v, sizeof(uint8)));
1201}
1202
1203
1204void
1205osl_writew(osl_t *osh, volatile uint16 *r, uint16 v)
1206{
1207	osl_wreg_fn_t wreg	= ((osl_pubinfo_t*)osh)->wreg_fn;
1208	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1209
1210	((wreg)(ctx, (void*)r, v, sizeof(uint16)));
1211}
1212
1213void
1214osl_writel(osl_t *osh, volatile uint32 *r, uint32 v)
1215{
1216	osl_wreg_fn_t wreg	= ((osl_pubinfo_t*)osh)->wreg_fn;
1217	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1218
1219	((wreg)(ctx, (void*)r, v, sizeof(uint32)));
1220}
1221#endif /* OSLREGOPS */
1222
1223/*
1224 * BINOSL selects the slightly slower function-call-based binary compatible osl.
1225 */
1226#ifdef BINOSL
1227
1228uint32
1229osl_sysuptime(void)
1230{
1231	return ((uint32)jiffies * (1000 / HZ));
1232}
1233
1234uint
1235osl_pktalloced(osl_t *osh)
1236{
1237	return (osh->pub.pktalloced);
1238}
1239
int
osl_printf(const char *format, ...)
{
	va_list args;
	static char printbuf[1024];	/* NOTE(review): shared static buffer —
					 * concurrent callers can interleave;
					 * presumably serialized by callers. */
	int len;

	/* sprintf into a local buffer because there *is* no "vprintk()".. */
	va_start(args, format);
	len = vsnprintf(printbuf, sizeof(printbuf), format, args);
	va_end(args);

	/*
	 * C99 vsnprintf() returns the would-be (untruncated) length, or a
	 * negative value on a format error.  The original test used '>',
	 * which missed the exact-boundary truncation case (len == size)
	 * and compared signed to unsigned.
	 */
	if (len < 0 || (size_t)len >= sizeof(printbuf)) {
		printk("osl_printf: buffer overrun\n");
		return (0);
	}

	return (printk("%s", printbuf));
}
1259
int
osl_sprintf(char *buf, const char *format, ...)
{
	/* Unbounded printf into buf; caller must size buf for the worst case. */
	int written;
	va_list ap;

	va_start(ap, format);
	written = vsprintf(buf, format, ap);
	va_end(ap);

	return written;
}
1271
int
osl_snprintf(char *buf, size_t n, const char *format, ...)
{
	/* Bounded printf; returns the untruncated length per C99. */
	int written;
	va_list ap;

	va_start(ap, format);
	written = vsnprintf(buf, n, format, ap);
	va_end(ap);

	return written;
}
1283
int
osl_vsprintf(char *buf, const char *format, va_list ap)
{
	/* Thin wrapper; caller owns the va_list lifecycle. */
	int rc = vsprintf(buf, format, ap);

	return rc;
}
1289
int
osl_vsnprintf(char *buf, size_t n, const char *format, va_list ap)
{
	/* Thin bounded wrapper; returns the untruncated length per C99. */
	int rc = vsnprintf(buf, n, format, ap);

	return rc;
}
1295
int
osl_strcmp(const char *s1, const char *s2)
{
	/* Ordinary lexicographic comparison. */
	return strcmp(s1, s2);
}
1301
1302int
1303osl_strncmp(const char *s1, const char *s2, uint n)
1304{
1305	return (strncmp(s1, s2, n));
1306}
1307
int
osl_strlen(const char *s)
{
	/* Length excluding the NUL terminator, narrowed to int by the API. */
	return (int)strlen(s);
}
1313
char*
osl_strcpy(char *d, const char *s)
{
	/* Copy including the NUL terminator; returns d. */
	return strcpy(d, s);
}
1319
1320char*
1321osl_strncpy(char *d, const char *s, uint n)
1322{
1323	return (strncpy(d, s, n));
1324}
1325
char*
osl_strchr(const char *s, int c)
{
	/* First occurrence of c in s, or NULL. */
	return strchr(s, c);
}
1331
char*
osl_strrchr(const char *s, int c)
{
	/* Last occurrence of c in s, or NULL. */
	return strrchr(s, c);
}
1337
void*
osl_memset(void *d, int c, size_t n)
{
	/* Fill n bytes of d with c; returns d. */
	return (memset(d, c, n));
}
1343
void*
osl_memcpy(void *d, const void *s, size_t n)
{
	/* Copy n bytes; regions must not overlap (use osl_memmove if so). */
	return (memcpy(d, s, n));
}
1349
void*
osl_memmove(void *d, const void *s, size_t n)
{
	/* Copy n bytes; safe for overlapping regions. */
	return (memmove(d, s, n));
}
1355
int
osl_memcmp(const void *s1, const void *s2, size_t n)
{
	/* Byte-wise comparison of n bytes. */
	return (memcmp(s1, s2, n));
}
1361
1362uint32
1363osl_readl(volatile uint32 *r)
1364{
1365	return (readl(r));
1366}
1367
1368uint16
1369osl_readw(volatile uint16 *r)
1370{
1371	return (readw(r));
1372}
1373
1374uint8
1375osl_readb(volatile uint8 *r)
1376{
1377	return (readb(r));
1378}
1379
1380void
1381osl_writel(uint32 v, volatile uint32 *r)
1382{
1383	writel(v, r);
1384}
1385
1386void
1387osl_writew(uint16 v, volatile uint16 *r)
1388{
1389	writew(v, r);
1390}
1391
1392void
1393osl_writeb(uint8 v, volatile uint8 *r)
1394{
1395	writeb(v, r);
1396}
1397
void *
osl_uncached(void *va)
{
	/* On MIPS, remap the address into the uncached KSEG1 window;
	 * other architectures return the address unchanged.
	 */
#ifdef mips
	return (void *)KSEG1ADDR(va);
#else
	return va;
#endif /* mips */
}
1407
void *
osl_cached(void *va)
{
	/* On MIPS, remap the address into the cached KSEG0 window;
	 * other architectures return the address unchanged.
	 */
#ifdef mips
	return (void *)KSEG0ADDR(va);
#else
	return va;
#endif /* mips */
}
1417
1418uint
1419osl_getcycles(void)
1420{
1421	uint cycles;
1422
1423#if defined(mips)
1424	cycles = read_c0_count() * 2;
1425#elif defined(__i386__)
1426	rdtscl(cycles);
1427#else
1428	cycles = 0;
1429#endif /* defined(mips) */
1430	return cycles;
1431}
1432
1433void *
1434osl_reg_map(uint32 pa, uint size)
1435{
1436	return (ioremap_nocache((unsigned long)pa, (unsigned long)size));
1437}
1438
void
osl_reg_unmap(void *va)
{
	/* Undo osl_reg_map(). */
	iounmap(va);
}
1444
1445int
1446osl_busprobe(uint32 *val, uint32 addr)
1447{
1448#ifdef mips
1449	return get_dbe(*val, (uint32 *)addr);
1450#else
1451	*val = readl((uint32 *)(uintptr)addr);
1452	return 0;
1453#endif /* mips */
1454}
1455
1456bool
1457osl_pktshared(void *skb)
1458{
1459	return (((struct sk_buff*)skb)->cloned);
1460}
1461
1462uchar*
1463osl_pktdata(osl_t *osh, void *skb)
1464{
1465	return (((struct sk_buff*)skb)->data);
1466}
1467
1468uint
1469osl_pktlen(osl_t *osh, void *skb)
1470{
1471	return (((struct sk_buff*)skb)->len);
1472}
1473
1474uint
1475osl_pktheadroom(osl_t *osh, void *skb)
1476{
1477	return (uint) skb_headroom((struct sk_buff *) skb);
1478}
1479
1480uint
1481osl_pkttailroom(osl_t *osh, void *skb)
1482{
1483	return (uint) skb_tailroom((struct sk_buff *) skb);
1484}
1485
1486void*
1487osl_pktnext(osl_t *osh, void *skb)
1488{
1489	return (((struct sk_buff*)skb)->next);
1490}
1491
1492void
1493osl_pktsetnext(void *skb, void *x)
1494{
1495	((struct sk_buff*)skb)->next = (struct sk_buff*)x;
1496}
1497
1498void
1499osl_pktsetlen(osl_t *osh, void *skb, uint len)
1500{
1501	__skb_trim((struct sk_buff*)skb, len);
1502}
1503
1504uchar*
1505osl_pktpush(osl_t *osh, void *skb, int bytes)
1506{
1507	return (skb_push((struct sk_buff*)skb, bytes));
1508}
1509
1510uchar*
1511osl_pktpull(osl_t *osh, void *skb, int bytes)
1512{
1513	return (skb_pull((struct sk_buff*)skb, bytes));
1514}
1515
1516void*
1517osl_pkttag(void *skb)
1518{
1519	return ((void*)(((struct sk_buff*)skb)->cb));
1520}
1521
1522void*
1523osl_pktlink(void *skb)
1524{
1525	return (((struct sk_buff*)skb)->prev);
1526}
1527
1528void
1529osl_pktsetlink(void *skb, void *x)
1530{
1531	((struct sk_buff*)skb)->prev = (struct sk_buff*)x;
1532}
1533
1534uint
1535osl_pktprio(void *skb)
1536{
1537	return (((struct sk_buff*)skb)->priority);
1538}
1539
1540void
1541osl_pktsetprio(void *skb, uint x)
1542{
1543	((struct sk_buff*)skb)->priority = x;
1544}
1545#endif	/* BINOSL */
1546
1547/* Linux Kernel: File Operations: start */
1548void *
1549osl_os_open_image(char *filename)
1550{
1551	struct file *fp;
1552
1553	fp = filp_open(filename, O_RDONLY, 0);
1554	/*
1555	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
1556	 * Alternative:
1557	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
1558	 * ???
1559	 */
1560	 if (IS_ERR(fp))
1561		 fp = NULL;
1562
1563	 return fp;
1564}
1565
1566int
1567osl_os_get_image_block(char *buf, int len, void *image)
1568{
1569	struct file *fp = (struct file *)image;
1570	int rdlen;
1571
1572	if (!image)
1573		return 0;
1574
1575	rdlen = kernel_read(fp, fp->f_pos, buf, len);
1576	if (rdlen > 0)
1577		fp->f_pos += rdlen;
1578
1579	return rdlen;
1580}
1581
1582void
1583osl_os_close_image(void *image)
1584{
1585	if (image)
1586		filp_close((struct file *)image, NULL);
1587}
int
osl_os_image_size(void *image)
{
	/*
	 * Return the image length in bytes without disturbing the current
	 * read offset (seek to end, then seek back).  Replaced the magic
	 * whence values 0/1/2 with the SEEK_SET/SEEK_CUR/SEEK_END names
	 * they stand for.
	 *
	 * NOTE(review): both values are narrowed from loff_t to int, so
	 * images >= 2GB would be misreported — presumably firmware images
	 * are always far smaller; confirm with callers.
	 */
	int len = 0, curroffset;

	if (image) {
		/* store the current offset */
		curroffset = generic_file_llseek(image, 0, SEEK_CUR);
		/* goto end of file to get length */
		len = generic_file_llseek(image, 0, SEEK_END);
		/* restore back the offset */
		generic_file_llseek(image, curroffset, SEEK_SET);
	}
	return len;
}
1603
1604//Foxconn add start, Lewis Min, for UBD, 04/18/2008
1605int (*ip_pre_insert_hook)(struct sk_buff *skb);//Foxconn add , Lewis Min, for UBD, 04/18/2008
1606
1607void insert_func_to_IP_PRE_ROUTE(void *FUNC)
1608{
1609    local_bh_disable(); /* foxconn wklin added, 11/24/2008 */
1610    ip_pre_insert_hook= FUNC;
1611    local_bh_enable(); /* foxconn wklin added, 11/24/2008 */
1612}
1613
1614void remove_func_to_IP_PRE_ROUTE(void *FUNC)
1615{
1616    local_bh_disable(); /* foxconn wklin added, 11/24/2008 */
1617    ip_pre_insert_hook= NULL;
1618    local_bh_enable(); /* foxconn wklin added, 11/24/2008 */
1619}
1620
1621/* Linux Kernel: File Operations: end */
1622