1/*
2 * Linux OS Independent Layer
3 *
4 * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved.
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
13 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
15 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
16 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 *
18 * $Id: linux_osl.c 418493 2013-08-15 09:56:44Z $
19 */
20
21#define LINUX_PORT
22
23#include <typedefs.h>
24#include <bcmendian.h>
25#include <linuxver.h>
26#include <bcmdefs.h>
27#include <osl.h>
28#include <bcmutils.h>
29#include <linux/delay.h>
30#ifdef mips
31#include <asm/paccess.h>
32#endif /* mips */
33#include <pcicfg.h>
34
35
36
37#include <linux/fs.h>
38
39#define PCI_CFG_RETRY 		10
40
41#define OS_HANDLE_MAGIC		0x1234abcd	/* Magic # to recognize osh */
42#define BCM_MEM_FILENAME_LEN 	24		/* Mem. filename length */
43
44#ifdef DHD_USE_STATIC_BUF
45#define STATIC_BUF_MAX_NUM	16
46#define STATIC_BUF_SIZE	(PAGE_SIZE*2)
47#define STATIC_BUF_TOTAL_LEN	(STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
48
/* Bookkeeping for the DHD preallocated-memory pool: a semaphore plus a
 * per-slot in-use flag for STATIC_BUF_MAX_NUM slots of STATIC_BUF_SIZE each.
 */
typedef struct bcm_static_buf {
	struct semaphore static_sem;	/* serializes buf_use[] updates */
	unsigned char *buf_ptr;		/* base of the slot area */
	unsigned char buf_use[STATIC_BUF_MAX_NUM];	/* 1 = slot allocated */
} bcm_static_buf_t;

/* Set by osl_attach() from dhd_os_prealloc(); stays NULL if prealloc failed */
static bcm_static_buf_t *bcm_static_buf = 0;
56
57#define STATIC_PKT_MAX_NUM	8
58
/* Preallocated skb pool: STATIC_PKT_MAX_NUM 4K skbs plus the same number
 * of 8K skbs.  pkt_use[] carries an in-use flag per skb: indices
 * [0..STATIC_PKT_MAX_NUM) track skb_4k[], the rest track skb_8k[].
 */
typedef struct bcm_static_pkt {
	struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM];
	struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM];
	struct semaphore osl_pkt_sem;	/* serializes pkt_use[] updates */
	unsigned char pkt_use[STATIC_PKT_MAX_NUM * 2];
} bcm_static_pkt_t;

/* Set by osl_attach(); points inside the bcm_static_buf area */
static bcm_static_pkt_t *bcm_static_skb = 0;
67#endif /* DHD_USE_STATIC_BUF */
68
/* Header prepended to every BCMDBG_MEM allocation; links the block into
 * osh->dbgmem_list so osl_debug_mfree()/osl_debug_memdump() can validate
 * and enumerate live allocations.
 */
typedef struct bcm_mem_link {
	struct bcm_mem_link *prev;
	struct bcm_mem_link *next;
	uint	size;		/* payload size; 0 marks an already-freed block */
	int	line;		/* allocation-site line number */
	void 	*osh;		/* owning OSL handle */
	char	file[BCM_MEM_FILENAME_LEN];	/* allocation-site file basename */
} bcm_mem_link_t;
77
/* Per-device OSL handle, created by osl_attach() and threaded through
 * every osl_* call in this file.
 */
struct osl_info {
	osl_pubinfo_t pub;	/* public fields (pkttag, mmbus, tx_fn, ...) */
#ifdef CTFPOOL
	ctfpool_t *ctfpool;	/* fast skb recycle pool (see osl_ctfpool_*) */
#endif /* CTFPOOL */
	uint magic;		/* OS_HANDLE_MAGIC while the handle is valid */
	void *pdev;		/* bus device handle (e.g. struct pci_dev *) */
	atomic_t malloced;	/* bytes outstanding via osl_malloc/osl_mfree */
	atomic_t pktalloced; 	/* Number of allocated packet buffers */
	uint failed;		/* count of failed osl_malloc calls */
	uint bustype;		/* PCI_BUS, SDIO_BUS, ... from osl_attach */
	bcm_mem_link_t *dbgmem_list;	/* BCMDBG_MEM live-allocation list */
#if defined(DSLCPE_DELAY)
	shared_osl_t *oshsh; /* osh shared */
#endif
	spinlock_t dbgmem_lock;	/* protects dbgmem_list */
#ifdef BCMDBG_PKT    /* pkt logging for debugging */
	spinlock_t pktlist_lock;
	pktlist_info_t pktlist;
#endif  /* BCMDBG_PKT */
#ifdef BCMDBG_CTRACE
	spinlock_t ctrace_lock;
	struct list_head ctrace_list;
	int ctrace_num;
#endif /* BCMDBG_CTRACE */
	spinlock_t pktalloc_lock;	/* NOTE(review): initialized in osl_attach but not taken in this chunk */
};
105
/* Zero the 32-byte skb->cb region used as the packet tag.  The IP stack
 * depends on cb being cleared before an skb is handed up (see the comment
 * at osl_pkt_tonative).
 */
#define OSL_PKTTAG_CLEAR(p) \
do { \
	struct sk_buff *s = (struct sk_buff *)(p); \
	ASSERT(OSL_PKTTAG_SZ == 32); \
	*(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
} while (0)
115
116/* PCMCIA attribute space access macros */
117#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
/* Per-device state for PCMCIA attribute-space access.
 * NOTE(review): not referenced elsewhere in this chunk; the accessor
 * osl_pcmcia_attr() below is a stub.
 */
struct pcmcia_dev {
	dev_link_t link;	/* PCMCIA device pointer */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
	dev_node_t node;	/* PCMCIA node structure */
#endif
	void *base;		/* Mapped attribute memory window */
	size_t size;		/* Size of window */
	void *drv;		/* Driver data */
};
127#endif /* defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) */
128
/* Global ASSERT type flag; consumed by the ASSERT implementation
 * (defined outside this file — semantics not visible here).
 */
uint32 g_assert_type = TRUE;
131
/* BCME_* error code (negated, used as index) -> Linux errno translation
 * table, consumed by osl_error().  Entry order must match the BCME_*
 * definitions in bcmutils.h; the #error below trips if BCME_LAST moves
 * without this table being extended.
 */
static int16 linuxbcmerrormap[] =
{	0, 			/* 0 */
	-EINVAL,		/* BCME_ERROR */
	-EINVAL,		/* BCME_BADARG */
	-EINVAL,		/* BCME_BADOPTION */
	-EINVAL,		/* BCME_NOTUP */
	-EINVAL,		/* BCME_NOTDOWN */
	-EINVAL,		/* BCME_NOTAP */
	-EINVAL,		/* BCME_NOTSTA */
	-EINVAL,		/* BCME_BADKEYIDX */
	-EINVAL,		/* BCME_RADIOOFF */
	-EINVAL,		/* BCME_NOTBANDLOCKED */
	-EINVAL, 		/* BCME_NOCLK */
	-EINVAL, 		/* BCME_BADRATESET */
	-EINVAL, 		/* BCME_BADBAND */
	-E2BIG,			/* BCME_BUFTOOSHORT */
	-E2BIG,			/* BCME_BUFTOOLONG */
	-EBUSY, 		/* BCME_BUSY */
	-EINVAL, 		/* BCME_NOTASSOCIATED */
	-EINVAL, 		/* BCME_BADSSIDLEN */
	-EINVAL, 		/* BCME_OUTOFRANGECHAN */
	-EINVAL, 		/* BCME_BADCHAN */
	-EFAULT, 		/* BCME_BADADDR */
	-ENOMEM, 		/* BCME_NORESOURCE */
	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
	-EMSGSIZE,		/* BCME_BADLENGTH */
	-EINVAL,		/* BCME_NOTREADY */
	-EPERM,			/* BCME_EPERM */
	-ENOMEM, 		/* BCME_NOMEM */
	-EINVAL, 		/* BCME_ASSOCIATED */
	-ERANGE, 		/* BCME_RANGE */
	-EINVAL, 		/* BCME_NOTFOUND */
	-EINVAL, 		/* BCME_WME_NOT_ENABLED */
	-EINVAL, 		/* BCME_TSPEC_NOTFOUND */
	-EINVAL, 		/* BCME_ACM_NOTSUPPORTED */
	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
	-EIO,			/* BCME_SDIO_ERROR */
	-ENODEV,		/* BCME_DONGLE_DOWN */
	-EINVAL,		/* BCME_VERSION */
	-EIO,			/* BCME_TXFAIL */
	-EIO,			/* BCME_RXFAIL */
	-ENODEV,		/* BCME_NODEVICE */
	-EINVAL,		/* BCME_NMODE_DISABLED */
	-ENODATA,		/* BCME_NONRESIDENT */
	-EINVAL,		/* BCME_SCANREJECT */
	-EINVAL,		/* unused */
	-EINVAL,		/* unused */
	-EINVAL,		/* unused */
	-EOPNOTSUPP,		/* BCME_DISABLED */

/* When an new error code is added to bcmutils.h, add os
 * specific error translation here as well
 */
/* check if BCME_LAST changed since the last time this function was updated */
#if BCME_LAST != -47
#error "You need to add a OS error translation in the linuxbcmerrormap \
	for new error code defined in bcmutils.h"
#endif
};
191
192/* translate bcmerrors into linux errors */
193int
194osl_error(int bcmerror)
195{
196	if (bcmerror > 0)
197		bcmerror = 0;
198	else if (bcmerror < BCME_LAST)
199		bcmerror = BCME_ERROR;
200
201	/* Array bounds covered by ASSERT in osl_attach */
202	return linuxbcmerrormap[-bcmerror];
203}
204
205extern uint8* dhd_os_prealloc(void *osh, int section, int size);
206
207osl_t *
208osl_attach(void *pdev, uint bustype, bool pkttag)
209{
210	osl_t *osh;
211
212	osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
213	ASSERT(osh);
214
215	bzero(osh, sizeof(osl_t));
216
217	/* Check that error map has the right number of entries in it */
218	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
219
220	osh->magic = OS_HANDLE_MAGIC;
221	atomic_set(&osh->malloced, 0);
222	osh->failed = 0;
223	osh->dbgmem_list = NULL;
224	spin_lock_init(&(osh->dbgmem_lock));
225	osh->pdev = pdev;
226	osh->pub.pkttag = pkttag;
227	osh->bustype = bustype;
228
229	switch (bustype) {
230		case PCI_BUS:
231		case SI_BUS:
232		case PCMCIA_BUS:
233			osh->pub.mmbus = TRUE;
234			break;
235		case JTAG_BUS:
236		case SDIO_BUS:
237		case USB_BUS:
238		case SPI_BUS:
239		case RPC_BUS:
240			osh->pub.mmbus = FALSE;
241			break;
242		default:
243			ASSERT(FALSE);
244			break;
245	}
246
247#if defined(DHD_USE_STATIC_BUF)
248	if (!bcm_static_buf) {
249		if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(osh, 3, STATIC_BUF_SIZE+
250			STATIC_BUF_TOTAL_LEN))) {
251			printk("can not alloc static buf!\n");
252		}
253		else
254			printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf);
255
256
257		sema_init(&bcm_static_buf->static_sem, 1);
258
259		bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
260	}
261
262	if (!bcm_static_skb) {
263		int i;
264		void *skb_buff_ptr = 0;
265		bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
266		skb_buff_ptr = dhd_os_prealloc(osh, 4, 0);
267
268		bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *)*16);
269		for (i = 0; i < STATIC_PKT_MAX_NUM * 2; i++)
270			bcm_static_skb->pkt_use[i] = 0;
271
272		sema_init(&bcm_static_skb->osl_pkt_sem, 1);
273	}
274#endif /* DHD_USE_STATIC_BUF */
275
276#ifdef BCMDBG_CTRACE
277	spin_lock_init(&osh->ctrace_lock);
278	INIT_LIST_HEAD(&osh->ctrace_list);
279	osh->ctrace_num = 0;
280#endif /* BCMDBG_CTRACE */
281
282#ifdef BCMDBG_PKT
283	spin_lock_init(&(osh->pktlist_lock));
284#endif
285	spin_lock_init(&(osh->pktalloc_lock));
286
287	return osh;
288}
289
290void
291osl_detach(osl_t *osh)
292{
293	if (osh == NULL)
294		return;
295
296#ifdef DHD_USE_STATIC_BUF
297		if (bcm_static_buf) {
298			bcm_static_buf = 0;
299		}
300		if (bcm_static_skb) {
301			bcm_static_skb = 0;
302		}
303#endif
304
305	ASSERT(osh->magic == OS_HANDLE_MAGIC);
306	kfree(osh);
307}
308
/* Allocate an skb of the given length.  On 2.6.25+ kernels this uses
 * __dev_alloc_skb so GFP_DMA can be requested on sparsemem/ZONE_DMA
 * configurations; older kernels fall back to plain dev_alloc_skb.
 */
static struct sk_buff *osl_alloc_skb(unsigned int len)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
	gfp_t flags = GFP_ATOMIC;
	struct sk_buff *skb;

#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_ZONE_DMA)
	flags |= GFP_DMA;
#endif
	skb = __dev_alloc_skb(len, flags);
#ifdef CTFMAP
	if (skb) {
		/* Move data/tail 16 bytes past head.  NOTE(review): pokes skb
		 * fields directly instead of skb_reserve — assumes a freshly
		 * allocated, empty skb; mirrors the reset in osl_pktfastget.
		 */
		skb->data = skb->head + 16;
		skb->tail = skb->head + 16;
	}
#endif /* CTFMAP */
	return skb;
#else
	return dev_alloc_skb(len);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
}
330
331#ifdef CTFPOOL
332
/* Guard ctfpool accesses: an IRQ-safe spinlock when CTFPOOL_SPINLOCK is
 * defined, otherwise a bottom-half lock (flags argument unused then).
 */
#ifdef CTFPOOL_SPINLOCK
#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_irqsave(&(ctfpool)->lock, flags)
#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_irqrestore(&(ctfpool)->lock, flags)
#else
#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_bh(&(ctfpool)->lock)
#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_bh(&(ctfpool)->lock)
#endif /* CTFPOOL_SPINLOCK */
340/*
341 * Allocate and add an object to packet pool.
342 */
/* Allocate one skb and add it to the ctfpool free list.
 * Returns the new skb, or NULL when the pool is absent, already at
 * max_obj, or the skb allocation fails.  The pool lock is held across
 * the list insertion and the counters update.
 */
void *
osl_ctfpool_add(osl_t *osh)
{
	struct sk_buff *skb;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

	if ((osh == NULL) || (osh->ctfpool == NULL))
		return NULL;

	CTFPOOL_LOCK(osh->ctfpool, flags);
	ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);

	/* No need to allocate more objects */
	if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	/* Allocate a new skb and add it to the ctfpool */
	skb = osl_alloc_skb(osh->ctfpool->obj_size);
	if (skb == NULL) {
		printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
		       osh->ctfpool->obj_size);
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	/* Add to ctfpool */
	skb->next = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = skb;
	osh->ctfpool->fast_frees++;
	osh->ctfpool->curr_obj++;

	/* Hijack a skb member to store ptr to ctfpool */
	CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;

	/* Use bit flag to indicate skb from fast ctfpool */
	PKTFAST(osh, skb) = FASTBUF;

	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	return skb;
}
388
389/*
390 * Add new objects to the pool.
391 */
392void
393osl_ctfpool_replenish(osl_t *osh, uint thresh)
394{
395	if ((osh == NULL) || (osh->ctfpool == NULL))
396		return;
397
398	/* Do nothing if no refills are required */
399	while ((osh->ctfpool->refills > 0) && (thresh--)) {
400		osl_ctfpool_add(osh);
401		osh->ctfpool->refills--;
402	}
403}
404
405/*
406 * Initialize the packet pool with specified number of objects.
407 */
408int32
409osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
410{
411	osh->ctfpool = kmalloc(sizeof(ctfpool_t), GFP_ATOMIC);
412	ASSERT(osh->ctfpool);
413	bzero(osh->ctfpool, sizeof(ctfpool_t));
414
415	osh->ctfpool->max_obj = numobj;
416	osh->ctfpool->obj_size = size;
417
418	spin_lock_init(&osh->ctfpool->lock);
419
420	while (numobj--) {
421		if (!osl_ctfpool_add(osh))
422			return -1;
423		osh->ctfpool->fast_frees--;
424	}
425
426	return 0;
427}
428
429/*
430 * Cleanup the packet pool objects.
431 */
/* Free every skb in the ctfpool, then the pool itself.
 * The pool lock is held while the free list is drained.
 */
void
osl_ctfpool_cleanup(osl_t *osh)
{
	struct sk_buff *skb, *nskb;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

	if ((osh == NULL) || (osh->ctfpool == NULL))
		return;

	CTFPOOL_LOCK(osh->ctfpool, flags);

	skb = osh->ctfpool->head;

	while (skb != NULL) {
		/* Save the link before freeing; skb->next chains pool objects */
		nskb = skb->next;
		dev_kfree_skb(skb);
		skb = nskb;
		osh->ctfpool->curr_obj--;
	}

	ASSERT(osh->ctfpool->curr_obj == 0);
	osh->ctfpool->head = NULL;
	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	kfree(osh->ctfpool);
	osh->ctfpool = NULL;
}
461
462void
463osl_ctfpool_stats(osl_t *osh, void *b)
464{
465	struct bcmstrbuf *bb;
466
467	if ((osh == NULL) || (osh->ctfpool == NULL))
468		return;
469
470#ifdef DHD_USE_STATIC_BUF
471	if (bcm_static_buf) {
472		bcm_static_buf = 0;
473	}
474	if (bcm_static_skb) {
475		bcm_static_skb = 0;
476	}
477#endif /* DHD_USE_STATIC_BUF */
478
479	bb = b;
480
481	ASSERT((osh != NULL) && (bb != NULL));
482
483	bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
484	            osh->ctfpool->max_obj, osh->ctfpool->obj_size,
485	            osh->ctfpool->curr_obj, osh->ctfpool->refills);
486	bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
487	            osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
488	            osh->ctfpool->slow_allocs);
489}
490
/* Fast-path skb allocation from the ctfpool.  Returns NULL when the pool
 * is absent or empty so the caller (osl_pktget) falls back to the kernel
 * allocator.  The popped skb is re-initialized to an empty state.
 */
static inline struct sk_buff *
osl_pktfastget(osl_t *osh, uint len)
{
	struct sk_buff *skb;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

	/* Try to do fast allocate. Return null if ctfpool is not in use
	 * or if there are no items in the ctfpool.
	 */
	if (osh->ctfpool == NULL)
		return NULL;

	CTFPOOL_LOCK(osh->ctfpool, flags);
	if (osh->ctfpool->head == NULL) {
		ASSERT(osh->ctfpool->curr_obj == 0);
		osh->ctfpool->slow_allocs++;
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	ASSERT(len <= osh->ctfpool->obj_size);

	/* Get an object from ctfpool */
	skb = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = (void *)skb->next;

	osh->ctfpool->fast_allocs++;
	osh->ctfpool->curr_obj--;
	ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	/* Init skb struct: reset to the same 16-byte headroom layout that
	 * osl_alloc_skb establishes under CTFMAP.
	 */
	skb->next = skb->prev = NULL;
	skb->data = skb->head + 16;
	skb->tail = skb->head + 16;

	skb->len = 0;
	skb->cloned = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
	skb->list = NULL;
#endif
	atomic_set(&skb->users, 1);

	/* Clear chaining state but keep the FASTBUF flag set by ctfpool_add */
	PKTSETCLINK(skb, NULL);
	PKTCCLRATTR(skb);
	PKTFAST(osh, skb) &= ~(CTFBUF | SKIPCT | CHAINED);

	return skb;
}
542#endif /* CTFPOOL */
543/* Convert a driver packet to native(OS) packet
544 * In the process, packettag is zeroed out before sending up
545 * IP code depends on skb->cb to be setup correctly with various options
546 * In our case, that means it should be 0
547 */
struct sk_buff * BCMFASTPATH
osl_pkt_tonative(osl_t *osh, void *pkt)
{
	struct sk_buff *nskb;
#ifdef BCMDBG_CTRACE
	struct sk_buff *nskb1, *nskb2;
#endif
#ifdef BCMDBG_PKT
	unsigned long flags;
#endif

	/* Clear the tag on the head packet only (skb->cb must be zero
	 * before handing the skb to the IP stack — see comment above).
	 */
	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(pkt);

	/* Decrement the packet counter */
	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
#ifdef BCMDBG_PKT
		spin_lock_irqsave(&osh->pktlist_lock, flags);
		pktlist_remove(&(osh->pktlist), (void *) nskb);
		spin_unlock_irqrestore(&osh->pktlist_lock, flags);
#endif  /* BCMDBG_PKT */
		/* A chained packet accounts for its whole chain at once */
		atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->pktalloced);

#ifdef BCMDBG_CTRACE
		for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
			if (PKTISCHAINED(nskb1)) {
				nskb2 = PKTCLINK(nskb1);
			}
			else
				nskb2 = NULL;

			DEL_CTRACE(osh, nskb1);
		}
#endif /* BCMDBG_CTRACE */
	}
	return (struct sk_buff *)pkt;
}
585
586/* Convert a native(OS) packet to driver packet.
587 * In the process, native packet is destroyed, there is no copying
588 * Also, a packettag is zeroed out
589 */
/* Signature varies with debug options: BCMDBG_PKT and BCMDBG_CTRACE add
 * the caller's file/line for packet tracking.
 */
#ifdef BCMDBG_PKT
void *
osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file)
#else /* BCMDBG_PKT pkt logging for debugging */
#ifdef BCMDBG_CTRACE
void * BCMFASTPATH
osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file)
#else
void * BCMFASTPATH
osl_pkt_frmnative(osl_t *osh, void *pkt)
#endif /* BCMDBG_CTRACE */
#endif /* BCMDBG_PKT */
{
	struct sk_buff *nskb;
#ifdef BCMDBG_CTRACE
	struct sk_buff *nskb1, *nskb2;
#endif
#ifdef BCMDBG_PKT
	unsigned long flags;
#endif

	/* Clear the tag on the head packet only */
	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(pkt);

	/* Increment the packet counter */
	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
#ifdef BCMDBG_PKT
		spin_lock_irqsave(&osh->pktlist_lock, flags);
		pktlist_add(&(osh->pktlist), (void *) nskb, line, file);
		spin_unlock_irqrestore(&osh->pktlist_lock, flags);
#endif  /* BCMDBG_PKT */
		/* A chained packet accounts for its whole chain at once */
		atomic_add(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->pktalloced);

#ifdef BCMDBG_CTRACE
		for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
			if (PKTISCHAINED(nskb1)) {
				nskb2 = PKTCLINK(nskb1);
			}
			else
				nskb2 = NULL;

			ADD_CTRACE(osh, nskb1, file, line);
		}
#endif /* BCMDBG_CTRACE */
	}
	return (void *)pkt;
}
637
638/* Return a new packet. zero out pkttag */
#ifdef BCMDBG_PKT
void * BCMFASTPATH
osl_pktget(osl_t *osh, uint len, int line, char *file)
#else /* BCMDBG_PKT */
#ifdef BCMDBG_CTRACE
void * BCMFASTPATH
osl_pktget(osl_t *osh, uint len, int line, char *file)
#else
void * BCMFASTPATH
osl_pktget(osl_t *osh, uint len)
#endif /* BCMDBG_CTRACE */
#endif /* BCMDBG_PKT */
{
	struct sk_buff *skb;
#ifdef BCMDBG_PKT
	unsigned long flags;
#endif

#ifdef CTFPOOL
	/* Allocate from local pool */
	skb = osl_pktfastget(osh, len);
	if ((skb != NULL) || ((skb = osl_alloc_skb(len)) != NULL)) {
#else /* CTFPOOL */
	if ((skb = osl_alloc_skb(len))) {
#endif /* CTFPOOL */
		/* Present the fresh buffer as a len-byte packet */
		skb->tail += len;
		skb->len  += len;
		skb->priority = 0;

#ifdef BCMDBG_CTRACE
		ADD_CTRACE(osh, skb, file, line);
#endif
#ifdef BCMDBG_PKT
		spin_lock_irqsave(&osh->pktlist_lock, flags);
		pktlist_add(&(osh->pktlist), (void *) skb, line, file);
		spin_unlock_irqrestore(&osh->pktlist_lock, flags);
#endif
		atomic_inc(&osh->pktalloced);
	}

	/* NULL when both the fast pool and the kernel allocator failed */
	return ((void*) skb);
}
681
#ifdef CTFPOOL
/* Return an skb to its owning ctfpool instead of freeing it to the
 * kernel.  Resets only the skb fields this layer is known to change,
 * then pushes the skb onto the pool's free list under the pool lock.
 */
static inline void
osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
{
	ctfpool_t *ctfpool;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

	/* Timestamp field layout changed in 2.6.14 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
	skb->tstamp.tv.sec = 0;
#else
	skb->stamp.tv_sec = 0;
#endif

	/* We only need to init the fields that we change */
	skb->dev = NULL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
	skb->dst = NULL;
#endif
	OSL_PKTTAG_CLEAR(skb);
	skb->ip_summed = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	skb_orphan(skb);
#else
	skb->destructor = NULL;
#endif

	/* The pool pointer was stashed in the skb by osl_ctfpool_add */
	ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
	ASSERT(ctfpool != NULL);

	/* Add object to the ctfpool */
	CTFPOOL_LOCK(ctfpool, flags);
	skb->next = (struct sk_buff *)ctfpool->head;
	ctfpool->head = (void *)skb;

	ctfpool->fast_frees++;
	ctfpool->curr_obj++;

	ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
	CTFPOOL_UNLOCK(ctfpool, flags);
}
#endif /* CTFPOOL */
726
727/* Free the driver packet. Free the tag if present */
/* Free the driver packet. Free the tag if present.
 * 'send' routes the packet through the registered tx_fn hook first.
 * Multi-skb packets chained via skb->next are freed one by one;
 * ctfpool-owned skbs go back to their pool instead of the kernel.
 */
void BCMFASTPATH
osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;
#ifdef BCMDBG_PKT
	unsigned long flags;
#endif

	skb = (struct sk_buff*) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

	/* perversion: we use skb->next to chain multi-skb packets */
	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef BCMDBG_CTRACE
		DEL_CTRACE(osh, skb);
#endif
#ifdef BCMDBG_PKT
		spin_lock_irqsave(&osh->pktlist_lock, flags);
		pktlist_remove(&(osh->pktlist), (void *) skb);
		spin_unlock_irqrestore(&osh->pktlist_lock, flags);
#endif

#ifdef CTFMAP
		/* Clear the map ptr before freeing */
		PKTCLRCTF(osh, skb);
		CTFMAPPTR(osh, skb) = NULL;
#endif

#ifdef CTFPOOL
        /* foxconn wklin modified, 07/07/2011 */
        /* add CTFPOOLPTR() check because..
         * skb->mac_len bit 4 may be set (PKTISFAST()==true) in the stack...*/
		if (PKTISFAST(osh, skb) && CTFPOOLPTR(osh, skb)){
			/* Only recycle when we hold the last reference */
			if (atomic_read(&skb->users) == 1)
				smp_rmb();
			else if (!atomic_dec_and_test(&skb->users))
				goto next_skb;
			osl_pktfastfree(osh, skb);
		} else
#endif
		{
			if (skb->destructor)
				/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
				 * destructor exists
				 */
				dev_kfree_skb_any(skb);
			else
				/* can free immediately (even in_irq()) if destructor
				 * does not exist
				 */
				dev_kfree_skb(skb);
		}
#ifdef CTFPOOL
next_skb:
#endif
		atomic_dec(&osh->pktalloced);
		skb = nskb;
	}
}
794
795#ifdef DHD_USE_STATIC_BUF
796void*
797osl_pktget_static(osl_t *osh, uint len)
798{
799	int i = 0;
800	struct sk_buff *skb;
801
802	if (len > (PAGE_SIZE*2)) {
803		printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
804		return osl_pktget(osh, len);
805	}
806
807	down(&bcm_static_skb->osl_pkt_sem);
808
809	if (len <= PAGE_SIZE) {
810		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
811			if (bcm_static_skb->pkt_use[i] == 0)
812				break;
813		}
814
815		if (i != STATIC_PKT_MAX_NUM) {
816			bcm_static_skb->pkt_use[i] = 1;
817			up(&bcm_static_skb->osl_pkt_sem);
818			skb = bcm_static_skb->skb_4k[i];
819			skb->tail = skb->data + len;
820			skb->len = len;
821			return skb;
822		}
823	}
824
825
826	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
827		if (bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] == 0)
828			break;
829	}
830
831	if (i != STATIC_PKT_MAX_NUM) {
832		bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] = 1;
833		up(&bcm_static_skb->osl_pkt_sem);
834		skb = bcm_static_skb->skb_8k[i];
835		skb->tail = skb->data + len;
836		skb->len = len;
837		return skb;
838	}
839
840	up(&bcm_static_skb->osl_pkt_sem);
841	printk("%s: all static pkt in use!\n", __FUNCTION__);
842	return osl_pktget(osh, len);
843}
844
845void
846osl_pktfree_static(osl_t *osh, void *p, bool send)
847{
848	int i;
849
850	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
851		if (p == bcm_static_skb->skb_4k[i]) {
852			down(&bcm_static_skb->osl_pkt_sem);
853			bcm_static_skb->pkt_use[i] = 0;
854			up(&bcm_static_skb->osl_pkt_sem);
855			return;
856		}
857	}
858
859	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
860		if (p == bcm_static_skb->skb_8k[i]) {
861			down(&bcm_static_skb->osl_pkt_sem);
862			bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 0;
863			up(&bcm_static_skb->osl_pkt_sem);
864			return;
865		}
866	}
867
868	return osl_pktfree(osh, p, send);
869}
870#endif /* DHD_USE_STATIC_BUF */
871
872uint32
873osl_pci_read_config(osl_t *osh, uint offset, uint size)
874{
875	uint val = 0;
876	uint retry = PCI_CFG_RETRY;
877
878	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
879
880	/* only 4byte access supported */
881	ASSERT(size == 4);
882
883	do {
884		pci_read_config_dword(osh->pdev, offset, &val);
885		if (val != 0xffffffff)
886			break;
887	} while (retry--);
888
889
890	return (val);
891}
892
893void
894osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
895{
896	uint retry = PCI_CFG_RETRY;
897
898	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
899
900	/* only 4byte access supported */
901	ASSERT(size == 4);
902
903	do {
904		pci_write_config_dword(osh->pdev, offset, val);
905		if (offset != PCI_BAR0_WIN)
906			break;
907		if (osl_pci_read_config(osh, offset, size) == val)
908			break;
909	} while (retry--);
910
911}
912
913/* return bus # for the pci device pointed by osh->pdev */
914uint
915osl_pci_bus(osl_t *osh)
916{
917	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
918
919	return ((struct pci_dev *)osh->pdev)->bus->number;
920}
921
922/* return slot # for the pci device pointed by osh->pdev */
/* return slot # for the pci device pointed by osh->pdev */
uint
osl_pci_slot(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	/* NOTE(review): newer ARMv7 kernels report slot numbers offset by
	 * one here — presumably to match the SoC's PCIe enumeration; confirm
	 * against the platform's bus layout.
	 */
#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
#else
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
#endif
}
934
935/* return the pci device pointed by osh->pdev */
936struct pci_dev *
937osl_pci_device(osl_t *osh)
938{
939	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
940
941	return osh->pdev;
942}
943
/* PCMCIA attribute-space accessor.  Intentionally a no-op stub in this
 * port; the read/write wrappers below funnel through it.
 */
static void
osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
{
}
948
/* Read 'size' bytes of PCMCIA attribute space at 'offset' into 'buf'
 * (currently a no-op — osl_pcmcia_attr is a stub).
 */
void
osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
}
954
/* Write 'size' bytes from 'buf' to PCMCIA attribute space at 'offset'
 * (currently a no-op — osl_pcmcia_attr is a stub).
 */
void
osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
}
960
961#ifdef BCMDBG_MEM
962/* In BCMDBG_MEM configurations osl_malloc is only used internally in
963 * the implementation of osl_debug_malloc.  Because we are using the GCC
964 * -Wstrict-prototypes compile option, we must always have a prototype
965 * for a global/external function.  So make osl_malloc static in
966 * the BCMDBG_MEM case.
967 */
968static
969#endif
/* Allocate 'size' bytes.  Requests between PAGE_SIZE and STATIC_BUF_SIZE
 * are served from the preallocated static pool when available; everything
 * else goes to kmalloc(GFP_ATOMIC).  Successful allocations are counted
 * in osh->malloced, failures in osh->failed.
 */
void *
osl_malloc(osl_t *osh, uint size)
{
	void *addr;

	/* only ASSERT if osh is defined */
	if (osh)
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

#ifdef DHD_USE_STATIC_BUF
	if (bcm_static_buf)
	{
		int i = 0;
		if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
		{
			down(&bcm_static_buf->static_sem);

			/* Find the first free slot */
			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
			{
				if (bcm_static_buf->buf_use[i] == 0)
					break;
			}

			if (i == STATIC_BUF_MAX_NUM)
			{
				up(&bcm_static_buf->static_sem);
				printk("all static buff in use!\n");
				goto original;
			}

			bcm_static_buf->buf_use[i] = 1;
			up(&bcm_static_buf->static_sem);

			bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
			if (osh)
				atomic_add(size, &osh->malloced);

			return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
		}
	}
original:
#endif /* DHD_USE_STATIC_BUF */

	if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
		if (osh)
			osh->failed++;
		return (NULL);
	}
	if (osh)
		atomic_add(size, &osh->malloced);

	return (addr);
}
1023
1024#ifdef BCMDBG_MEM
1025/* In BCMDBG_MEM configurations osl_mfree is only used internally in
1026 * the implementation of osl_debug_mfree.  Because we are using the GCC
1027 * -Wstrict-prototypes compile option, we must always have a prototype
1028 * for a global/external function.  So make osl_mfree static in
1029 * the BCMDBG_MEM case.
1030 */
1031static
1032#endif
/* Free memory allocated by osl_malloc.  Addresses inside the static-pool
 * range are returned to the pool by clearing the slot's in-use flag;
 * everything else is kfree'd.  'size' must match the allocation size so
 * the osh->malloced accounting stays balanced.
 */
void
osl_mfree(osl_t *osh, void *addr, uint size)
{
#ifdef DHD_USE_STATIC_BUF
	if (bcm_static_buf)
	{
		/* Address-range test decides pool slot vs kmalloc'd memory */
		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
		{
			int buf_idx = 0;

			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;

			down(&bcm_static_buf->static_sem);
			bcm_static_buf->buf_use[buf_idx] = 0;
			up(&bcm_static_buf->static_sem);

			if (osh) {
				ASSERT(osh->magic == OS_HANDLE_MAGIC);
				atomic_sub(size, &osh->malloced);
			}
			return;
		}
	}
#endif /* DHD_USE_STATIC_BUF */
	if (osh) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);
		atomic_sub(size, &osh->malloced);
	}
	kfree(addr);
}
1064
1065uint
1066osl_malloced(osl_t *osh)
1067{
1068	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1069	return (atomic_read(&osh->malloced));
1070}
1071
1072uint
1073osl_malloc_failed(osl_t *osh)
1074{
1075	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1076	return (osh->failed);
1077}
1078
1079#ifdef BCMDBG_MEM
/* IRQ-safe serialization of osh->dbgmem_list accesses */
#define MEMLIST_LOCK(osh, flags)	spin_lock_irqsave(&(osh)->dbgmem_lock, flags)
#define MEMLIST_UNLOCK(osh, flags)	spin_unlock_irqrestore(&(osh)->dbgmem_lock, flags)
1082
/* Debug wrapper around osl_malloc: prepends a bcm_mem_link_t header
 * recording size and allocation site, and links the block into
 * osh->dbgmem_list (under dbgmem_lock).  Returns a pointer to the
 * payload just past the header, or NULL on allocation failure.
 */
void *
osl_debug_malloc(osl_t *osh, uint size, int line, const char* file)
{
	bcm_mem_link_t *p;
	const char* basename;
	unsigned long flags = 0;

	if (!size) {
		printk("%s: allocating zero sized mem at %s line %d\n", __FUNCTION__, file, line);
		ASSERT(0);
	}

	if (osh) {
		MEMLIST_LOCK(osh, flags);
	}
	/* Allocate header + payload in one block */
	if ((p = (bcm_mem_link_t*)osl_malloc(osh, sizeof(bcm_mem_link_t) + size)) == NULL) {
		if (osh) {
			MEMLIST_UNLOCK(osh, flags);
		}
		return (NULL);
	}

	p->size = size;
	p->line = line;
	p->osh = (void *)osh;

	/* Record only the file's basename; the header field is small */
	basename = strrchr(file, '/');
	/* skip the '/' */
	if (basename)
		basename++;

	if (!basename)
		basename = file;

	strncpy(p->file, basename, BCM_MEM_FILENAME_LEN);
	/* strncpy does not guarantee termination; force it */
	p->file[BCM_MEM_FILENAME_LEN - 1] = '\0';

	/* link this block */
	if (osh) {
		p->prev = NULL;
		p->next = osh->dbgmem_list;
		if (p->next)
			p->next->prev = p;
		osh->dbgmem_list = p;
		MEMLIST_UNLOCK(osh, flags);
	}

	return p + 1;
}
1132
/* Debug wrapper around osl_mfree: validates the bcm_mem_link_t header
 * preceding 'addr' (double free, size mismatch, osh mismatch), unlinks
 * the block from osh->dbgmem_list, and frees header + payload together.
 */
void
osl_debug_mfree(osl_t *osh, void *addr, uint size, int line, const char* file)
{
	/* Recover the header prepended by osl_debug_malloc */
	bcm_mem_link_t *p = (bcm_mem_link_t *)((int8*)addr - sizeof(bcm_mem_link_t));
	unsigned long flags = 0;

	ASSERT(osh == NULL || osh->magic == OS_HANDLE_MAGIC);

	/* Make function compliant with standard free() */
	if (addr == NULL)
		return;

	/* size == 0 marks a block already freed by this function */
	if (p->size == 0) {
		printk("osl_debug_mfree: double free on addr %p size %d at line %d file %s\n",
			addr, size, line, file);
		ASSERT(p->size);
		return;
	}

	if (p->size != size) {
		printk("%s: dealloca size does not match alloc size\n", __FUNCTION__);
		printk("Dealloc addr %p size %d at line %d file %s\n", addr, size, line, file);
		printk("Alloc size %d line %d file %s\n", p->size, p->line, p->file);
		ASSERT(p->size == size);
		return;
	}

	if (p->osh != (void *)osh) {
		printk("osl_debug_mfree: alloc osh %p does not match dealloc osh %p\n",
			p->osh, osh);
		printk("Dealloc addr %p size %d at line %d file %s\n", addr, size, line, file);
		printk("Alloc size %d line %d file %s\n", p->size, p->line, p->file);
		ASSERT(p->osh == (void *)osh);
		return;
	}

	/* unlink this block */
	if (osh) {
		MEMLIST_LOCK(osh, flags);
		if (p->prev)
			p->prev->next = p->next;
		if (p->next)
			p->next->prev = p->prev;
		if (osh->dbgmem_list == p)
			osh->dbgmem_list = p->next;
		p->next = p->prev = NULL;
	}
	p->size = 0;

	/* NOTE(review): osl_mfree runs with dbgmem_lock still held here */
	osl_mfree(osh, p, size + sizeof(bcm_mem_link_t));
	if (osh) {
		MEMLIST_UNLOCK(osh, flags);
	}
}
1187
/* Dump all live BCMDBG_MEM allocations, either into the bcmstrbuf 'b'
 * or (when b is NULL) to the console.  Always returns 0.
 */
int
osl_debug_memdump(osl_t *osh, struct bcmstrbuf *b)
{
	bcm_mem_link_t *p;
	unsigned long flags = 0;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	MEMLIST_LOCK(osh, flags);
	if (osh->dbgmem_list) {
		if (b != NULL)
			bcm_bprintf(b, "   Address   Size File:line\n");
		else
			printf("   Address   Size File:line\n");

		for (p = osh->dbgmem_list; p; p = p->next) {
			/* Print the payload address (just past the header) */
			if (b != NULL)
				bcm_bprintf(b, "%p %6d %s:%d\n", (char*)p + sizeof(bcm_mem_link_t),
					p->size, p->file, p->line);
			else
				printf("%p %6d %s:%d\n", (char*)p + sizeof(bcm_mem_link_t),
					p->size, p->file, p->line);

			/* Detects loop-to-self so we don't enter infinite loop */
			if (p == p->next) {
				if (b != NULL)
					bcm_bprintf(b, "WARNING: loop-to-self "
						"p %p p->next %p\n", p, p->next);
				else
					printf("WARNING: loop-to-self "
						"p %p p->next %p\n", p, p->next);

				break;
			}
		}
	}
	MEMLIST_UNLOCK(osh, flags);

	return 0;
}
1228
1229#endif	/* BCMDBG_MEM */
1230
1231uint
1232osl_dma_consistent_align(void)
1233{
1234	return (PAGE_SIZE);
1235}
1236
/*
 * Allocate DMA-consistent (coherent) memory.
 *
 * align_bits: requested alignment expressed as a power of two
 *             (align = 1 << align_bits).
 * *alloced:   receives the size actually reserved; the request is padded
 *             by 'align' when the platform's consistent-allocation
 *             alignment is not already sufficient, so the caller can
 *             align within the block.
 * *pap:       receives the physical/bus address of the buffer.
 * Returns the kernel virtual address, or NULL on failure.
 *
 * NOTE(review): 'align' is uint16, so align_bits >= 16 would overflow to
 * 0 — presumably callers only request small alignments; confirm.
 */
void*
osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap)
{
	void *va;
	uint16 align = (1 << align_bits);
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	/* Pad so that a suitably aligned sub-block exists inside the buffer. */
	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
		size += align;
	*alloced = size;

#ifdef __ARM_ARCH_7A__
	/* ARMv7-A build: zeroed kmalloc; physical address derived directly. */
	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
	if (va)
		*pap = (ulong)__virt_to_phys(va);
#else
	va = pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap);
#endif
	return va;
}
1257
1258void
1259osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
1260{
1261	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1262
1263#ifdef __ARM_ARCH_7A__
1264	kfree(va);
1265#else
1266	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
1267#endif
1268}
1269
/*
 * Create streaming DMA mapping(s) for a buffer and return the bus address
 * of the first segment.
 *
 * direction: DMA_TX maps as PCI_DMA_TODEVICE, everything else FROMDEVICE.
 * p / dmah:  used only on ARMv7-A builds with scatter-gather support,
 *            where 'p' is the head of an skb chain and 'dmah' is filled
 *            with the per-segment bus addresses and lengths.
 */
uint BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
	int dir;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;

#if defined(__ARM_ARCH_7A__) && defined(BCMDMASGLISTOSL)
	if (dmah != NULL) {
		int32 nsegs, i, totsegs = 0, totlen = 0;
		struct scatterlist *sg, _sg[MAX_DMA_SEGS * 2];
		struct sk_buff *skb;
		/* Walk the skb chain, appending each buffer's segment(s) to _sg. */
		for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
			sg = &_sg[totsegs];
			if (skb_is_nonlinear(skb)) {
				/* Paged skb: one scatterlist entry per fragment. */
				nsegs = skb_to_sgvec(skb, sg, 0, PKTLEN(osh, skb));
				ASSERT((nsegs > 0) && (totsegs + nsegs <= MAX_DMA_SEGS));
				pci_map_sg(osh->pdev, sg, nsegs, dir);
			} else {
				/* Linear skb: a single segment mapped directly. */
				nsegs = 1;
				ASSERT(totsegs + nsegs <= MAX_DMA_SEGS);
				sg->page_link = 0;
				sg_set_buf(sg, PKTDATA(osh, skb), PKTLEN(osh, skb));
#ifdef	CTFMAP
				/* Map size bytes (not skb->len) for ctf bufs */
				pci_map_single(osh->pdev, PKTDATA(osh, skb),
				    PKTISCTF(osh, skb) ? CTFMAPSZ : PKTLEN(osh, skb), dir);
#else
				pci_map_single(osh->pdev, PKTDATA(osh, skb), PKTLEN(osh, skb), dir);
#endif
			}
			totsegs += nsegs;
			totlen += PKTLEN(osh, skb);
		}
		/* Publish the assembled segment list to the caller. */
		dmah->nsegs = totsegs;
		dmah->origsize = totlen;
		for (i = 0, sg = _sg; i < totsegs; i++, sg++) {
			dmah->segs[i].addr = sg_phys(sg);
			dmah->segs[i].length = sg->length;
		}
		return dmah->segs[0].addr;
	}
#endif /* __ARM_ARCH_7A__ && BCMDMASGLISTOSL */

	return (pci_map_single(osh->pdev, va, size, dir));
}
1317
1318void BCMFASTPATH
1319osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
1320{
1321	int dir;
1322
1323	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1324	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
1325	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
1326}
1327
#if defined(BCMDBG_ASSERT)
/*
 * Assertion-failure handler.
 *
 * Formats "assertion ... failed: file ..., line ..." and then acts
 * according to the global g_assert_type policy:
 *   0 - log (with a grace delay) and panic
 *   1 - log only
 *   2 - log and BUG()
 *   other - silently ignore
 */
void
osl_assert(const char *exp, const char *file, int line)
{
	char tempbuf[256];
	const char *basename;

	/* Report only the file's basename; fall back to the full path. */
	basename = strrchr(file, '/');
	if (basename)
		basename++;	/* skip the '/' */
	else
		basename = file;

	/* Bound with sizeof(tempbuf) so it tracks the buffer declaration
	 * (was a hard-coded 256).
	 */
	snprintf(tempbuf, sizeof(tempbuf),
		"assertion \"%s\" failed: file \"%s\", line %d\n",
		exp, basename, line);

	/* Print assert message and give it time to be written to /var/log/messages */
	if (!in_interrupt() && g_assert_type != 1) {
		const int delay = 3;
		printk("%s", tempbuf);
		printk("panic in %d seconds\n", delay);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(delay * HZ);
	}

	switch (g_assert_type) {
	case 0:
		panic("%s", tempbuf);
		break;
	case 1:
		printk("%s", tempbuf);
		break;
	case 2:
		printk("%s", tempbuf);
		BUG();
		break;
	default:
		break;
	}
}
#endif
1375
1376void
1377osl_delay(uint usec)
1378{
1379	uint d;
1380
1381	while (usec > 0) {
1382		d = MIN(usec, 1000);
1383		udelay(d);
1384		usec -= d;
1385	}
1386}
1387
1388#if defined(DSLCPE_DELAY)
1389
1390void
1391osl_oshsh_init(osl_t *osh, shared_osl_t* oshsh)
1392{
1393	extern unsigned long loops_per_jiffy;
1394	osh->oshsh = oshsh;
1395	osh->oshsh->MIPS = loops_per_jiffy / (500000/HZ);
1396}
1397
1398int
1399in_long_delay(osl_t *osh)
1400{
1401	return osh->oshsh->long_delay;
1402}
1403
/*
 * Delay for at least 'usec' microseconds, optionally yielding the CPU.
 *
 * When 'yield' is set and the previous pass managed to schedule away,
 * the elapsed time is credited from the measured cycle delta instead of
 * busy-waiting. Otherwise the loop burns up to 10 us at a time with
 * udelay(). The shared lock is dropped around schedule() and re-taken
 * afterwards — callers must therefore tolerate the lock being released
 * inside this function.
 */
void
osl_long_delay(osl_t *osh, uint usec, bool yield)
{
	uint d;
	bool yielded = TRUE;
	int usec_to_delay = usec;
	unsigned long tick1, tick2, tick_diff = 0;

	/* delay at least requested usec */
	while (usec_to_delay > 0) {
		/* Busy-wait only when yielding is off or the last yield failed. */
		if (!yield || !yielded) {
			d = MIN(usec_to_delay, 10);
			udelay(d);
			usec_to_delay -= d;
		}
		if (usec_to_delay > 0) {
			osh->oshsh->long_delay++;
			OSL_GETCYCLES(tick1);
			/* Drop the shared lock so others can run while we wait. */
			spin_unlock_bh(osh->oshsh->lock);
			if (usec_to_delay > 0 && !in_irq() && !in_softirq() && !in_interrupt()) {
				schedule();
				yielded = TRUE;
			} else {
				yielded = FALSE;
			}
			spin_lock_bh(osh->oshsh->lock);
			OSL_GETCYCLES(tick2);

			if (yielded) {
				/* Convert the cycle delta into elapsed microseconds
				 * using the MIPS estimate from osl_oshsh_init().
				 */
				tick_diff = TICKDIFF(tick2, tick1);
				tick_diff = (tick_diff * 2)/(osh->oshsh->MIPS);
				if (tick_diff) {
					usec_to_delay -= tick_diff;
				} else
					yielded = 0;
			}
			osh->oshsh->long_delay--;
			ASSERT(osh->oshsh->long_delay >= 0);
		}
	}
}
1445#endif /* DSLCPE_DELAY */
1446
/* Clone a packet (skb_clone: shared data buffer, new skb header).
 * The pkttag contents are NOT cloned.
 * Extra line/file parameters exist only on BCMDBG_PKT / BCMDBG_CTRACE
 * builds for allocation tracking. Returns NULL if the clone fails.
 */
#ifdef BCMDBG_PKT
void *
osl_pktdup(osl_t *osh, void *skb, int line, char *file)
#else /* BCMDBG_PKT */
#ifdef BCMDBG_CTRACE
void *
osl_pktdup(osl_t *osh, void *skb, int line, char *file)
#else
void *
osl_pktdup(osl_t *osh, void *skb)
#endif /* BCMDBG_CTRACE */
#endif /* BCMDBG_PKT */
{
	void * p;
#ifdef BCMDBG_PKT
	unsigned long irqflags;
#endif

	/* Chained packets must be cloned per-link, not through here. */
	ASSERT(!PKTISCHAINED(skb));

	/* clear the CTFBUF flag if set and map the rest of the buffer
	 * before cloning.
	 */
	PKTCTFMAP(osh, skb);

	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
		return NULL;

#ifdef CTFPOOL
	if (PKTISFAST(osh, skb)) {
		ctfpool_t *ctfpool;

		/* if the buffer allocated from ctfpool is cloned then
		 * we can't be sure when it will be freed. since there
		 * is a chance that we will be losing a buffer
		 * from our pool, we increment the refill count for the
		 * object to be alloced later.
		 */
		ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
		ASSERT(ctfpool != NULL);
		PKTCLRFAST(osh, p);
		PKTCLRFAST(osh, skb);
		ctfpool->refills++;
	}
#endif /* CTFPOOL */

	/* Clear PKTC  context */
	PKTSETCLINK(p, NULL);
	PKTCCLRFLAGS(p);
	PKTCSETCNT(p, 1);
	PKTCSETLEN(p, PKTLEN(osh, skb));

	/* skb_clone copies skb->cb.. we don't want that */
	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(p);

	/* Increment the packet counter */
	atomic_inc(&osh->pktalloced);
#ifdef BCMDBG_CTRACE
	ADD_CTRACE(osh, (struct sk_buff *)p, file, line);
#endif
#ifdef BCMDBG_PKT
	/* Register the clone on the debug packet list. */
	spin_lock_irqsave(&osh->pktlist_lock, irqflags);
	pktlist_add(&(osh->pktlist), (void *) p, line, file);
	spin_unlock_irqrestore(&osh->pktlist_lock, irqflags);
#endif
	return (p);
}
1518
1519#ifdef BCMDBG_CTRACE
1520int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt)
1521{
1522	unsigned long flags;
1523	struct sk_buff *skb;
1524	int ck = FALSE;
1525
1526	spin_lock_irqsave(&osh->ctrace_lock, flags);
1527
1528	list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
1529		if (pkt == skb) {
1530			ck = TRUE;
1531			break;
1532		}
1533	}
1534
1535	spin_unlock_irqrestore(&osh->ctrace_lock, flags);
1536	return ck;
1537}
1538
1539void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b)
1540{
1541	unsigned long flags;
1542	struct sk_buff *skb;
1543	int idx = 0;
1544	int i, j;
1545
1546	spin_lock_irqsave(&osh->ctrace_lock, flags);
1547
1548	if (b != NULL)
1549		bcm_bprintf(b, " Total %d sbk not free\n", osh->ctrace_num);
1550	else
1551		printk(" Total %d sbk not free\n", osh->ctrace_num);
1552
1553	list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
1554		if (b != NULL)
1555			bcm_bprintf(b, "[%d] skb %p:\n", ++idx, skb);
1556		else
1557			printk("[%d] skb %p:\n", ++idx, skb);
1558
1559		for (i = 0; i < skb->ctrace_count; i++) {
1560			j = (skb->ctrace_start + i) % CTRACE_NUM;
1561			if (b != NULL)
1562				bcm_bprintf(b, "    [%s(%d)]\n", skb->func[j], skb->line[j]);
1563			else
1564				printk("    [%s(%d)]\n", skb->func[j], skb->line[j]);
1565		}
1566		if (b != NULL)
1567			bcm_bprintf(b, "\n");
1568		else
1569			printk("\n");
1570	}
1571
1572	spin_unlock_irqrestore(&osh->ctrace_lock, flags);
1573
1574	return;
1575}
1576#endif /* BCMDBG_CTRACE */
1577
1578#ifdef BCMDBG_PKT
1579#ifdef BCMDBG_PTRACE
1580void
1581osl_pkttrace(osl_t *osh, void *pkt, uint16 bit)
1582{
1583	pktlist_trace(&(osh->pktlist), pkt, bit);
1584}
1585#endif /* BCMDBG_PTRACE */
1586
1587char *
1588osl_pktlist_dump(osl_t *osh, char *buf)
1589{
1590	pktlist_dump(&(osh->pktlist), buf);
1591	return buf;
1592}
1593
1594void
1595osl_pktlist_add(osl_t *osh, void *p, int line, char *file)
1596{
1597	unsigned long flags;
1598	spin_lock_irqsave(&osh->pktlist_lock, flags);
1599	pktlist_add(&(osh->pktlist), p, line, file);
1600	spin_unlock_irqrestore(&osh->pktlist_lock, flags);
1601}
1602
1603void
1604osl_pktlist_remove(osl_t *osh, void *p)
1605{
1606	unsigned long flags;
1607	spin_lock_irqsave(&osh->pktlist_lock, flags);
1608	pktlist_remove(&(osh->pktlist), p);
1609	spin_unlock_irqrestore(&osh->pktlist_lock, flags);
1610}
1611#endif /* BCMDBG_PKT */
1612
1613/*
1614 * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
1615 */
1616#ifdef OSLREGOPS
1617uint8
1618osl_readb(osl_t *osh, volatile uint8 *r)
1619{
1620	osl_rreg_fn_t rreg	= ((osl_pubinfo_t*)osh)->rreg_fn;
1621	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1622
1623	return (uint8)((rreg)(ctx, (void*)r, sizeof(uint8)));
1624}
1625
1626
1627uint16
1628osl_readw(osl_t *osh, volatile uint16 *r)
1629{
1630	osl_rreg_fn_t rreg	= ((osl_pubinfo_t*)osh)->rreg_fn;
1631	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1632
1633	return (uint16)((rreg)(ctx, (void*)r, sizeof(uint16)));
1634}
1635
1636uint32
1637osl_readl(osl_t *osh, volatile uint32 *r)
1638{
1639	osl_rreg_fn_t rreg	= ((osl_pubinfo_t*)osh)->rreg_fn;
1640	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1641
1642	return (uint32)((rreg)(ctx, (void*)r, sizeof(uint32)));
1643}
1644
1645void
1646osl_writeb(osl_t *osh, volatile uint8 *r, uint8 v)
1647{
1648	osl_wreg_fn_t wreg	= ((osl_pubinfo_t*)osh)->wreg_fn;
1649	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1650
1651	((wreg)(ctx, (void*)r, v, sizeof(uint8)));
1652}
1653
1654
1655void
1656osl_writew(osl_t *osh, volatile uint16 *r, uint16 v)
1657{
1658	osl_wreg_fn_t wreg	= ((osl_pubinfo_t*)osh)->wreg_fn;
1659	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1660
1661	((wreg)(ctx, (void*)r, v, sizeof(uint16)));
1662}
1663
1664void
1665osl_writel(osl_t *osh, volatile uint32 *r, uint32 v)
1666{
1667	osl_wreg_fn_t wreg	= ((osl_pubinfo_t*)osh)->wreg_fn;
1668	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1669
1670	((wreg)(ctx, (void*)r, v, sizeof(uint32)));
1671}
1672#endif /* OSLREGOPS */
1673
1674/*
1675 * BINOSL selects the slightly slower function-call-based binary compatible osl.
1676 */
1677#ifdef BINOSL
1678
/* System uptime in milliseconds derived from the jiffies counter.
 * NOTE(review): (1000 / HZ) is integer division — for HZ > 1000 it
 * evaluates to 0 and this always returns 0; assumes HZ <= 1000. The
 * 32-bit product also wraps; callers presumably only use deltas.
 */
uint32
osl_sysuptime(void)
{
	return ((uint32)jiffies * (1000 / HZ));
}
1684
1685int
1686osl_printf(const char *format, ...)
1687{
1688	va_list args;
1689	static char printbuf[1024];
1690	int len;
1691
1692	/* sprintf into a local buffer because there *is* no "vprintk()".. */
1693	va_start(args, format);
1694	len = vsnprintf(printbuf, 1024, format, args);
1695	va_end(args);
1696
1697	if (len > sizeof(printbuf)) {
1698		printk("osl_printf: buffer overrun\n");
1699		return (0);
1700	}
1701
1702	return (printk("%s", printbuf));
1703}
1704
/* sprintf() shim: format into 'buf' (unbounded — caller guarantees room).
 * Returns the number of characters written, excluding the terminator.
 */
int
osl_sprintf(char *buf, const char *format, ...)
{
	int written;
	va_list ap;

	va_start(ap, format);
	written = vsprintf(buf, format, ap);
	va_end(ap);

	return written;
}
1716
/* snprintf() shim: bounded format into buf[n]; always NUL-terminates
 * (if n > 0) and returns the would-be length.
 */
int
osl_snprintf(char *buf, size_t n, const char *format, ...)
{
	int written;
	va_list ap;

	va_start(ap, format);
	written = vsnprintf(buf, n, format, ap);
	va_end(ap);

	return written;
}
1728
/* vsprintf() shim for the binary-compatible OSL. */
int
osl_vsprintf(char *buf, const char *format, va_list ap)
{
	int written = vsprintf(buf, format, ap);

	return written;
}
1734
/* vsnprintf() shim for the binary-compatible OSL. */
int
osl_vsnprintf(char *buf, size_t n, const char *format, va_list ap)
{
	int written = vsnprintf(buf, n, format, ap);

	return written;
}
1740
/* strcmp() shim for the binary-compatible OSL. */
int
osl_strcmp(const char *s1, const char *s2)
{
	int diff = strcmp(s1, s2);

	return diff;
}
1746
1747int
1748osl_strncmp(const char *s1, const char *s2, uint n)
1749{
1750	return (strncmp(s1, s2, n));
1751}
1752
/* strlen() shim; result narrowed to int. */
int
osl_strlen(const char *s)
{
	int n = (int)strlen(s);

	return n;
}
1758
/* strcpy() shim; returns the destination pointer. */
char*
osl_strcpy(char *d, const char *s)
{
	char *dst = strcpy(d, s);

	return dst;
}
1764
1765char*
1766osl_strncpy(char *d, const char *s, uint n)
1767{
1768	return (strncpy(d, s, n));
1769}
1770
/* strchr() shim: first occurrence of c in s, or NULL. */
char*
osl_strchr(const char *s, int c)
{
	char *hit = strchr(s, c);

	return hit;
}
1776
/* strrchr() shim: last occurrence of c in s, or NULL. */
char*
osl_strrchr(const char *s, int c)
{
	char *hit = strrchr(s, c);

	return hit;
}
1782
/* memset() shim; returns the destination pointer. */
void*
osl_memset(void *d, int c, size_t n)
{
	void *dst = memset(d, c, n);

	return dst;
}
1788
/* memcpy() shim: copy n bytes, regions must not overlap. */
void*
osl_memcpy(void *d, const void *s, size_t n)
{
	void *dst = memcpy(d, s, n);

	return dst;
}
1794
/* memmove() shim: copy n bytes, overlap-safe. */
void*
osl_memmove(void *d, const void *s, size_t n)
{
	void *dst = memmove(d, s, n);

	return dst;
}
1800
/* memcmp() shim: compare n bytes. */
int
osl_memcmp(const void *s1, const void *s2, size_t n)
{
	int diff = memcmp(s1, s2, n);

	return diff;
}
1806
1807uint32
1808osl_readl(volatile uint32 *r)
1809{
1810	return (readl(r));
1811}
1812
1813uint16
1814osl_readw(volatile uint16 *r)
1815{
1816	return (readw(r));
1817}
1818
1819uint8
1820osl_readb(volatile uint8 *r)
1821{
1822	return (readb(r));
1823}
1824
1825void
1826osl_writel(uint32 v, volatile uint32 *r)
1827{
1828	writel(v, r);
1829}
1830
1831void
1832osl_writew(uint16 v, volatile uint16 *r)
1833{
1834	writew(v, r);
1835}
1836
1837void
1838osl_writeb(uint8 v, volatile uint8 *r)
1839{
1840	writeb(v, r);
1841}
1842
/* Return an uncached alias of 'va'. On MIPS this remaps the address into
 * KSEG1 (unmapped, uncached); other architectures have no such aliasing
 * and return the address unchanged.
 */
void *
osl_uncached(void *va)
{
#ifdef mips
	return ((void*)KSEG1ADDR(va));
#else
	return ((void*)va);
#endif /* mips */
}
1852
/* Return a cached alias of 'va'. On MIPS this remaps the address into
 * KSEG0 (unmapped, cached); other architectures return it unchanged.
 */
void *
osl_cached(void *va)
{
#ifdef mips
	return ((void*)KSEG0ADDR(va));
#else
	return ((void*)va);
#endif /* mips */
}
1862
/* Read a free-running CPU cycle counter where available:
 * MIPS CP0 count (doubled — the counter ticks at half the core clock on
 * these parts, presumably; confirm per-SoC), x86 TSC low word via rdtscl,
 * and 0 on any other architecture.
 */
uint
osl_getcycles(void)
{
	uint cycles;

#if defined(mips)
	cycles = read_c0_count() * 2;
#elif defined(__i386__)
	rdtscl(cycles);
#else
	cycles = 0;
#endif /* defined(mips) */
	return cycles;
}
1877
1878void *
1879osl_reg_map(uint32 pa, uint size)
1880{
1881	return (ioremap_nocache((unsigned long)pa, (unsigned long)size));
1882}
1883
/* Release a register mapping created by osl_reg_map(). */
void
osl_reg_unmap(void *va)
{
	iounmap(va);
	return;
}
1889
/* Probe a bus address for presence. On MIPS, get_dbe() performs the read
 * with a bus-error handler installed and returns nonzero on a bus error;
 * elsewhere the read is done directly and 0 is always returned.
 */
int
osl_busprobe(uint32 *val, uint32 addr)
{
#ifdef mips
	return get_dbe(*val, (uint32 *)addr);
#else
	*val = readl((uint32 *)(uintptr)addr);
	return 0;
#endif /* mips */
}
1900
1901bool
1902osl_pktshared(void *skb)
1903{
1904	return (((struct sk_buff*)skb)->cloned);
1905}
1906
1907uchar*
1908osl_pktdata(osl_t *osh, void *skb)
1909{
1910	return (((struct sk_buff*)skb)->data);
1911}
1912
1913uint
1914osl_pktlen(osl_t *osh, void *skb)
1915{
1916	return (((struct sk_buff*)skb)->len);
1917}
1918
1919uint
1920osl_pktheadroom(osl_t *osh, void *skb)
1921{
1922	return (uint) skb_headroom((struct sk_buff *) skb);
1923}
1924
1925uint
1926osl_pkttailroom(osl_t *osh, void *skb)
1927{
1928	return (uint) skb_tailroom((struct sk_buff *) skb);
1929}
1930
1931void*
1932osl_pktnext(osl_t *osh, void *skb)
1933{
1934	return (((struct sk_buff*)skb)->next);
1935}
1936
1937void
1938osl_pktsetnext(void *skb, void *x)
1939{
1940	((struct sk_buff*)skb)->next = (struct sk_buff*)x;
1941}
1942
1943void
1944osl_pktsetlen(osl_t *osh, void *skb, uint len)
1945{
1946	__skb_trim((struct sk_buff*)skb, len);
1947}
1948
1949uchar*
1950osl_pktpush(osl_t *osh, void *skb, int bytes)
1951{
1952	return (skb_push((struct sk_buff*)skb, bytes));
1953}
1954
1955uchar*
1956osl_pktpull(osl_t *osh, void *skb, int bytes)
1957{
1958	return (skb_pull((struct sk_buff*)skb, bytes));
1959}
1960
1961void*
1962osl_pkttag(void *skb)
1963{
1964	return ((void*)(((struct sk_buff*)skb)->cb));
1965}
1966
1967void*
1968osl_pktlink(void *skb)
1969{
1970	return (((struct sk_buff*)skb)->prev);
1971}
1972
1973void
1974osl_pktsetlink(void *skb, void *x)
1975{
1976	((struct sk_buff*)skb)->prev = (struct sk_buff*)x;
1977}
1978
1979uint
1980osl_pktprio(void *skb)
1981{
1982	return (((struct sk_buff*)skb)->priority);
1983}
1984
1985void
1986osl_pktsetprio(void *skb, uint x)
1987{
1988	((struct sk_buff*)skb)->priority = x;
1989}
1990#endif	/* BINOSL */
1991
1992uint
1993osl_pktalloced(osl_t *osh)
1994{
1995	return (atomic_read(&osh->pktalloced));
1996}
1997
1998/* Linux Kernel: File Operations: start */
1999void *
2000osl_os_open_image(char *filename)
2001{
2002	struct file *fp;
2003
2004	fp = filp_open(filename, O_RDONLY, 0);
2005	/*
2006	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
2007	 * Alternative:
2008	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
2009	 * ???
2010	 */
2011	 if (IS_ERR(fp))
2012		 fp = NULL;
2013
2014	 return fp;
2015}
2016
2017int
2018osl_os_get_image_block(char *buf, int len, void *image)
2019{
2020	struct file *fp = (struct file *)image;
2021	int rdlen;
2022
2023	if (!image)
2024		return 0;
2025
2026	rdlen = kernel_read(fp, fp->f_pos, buf, len);
2027	if (rdlen > 0)
2028		fp->f_pos += rdlen;
2029
2030	return rdlen;
2031}
2032
2033void
2034osl_os_close_image(void *image)
2035{
2036	if (image)
2037		filp_close((struct file *)image, NULL);
2038}
2039
2040int
2041osl_os_image_size(void *image)
2042{
2043	int len = 0, curroffset;
2044
2045	if (image) {
2046		/* store the current offset */
2047		curroffset = generic_file_llseek(image, 0, 1);
2048		/* goto end of file to get length */
2049		len = generic_file_llseek(image, 0, 2);
2050		/* restore back the offset */
2051		generic_file_llseek(image, curroffset, 0);
2052	}
2053	return len;
2054}
2055
//Foxconn add start, Lewis Min, for UBD, 04/18/2008
/* Hook called on the IP pre-routing path; installed/cleared by the two
 * helpers below.
 * NOTE(review): the pointer is updated with only bottom halves disabled;
 * presumably readers run in softirq context on the same rules — confirm.
 */
int (*ip_pre_insert_hook)(struct sk_buff *skb);//Foxconn add , Lewis Min, for UBD, 04/18/2008

/* Install FUNC as the IP pre-route hook (bh disabled around the store). */
void insert_func_to_IP_PRE_ROUTE(void *FUNC)
{
    local_bh_disable(); /* foxconn wklin added, 11/24/2008 */
    ip_pre_insert_hook= FUNC;
    local_bh_enable(); /* foxconn wklin added, 11/24/2008 */
}
2065
/* Clear the IP pre-route hook.
 * NOTE(review): the FUNC argument is ignored — the hook is cleared
 * unconditionally even if a different function is currently installed;
 * confirm callers expect this.
 */
void remove_func_to_IP_PRE_ROUTE(void *FUNC)
{
    local_bh_disable(); /* foxconn wklin added, 11/24/2008 */
    ip_pre_insert_hook= NULL;
    local_bh_enable(); /* foxconn wklin added, 11/24/2008 */
}
2072
2073/* Linux Kernel: File Operations: end */
2074