1/*
2 * RPC layer. It links to bus layer with transport layer(bus dependent)
3 * Broadcom 802.11abg Networking Device Driver
4 *
5 * Copyright (C) 2015, Broadcom Corporation
6 * All Rights Reserved.
7 *
8 * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
9 * the contents of this file may not be disclosed to third parties, copied
10 * or duplicated in any form, in whole or in part, without the prior
11 * written permission of Broadcom Corporation.
12 *
13 * $Id: bcm_rpc.c 419467 2013-08-21 09:19:48Z $
14 */
15
16#include <epivers.h>
17#include <typedefs.h>
18#include <bcmdefs.h>
19#include <bcmendian.h>
20#include <osl.h>
21#include <bcmutils.h>
22
23#include <bcm_rpc_tp.h>
24#include <bcm_rpc.h>
25#include <rpc_osl.h>
26#include <bcmdevs.h>
27
28#if (!defined(WLC_HIGH) && !defined(WLC_LOW))
29#error "SPLIT"
30#endif
31#if defined(WLC_HIGH) && defined(WLC_LOW)
32#error "SPLIT"
33#endif
34
35/* RPC may use OS APIs directly to avoid overloading osl.h
36 *  HIGH_ONLY supports NDIS and LINUX so far. can be ported to other OS if needed
37 */
38#ifdef WLC_HIGH
39#if !defined(NDIS) && !defined(linux)
40#error "RPC only supports NDIS and LINUX in HIGH driver"
41#endif
42#endif /* WLC_HIGH */
43#ifdef WLC_LOW
44#error "RPC only supports HNDRTE in LOW driver"
45#endif /* WLC_LOW */
46
47/* use local flag BCMDBG_RPC so that it can be turned on without global BCMDBG */
48#ifdef	BCMDBG
49#ifndef BCMDBG_RPC
50#define BCMDBG_RPC
51#endif
52#endif	/* BCMDBG */
53
54/* #define BCMDBG_RPC */
55
/* Run-time message filter for this module.  RPC_ERROR_VAL is on by default
 * so error messages print even for non-debug drivers.
 * NOTE: RPC_PKTLOG_VAL can be added in bcm_rpc_pktlog_init()
 */
static uint32 rpc_msg_level = RPC_ERROR_VAL;

/* osl_msg_level is a bitvector with defs in wlioctl.h */
#define	RPC_ERR(args)		do {if (rpc_msg_level & RPC_ERROR_VAL) printf args;} while (0)
63
/* Trace/pkt-trace/pkt-log switches, from most to least verbose build:
 *   BCMDBG_RPC: trace + pkttrace gated on rpc_msg_level bits
 *   BCMDBG_ERR: trace only
 *   neither:    all compiled out
 * NOTE: in the non-BCMDBG_RPC branches RPC_PKTLOG_ON() is defined here and
 * again below with an identical replacement list -- a benign redefinition.
 */
#ifdef	BCMDBG_RPC
#define	RPC_TRACE(args)		do {if (rpc_msg_level & RPC_TRACE_VAL) printf args;} while (0)
#define RPC_PKTTRACE_ON()	(rpc_msg_level & RPC_PKTTRACE_VAL)
#else
#ifdef	BCMDBG_ERR
#define	RPC_TRACE(args)		do {if (rpc_msg_level & RPC_TRACE_VAL) printf args;} while (0)
#define RPC_PKTTRACE_ON()	(FALSE)
#define prhex(a, b, c)		do { } while (0)  /* prhex is not defined under */
#define RPC_PKTLOG_ON()		(FALSE)
#else
#define	RPC_TRACE(args)
#define RPC_PKTTRACE_ON()	(FALSE)
#define RPC_PKTLOG_ON()		(FALSE)
#define prhex(a, b, c) 	do { } while (0)  /* prhex is not defined under */
#endif /* BCMDBG_ERR */
#endif /* BCMDBG_RPC */

#ifdef BCMDBG_RPC
#define RPC_PKTLOG_ON()		(rpc_msg_level & RPC_PKTLOG_VAL)
#else
#define RPC_PKTLOG_ON()		(FALSE)
#endif /* BCMDBG_RPC */
86
/* Largest transaction-id hole tolerated in the rx reorder queue before
 * frames are tossed (see bcm_rpc_reorder_next_xid() usage).
 */
#ifndef BCM_RPC_REORDER_LIMIT
#ifdef WLC_HIGH
#define BCM_RPC_REORDER_LIMIT 40	/* limit to toss hole to avoid overflow reorder queque */
#else
#define BCM_RPC_REORDER_LIMIT 30
#endif
#endif	/* BCM_RPC_REORDER_LIMIT */

/* Timeouts (msec) for the blocking waits in hello/connect/resume and
 * synchronous call-return paths.
 */
#define RPC_INIT_WAIT_TIMEOUT_MSEC	2000
#ifndef RPC_RETURN_WAIT_TIMEOUT_MSEC
#if defined(NDIS) && !defined(SDIO_BMAC)
#define RPC_RETURN_WAIT_TIMEOUT_MSEC	800 /* NDIS OIDs timeout in 1 second.
					     * This timeout needs to be smaller than that
					     */
#else
#define RPC_RETURN_WAIT_TIMEOUT_MSEC	3200
#endif
#endif /* RPC_RETURN_WAIT_TIMEOUT_MSEC */
106
107/* RPC Frame formats */
108/* |--------------||-------------|
109 * RPC Header      RPC Payload
110 *
111 * 1) RPC Header:
112 * |-------|--------|----------------|
113 * 31      23       15               0
114 * Type     Session  Transaction ID
115 * = 0 Data
116 * = 1 Return
117 * = 2 Mgn
118 *
119 * 2) payload
120 * Data and Return RPC payload is RPC all dependent
121 *
122 * Management frame formats:
123 * |--------|--------|--------|--------|
124 * Byte 0       1        2        3
125 * Header     Action   Version  Reason
126 *
127 * Version is included only for following actions:
128 * -- CONNECT
129 * -- RESET
130 * -- DOWN
131 * -- CONNECT_ACK
132 * -- CONNECT_NACK
133 *
134 * Reason sent only by BMAC for following actions:
135 * -- CONNECT_ACK
136 * -- CONNECT_NACk
137 */
138
/* The RPC header is a single 32-bit word, little-endian on the wire:
 *   [31:24] type (rpc_type_t)  [23:16] session id  [15:0] transaction id
 */
typedef uint32 rpc_header_t;

#define RPC_HDR_LEN	sizeof(rpc_header_t)
#define RPC_ACN_LEN	sizeof(uint32)		/* management action word */
#define RPC_VER_LEN	sizeof(EPI_VERSION_NUM)	/* version word */
#define RPC_RC_LEN	sizeof(uint32)		/* reason-code word */
#define RPC_CHIPID_LEN	sizeof(uint32)		/* chip-id word */

/* Field extractors for a host-order rpc_header_t */
#define RPC_HDR_TYPE(_rpch) (((_rpch) >> 24) & 0xff)
#define RPC_HDR_SESSION(_rpch) (((_rpch) >> 16) & 0xff)
#define RPC_HDR_XACTION(_rpch) ((_rpch) & 0xffff) /* When the type is data or return */

#define NAME_ENTRY(x) #x
152
153/* RPC Header defines -- attached to every RPC call */
/* RPC Header defines -- attached to every RPC call */
typedef enum {
	RPC_TYPE_UNKNOWN, /* Unknown header type */
	RPC_TYPE_DATA,	  /* RPC call that go straight through */
	RPC_TYPE_RTN,	  /* RPC calls that are syncrhonous */
	RPC_TYPE_MGN,	  /* RPC state management */
} rpc_type_t;

/* Reason codes carried in management frames (sent by BMAC) */
typedef enum {
	RPC_RC_ACK =  0,
	RPC_RC_HELLO,
	RPC_RC_RECONNECT,
	RPC_RC_VER_MISMATCH
} rpc_rc_t;

/* Management actions */
typedef enum {
	RPC_NULL = 0,
	RPC_HELLO,
	RPC_CONNECT,		/* Master (high) to slave (low). Slave to copy current
				 * session id and transaction id (mostly 0)
				 */
	RPC_CONNECT_ACK,	/* Ack from LOW_RPC */
	RPC_DOWN,		/* Down the other-end. The actual action is
				 * end specific.
				 */
	RPC_CONNECT_NACK,	/* Nack from LOW_RPC. This indicates potentially that
				 * dongle could already be running
				 */
	RPC_RESET		/* Resync using other end's session id (mostly HIGH->LOW)
				 * Also, reset the oe_trans, and trans to 0
				 */
} rpc_acn_t;

/* RPC States */
typedef enum {
	UNINITED = 0,
	WAIT_HELLO,		/* NDIS HIGH: HELLO sent, waiting for reply */
	HELLO_RECEIVED,
	WAIT_INITIALIZING,	/* CONNECT sent (HIGH) or awaited (LOW) */
	ESTABLISHED,		/* normal operating state */
	DISCONNECTED,
	ASLEEP,			/* suspended via bcm_rpc_sleep() */
	WAIT_RESUME		/* RESET sent after resume, awaiting completion */
} rpc_state_t;
198
/* Bitmask results returned by the bcm_rpc_hdr_*_validate() helpers */
#define	HDR_STATE_MISMATCH	0x1
#define	HDR_SESSION_MISMATCH	0x2
#define	HDR_XACTION_MISMATCH	0x4

#ifdef	BCMDBG_RPC
#define RPC_PKTLOG_DATASIZE	4
/* One entry of the tx/rx debug packet-log ring */
struct rpc_pktlog {
	uint16	trans;	/* transaction id of the logged call */
	int	len;	/* payload length */
	uint32	data[RPC_PKTLOG_DATASIZE]; /* First few bytes of the payload only */
};
#endif /* BCMDBG_RPC */
211
212#ifdef WLC_LOW
213static void bcm_rpc_dump_state(uint32 arg, uint argc, char *argv[]);
214#else
215static void bcm_rpc_fatal_dump(void *arg);
216#endif	/* WLC_LOW */
217
218#ifdef BCMDBG_RPC
219static void _bcm_rpc_dump_pktlog(rpc_info_t *rpci);
220#ifdef WLC_HIGH
221static void bcm_rpc_dump_pktlog_high(rpc_info_t *rpci);
222#else
223static void bcm_rpc_dump_pktlog_low(uint32 arg, uint argc, char *argv[]);
224#endif
225#endif	/* BCMDBG_RPC */
226
#ifdef WLC_HIGH
/* This lock is needed to handle the Receive Re-order queue that guarantees
 * in-order receive as it was observed that in NDIS at least, USB subsystem does
 * not guarantee it
 */
#ifdef NDIS
#define RPC_RO_LOCK(ri)		NdisAcquireSpinLock(&(ri)->reorder_lock)
#define RPC_RO_UNLOCK(ri)	NdisReleaseSpinLock(&(ri)->reorder_lock)
#else
/* NOTE(review): the saved IRQ flags live in the shared rpc_info struct
 * rather than a stack local, so these pairs look non-nestable and assume a
 * single lock holder at a time -- confirm against callers.
 */
#define RPC_RO_LOCK(ri)		spin_lock_irqsave(&(ri)->reorder_lock, (ri)->reorder_flags);
#define RPC_RO_UNLOCK(ri)	spin_unlock_irqrestore(&(ri)->reorder_lock, (ri)->reorder_flags);
#endif /* NDIS */
#else
#define RPC_RO_LOCK(ri)		do { } while (0)
#define RPC_RO_UNLOCK(ri)	do { } while (0)
#endif /* WLC_HIGH */
243
/* Per-instance RPC state, shared by the HIGH (host) and LOW (dongle) builds */
struct rpc_info {
	void *pdev;			/* Per-port driver handle for rx callback */
	struct rpc_transport_info *rpc_th;	/* transport layer handle */
	osl_t *osh;			/* OS abstraction handle */

	rpc_dispatch_cb_t dispatchcb;	/* callback when data is received */
	void *ctx;			/* Callback context */

	rpc_down_cb_t dncb;		/* callback when RPC goes down */
	void *dnctx;			/* Callback context */

	rpc_resync_cb_t resync_cb;	/* callback when host reenabled and dongle
					 * was not rebooted. Uses dnctx
					 */
	rpc_txdone_cb_t txdone_cb;	/* when non-null, called when a tx has completed. */
	uint8 rpc_tp_hdr_len;		/* header len for rpc and tp layer */

	uint8 session;			/* 255 sessions enough ? */
	uint16 trans;			/* More than 255 can't be pending */
	uint16 oe_trans;		/* OtherEnd tran id, dongle->host */
	uint16 rtn_trans;		/* BMAC: callreturn Id dongle->host */
	uint16 oe_rtn_trans;		/* HIGH: received BMAC callreturn id */

	rpc_buf_t *rtn_rpcbuf;		/* RPC ID for return transaction */

	rpc_state_t  state;		/* connection state machine */
	uint reset;			/* # of resets */
	uint cnt_xidooo;		/* transactionID out of order */
	uint cnt_rx_drop_hole;		/* number of rcp calls dropped due to reorder overflow */
	uint cnt_reorder_overflow;	/* number of time the reorder queue overflowed,
					 * causing drops
					 */
	uint32 version;			/* local EPI version, sent in CONNECT/RESET/HELLO */

	bool wait_init;			/* a thread is blocked in hello/up/resume */
	bool wait_return;		/* a thread is blocked awaiting a call return */

	rpc_osl_t *rpc_osh;		/* RPC OSL handle (lock + timed wait) */

#ifdef BCMDBG_RPC
	struct rpc_pktlog *send_log;
	uint16 send_log_idx;	/* Point to the next slot to fill-in */
	uint16 send_log_num;	/* Number of entries */

	struct rpc_pktlog *recv_log;
	uint16 recv_log_idx;	/* Point to the next slot to fill-in */
	uint16 recv_log_num;	/* Number of entries */
#endif /* BCMDBG_RPC */

#ifdef WLC_HIGH
#if defined(NDIS)
	NDIS_SPIN_LOCK reorder_lock; /* TO RAISE the IRQ */
	bool reorder_lock_alloced;
	bool down_oe_pending;
	bool down_pending;
#elif defined(linux)
	spinlock_t	reorder_lock;
	ulong reorder_flags;	/* saved IRQ flags for reorder_lock */
#endif /* NDIS */
#endif /* WLC_HIGH */
	/* Protect against rx reordering */
	rpc_buf_t *reorder_pktq;	/* linked list of out-of-order rx frames */
	uint	reorder_depth;		/* current queue depth */
	uint	reorder_depth_max;	/* high-water mark of reorder_depth */
	uint 	chipid;			/* chip id learned from the other end */
	uint	suspend_enable;		/* nonzero enables sleep/resume handling */
};
311
312static void bcm_rpc_tx_complete(void *ctx, rpc_buf_t *buf, int status);
313static void bcm_rpc_buf_recv(void *context, rpc_buf_t *);
314static void bcm_rpc_process_reorder_queue(rpc_info_t *rpci);
315static bool bcm_rpc_buf_recv_inorder(rpc_info_t *rpci, rpc_buf_t *rpc_buf, mbool hdr_invalid);
316
317#ifdef WLC_HIGH
318static rpc_buf_t *bcm_rpc_buf_recv_high(struct rpc_info *rpci, rpc_type_t type, rpc_acn_t acn,
319	rpc_buf_t *rpc_buf);
320static int bcm_rpc_resume_oe(struct rpc_info *rpci);
321#ifdef NDIS
322static int bcm_rpc_hello(rpc_info_t *rpci);
323#endif
324#else
325static rpc_buf_t *bcm_rpc_buf_recv_low(struct rpc_info *rpci, rpc_header_t header,
326	rpc_acn_t acn, rpc_buf_t *rpc_buf);
327#endif /* WLC_HIGH */
328static int bcm_rpc_up(rpc_info_t *rpci);
329static uint16 bcm_rpc_reorder_next_xid(struct rpc_info *rpci);
330
331
332#ifdef BCMDBG_RPC
333static void bcm_rpc_pktlog_init(rpc_info_t *rpci);
334static void bcm_rpc_pktlog_deinit(rpc_info_t *rpci);
335static struct rpc_pktlog *bcm_rpc_prep_entry(struct rpc_info * rpci, rpc_buf_t *b,
336                                             struct rpc_pktlog *cur, bool tx);
337static void bcm_rpc_add_entry_tx(struct rpc_info * rpci, struct rpc_pktlog *cur);
338static void bcm_rpc_add_entry_rx(struct rpc_info * rpci, struct rpc_pktlog *cur);
339#endif /* BCMDBG_RPC */
340
341
342/* Header and componet retrieval functions */
343static INLINE rpc_header_t
344bcm_rpc_header(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
345{
346	rpc_header_t *rpch = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
347	return ltoh32(*rpch);
348}
349
350static INLINE rpc_acn_t
351bcm_rpc_mgn_acn(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
352{
353	rpc_header_t *rpch = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
354
355	return (rpc_acn_t)ltoh32(*rpch);
356}
357
358static INLINE uint32
359bcm_rpc_mgn_ver(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
360{
361	rpc_header_t *rpch = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
362
363	return ltoh32(*rpch);
364}
365
366static INLINE rpc_rc_t
367bcm_rpc_mgn_reason(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
368{
369	rpc_header_t *rpch = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
370	return (rpc_rc_t)ltoh32(*rpch);
371}
372
373#ifdef WLC_HIGH
374static uint32
375bcm_rpc_mgn_chipid(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
376{
377	rpc_header_t *rpch = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
378
379	return ltoh32(*rpch);
380}
381#endif /* WLC_HIGH */
382
/* Extract the 16-bit transaction id into *xaction and check it against the
 * expected other-end id (rpci->oe_trans).  Management frames are exempt;
 * on the HIGH side return frames are also exempt (see comment below).
 * Returns HDR_XACTION_MISMATCH on mismatch, 0 otherwise.
 */
static INLINE uint
bcm_rpc_hdr_xaction_validate(struct rpc_info *rpci, rpc_header_t header, uint32 *xaction,
                             bool verbose)
{
	uint type;

	type = RPC_HDR_TYPE(header);
	*xaction = RPC_HDR_XACTION(header);

	/* High driver does not check the return transaction to be in order */
	if (type != RPC_TYPE_MGN &&
#ifdef WLC_HIGH
	    type != RPC_TYPE_RTN &&
#endif
	     *xaction != rpci->oe_trans) {
#ifdef WLC_HIGH
		if (verbose) {
			RPC_ERR(("Transaction mismatch: expected:0x%x got:0x%x type: %d\n",
				rpci->oe_trans, *xaction, type));
		}
#endif
		return HDR_XACTION_MISMATCH;
	}

	return 0;
}
409
410static INLINE uint
411bcm_rpc_hdr_session_validate(struct rpc_info *rpci, rpc_header_t header)
412{
413#ifdef WLC_LOW
414	if (RPC_HDR_TYPE(header) == RPC_TYPE_MGN)
415		return 0;
416#endif
417
418	if (rpci->session != RPC_HDR_SESSION(header))
419	    return HDR_SESSION_MISMATCH;
420	return 0;
421}
422
423static INLINE uint
424bcm_rpc_hdr_state_validate(struct rpc_info *rpci, rpc_header_t header)
425{
426	uint type = RPC_HDR_TYPE(header);
427
428	if ((type == RPC_TYPE_UNKNOWN) || (type > RPC_TYPE_MGN))
429		return HDR_STATE_MISMATCH;
430
431	/* Everything allowed during this transition time */
432	if (rpci->state == ASLEEP)
433		return 0;
434
435	/* Only managment frames allowed before ESTABLISHED state */
436	if ((rpci->state != ESTABLISHED) && (type != RPC_TYPE_MGN)) {
437		RPC_ERR(("bcm_rpc_header_validate: State mismatch: state:%d type:%d\n",
438		           rpci->state, type));
439		return HDR_STATE_MISMATCH;
440	}
441
442	return 0;
443}
444
445static INLINE mbool
446bcm_rpc_hdr_validate(struct rpc_info *rpci, rpc_buf_t *rpc_buf, uint32 *xaction,
447                     bool verbose)
448{
449	/* First the state against the type */
450	mbool ret = 0;
451	rpc_header_t header = bcm_rpc_header(rpci, rpc_buf);
452
453	mboolset(ret, bcm_rpc_hdr_state_validate(rpci, header));
454	mboolset(ret, bcm_rpc_hdr_xaction_validate(rpci, header, xaction, verbose));
455	mboolset(ret, bcm_rpc_hdr_session_validate(rpci, header));
456
457	return ret;
458}
459
/* Allocate and initialize an RPC instance: attach the RPC OSL, register
 * tx-complete/rx callbacks with the transport, then bring the link up.
 * On HIGH, *devid is filled with the chip id learned during connect.
 * Returns the new handle, or NULL on failure (partially-built state is
 * torn down via bcm_rpc_detach).
 */
struct rpc_info *
BCMATTACHFN(bcm_rpc_attach)(void *pdev, osl_t *osh, struct rpc_transport_info *rpc_th,
	uint16 *devid)
{
	struct rpc_info *rpci;

#ifndef WLC_HIGH
	UNUSED_PARAMETER(devid);
#endif /* WLC_HIGH */

	if ((rpci = (struct rpc_info *)MALLOC(osh, sizeof(struct rpc_info))) == NULL)
		return NULL;

	bzero(rpci, sizeof(struct rpc_info));

	rpci->osh = osh;
	rpci->pdev = pdev;
	rpci->rpc_th = rpc_th;
	rpci->session = 0x69;	/* initial session id */

	/* initialize lock and queue */
	rpci->rpc_osh = rpc_osl_attach(osh);

	if (rpci->rpc_osh == NULL) {
		RPC_ERR(("bcm_rpc_attach: osl attach failed\n"));
		goto fail;
	}

	/* hook our tx-complete and rx handlers into the transport layer */
	bcm_rpc_tp_register_cb(rpc_th, bcm_rpc_tx_complete, rpci,
	                       bcm_rpc_buf_recv, rpci, rpci->rpc_osh);

	rpci->version = EPI_VERSION_NUM;

	rpci->rpc_tp_hdr_len = RPC_HDR_LEN + bcm_rpc_buf_tp_header_len(rpci->rpc_th);

#if defined(WLC_HIGH) && defined(NDIS)
	bcm_rpc_hello(rpci);
#endif

	if (bcm_rpc_up(rpci)) {
		RPC_ERR(("bcm_rpc_attach: rpc_up failed\n"));
		goto fail;
	}

#ifdef WLC_HIGH
	/* chipid was recorded while the connect handshake completed */
	*devid = (uint16)rpci->chipid;
#endif

	return rpci;
fail:
	bcm_rpc_detach(rpci);
	return NULL;
}
513
514static uint16
515bcm_rpc_reorder_next_xid(struct rpc_info *rpci)
516{
517	rpc_buf_t * buf;
518	rpc_header_t header;
519	uint16 cur_xid = rpci->oe_trans;
520	uint16 min_xid = 0;
521	uint16 min_delta = 0xffff;
522	uint16 xid, delta;
523
524	ASSERT(rpci->rpc_th);
525	for (buf = rpci->reorder_pktq;
526	     buf != NULL;
527	     buf = bcm_rpc_buf_next_get(rpci->rpc_th, buf)) {
528		header = bcm_rpc_header(rpci, buf);
529		xid = RPC_HDR_XACTION(header);
530		delta = xid - cur_xid;
531
532		if (delta < min_delta) {
533			min_delta = delta;
534			min_xid = xid;
535		}
536	}
537
538	return min_xid;
539}
540
541void
542BCMATTACHFN(bcm_rpc_detach)(struct rpc_info *rpci)
543{
544	if (!rpci)
545		return;
546
547	bcm_rpc_down(rpci);
548
549	if (rpci->reorder_pktq) {
550		rpc_buf_t * node;
551		ASSERT(rpci->rpc_th);
552		while ((node = rpci->reorder_pktq)) {
553			rpci->reorder_pktq = bcm_rpc_buf_next_get(rpci->rpc_th,
554			                                          node);
555			bcm_rpc_buf_next_set(rpci->rpc_th, node, NULL);
556#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC)
557			PKTFREE(rpci->osh, node, FALSE);
558#else
559			bcm_rpc_tp_buf_free(rpci->rpc_th, node);
560#endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY || BCM_RPC_ROC */
561		}
562		ASSERT(rpci->reorder_pktq == NULL);
563		rpci->reorder_depth = 0;
564		rpci->reorder_depth_max = 0;
565	}
566
567#ifdef WLC_HIGH
568#if defined(NDIS)
569	if (rpci->reorder_lock_alloced)
570		NdisFreeSpinLock(&rpci->reorder_lock);
571#endif
572#endif /* WLC_HIGH */
573
574	/* rpc is going away, cut off registered cbs from rpc_tp layer */
575	bcm_rpc_tp_deregister_cb(rpci->rpc_th);
576
577#ifdef WLC_LOW
578	bcm_rpc_tp_txflowctlcb_deinit(rpci->rpc_th);
579#endif
580
581	if (rpci->rpc_osh)
582		rpc_osl_detach(rpci->rpc_osh);
583
584	MFREE(rpci->osh, rpci, sizeof(struct rpc_info));
585	rpci = NULL;
586}
587
588rpc_buf_t *
589bcm_rpc_buf_alloc(struct rpc_info *rpci, int datalen)
590{
591	rpc_buf_t *rpc_buf;
592	int len = datalen + RPC_HDR_LEN;
593
594	ASSERT(rpci->rpc_th);
595	rpc_buf = bcm_rpc_tp_buf_alloc(rpci->rpc_th, len);
596
597	if (rpc_buf == NULL)
598		return NULL;
599
600	/* Reserve space for RPC Header */
601	bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_HDR_LEN);
602
603	return rpc_buf;
604}
605
606uint
607bcm_rpc_buf_header_len(struct rpc_info *rpci)
608{
609	return rpci->rpc_tp_hdr_len;
610}
611
612void
613bcm_rpc_buf_free(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
614{
615	bcm_rpc_tp_buf_free(rpci->rpc_th, rpc_buf);
616}
617
618void
619bcm_rpc_rxcb_init(struct rpc_info *rpci, void *ctx, rpc_dispatch_cb_t cb,
620	void *dnctx, rpc_down_cb_t dncb, rpc_resync_cb_t resync_cb, rpc_txdone_cb_t txdone_cb)
621{
622	rpci->dispatchcb = cb;
623	rpci->ctx = ctx;
624	rpci->dnctx = dnctx;
625	rpci->dncb = dncb;
626	rpci->resync_cb = resync_cb;
627	rpci->txdone_cb = txdone_cb;
628}
629
630void
631bcm_rpc_rxcb_deinit(struct rpc_info *rpci)
632{
633	if (!rpci)
634		return;
635
636	rpci->dispatchcb = NULL;
637	rpci->ctx = NULL;
638	rpci->dnctx = NULL;
639	rpci->dncb = NULL;
640	rpci->resync_cb = NULL;
641}
642
643struct rpc_transport_info *
644bcm_rpc_tp_get(struct rpc_info *rpci)
645{
646	return rpci->rpc_th;
647}
648
649/* get original os handle */
650osl_t*
651bcm_rpc_osh_get(struct rpc_info *rpci)
652{
653	return rpci->osh;
654}
655
656
657#ifdef BCM_RPC_TOC
658static void
659bcm_rpc_tp_tx_encap(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
660{
661	uint32 *tp_lenp;
662	uint32 rpc_len;
663
664	rpc_len = pkttotlen(rpci->osh, rpc_buf);
665	tp_lenp = (uint32*)bcm_rpc_buf_push(rpci->rpc_th, rpc_buf, BCM_RPC_TP_ENCAP_LEN);
666	*tp_lenp = htol32(rpc_len);
667
668}
669#endif
/* Compose the 32-bit RPC header in *header (wire order) and, for management
 * frames, write the action word after it -- plus the version word for
 * CONNECT/RESET/HELLO on HIGH.  Data frames carry rpci->trans; LOW-side
 * return frames carry rpci->rtn_trans.  The transaction counters are NOT
 * incremented here; callers do that after a successful send.
 */
static void
rpc_header_prep(struct rpc_info *rpci, rpc_header_t *header, uint type, uint action)
{
	uint32 v;

	v = 0;
	v |= (type << 24);

	/* Mgmt action follows the header */
	if (type == RPC_TYPE_MGN) {
		*(header + 1) = htol32(action);
#ifdef WLC_HIGH
		if (action == RPC_CONNECT || action == RPC_RESET || action == RPC_HELLO)
			*(header + 2) = htol32(rpci->version);
#endif
	}
#ifdef WLC_LOW
	else if (type == RPC_TYPE_RTN)
		v |= (rpci->rtn_trans);
#endif
	else
		v |= (rpci->trans);

	v |= (rpci->session << 16);

	*header = htol32(v);

	RPC_TRACE(("rpc_header_prep: type:0x%x action: %d trans:0x%x\n",
	           type, action, rpci->trans));
}
700
701#if defined(WLC_HIGH) && defined(NDIS)
702
703static int
704bcm_rpc_hello(struct rpc_info *rpci)
705{
706	int ret = -1, count = 10;
707	rpc_buf_t *rpc_buf;
708	rpc_header_t *header;
709
710	RPC_OSL_LOCK(rpci->rpc_osh);
711	rpci->state = WAIT_HELLO;
712	rpci->wait_init = TRUE;
713	RPC_OSL_UNLOCK(rpci->rpc_osh);
714
715	while (ret && (count-- > 0)) {
716
717		/* Allocate a frame, prep it, send and wait */
718		rpc_buf = bcm_rpc_tp_buf_alloc(rpci->rpc_th, RPC_HDR_LEN + RPC_ACN_LEN + RPC_VER_LEN
719			+ RPC_CHIPID_LEN);
720
721		if (!rpc_buf)
722			break;
723
724		header = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
725
726		rpc_header_prep(rpci, header, RPC_TYPE_MGN, RPC_HELLO);
727
728		if (bcm_rpc_tp_buf_send(rpci->rpc_th, rpc_buf)) {
729			RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));
730			return -1;
731		}
732
733		RPC_ERR(("%s: waiting to receive hello\n", __FUNCTION__));
734
735		RPC_OSL_WAIT(rpci->rpc_osh, RPC_INIT_WAIT_TIMEOUT_MSEC, NULL);
736
737		RPC_TRACE(("%s: wait done, ret = %d\n", __FUNCTION__, ret));
738
739		/* See if we timed out or actually initialized */
740		RPC_OSL_LOCK(rpci->rpc_osh);
741		if (rpci->state == HELLO_RECEIVED)
742			ret = 0;
743		RPC_OSL_UNLOCK(rpci->rpc_osh);
744
745	}
746
747	/* See if we timed out or actually initialized */
748	RPC_OSL_LOCK(rpci->rpc_osh);
749	rpci->wait_init = FALSE;
750	RPC_OSL_UNLOCK(rpci->rpc_osh);
751
752	return ret;
753}
754
755#endif /* WLC_HIGH && NDIS */
756
757#ifdef WLC_HIGH
758static int
759bcm_rpc_up(struct rpc_info *rpci)
760{
761	rpc_buf_t *rpc_buf;
762	rpc_header_t *header;
763	int ret;
764
765	/* Allocate a frame, prep it, send and wait */
766	rpc_buf = bcm_rpc_tp_buf_alloc(rpci->rpc_th, RPC_HDR_LEN + RPC_ACN_LEN + RPC_VER_LEN
767		+ RPC_CHIPID_LEN);
768
769	if (!rpc_buf)
770		return -1;
771
772	header = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
773
774	rpc_header_prep(rpci, header, RPC_TYPE_MGN, RPC_CONNECT);
775
776	RPC_OSL_LOCK(rpci->rpc_osh);
777	rpci->state = WAIT_INITIALIZING;
778	rpci->wait_init = TRUE;
779
780#if defined(NDIS)
781	if (!rpci->reorder_lock_alloced) {
782		NdisAllocateSpinLock(&rpci->reorder_lock);
783		rpci->reorder_lock_alloced = TRUE;
784	}
785#elif defined(linux)
786	spin_lock_init(&rpci->reorder_lock);
787#endif
788
789	RPC_OSL_UNLOCK(rpci->rpc_osh);
790
791	if (bcm_rpc_tp_buf_send(rpci->rpc_th, rpc_buf)) {
792		RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));
793		return -1;
794	}
795
796	/* Wait for state to change to established. The receive thread knows what to do */
797	RPC_ERR(("%s: waiting to be connected\n", __FUNCTION__));
798
799	ret = RPC_OSL_WAIT(rpci->rpc_osh, RPC_INIT_WAIT_TIMEOUT_MSEC, NULL);
800
801	RPC_TRACE(("%s: wait done, ret = %d\n", __FUNCTION__, ret));
802
803	if (ret < 0) {
804		rpci->wait_init = FALSE;
805		return ret;
806	}
807
808	/* See if we timed out or actually initialized */
809	RPC_OSL_LOCK(rpci->rpc_osh);
810	if (rpci->state == ESTABLISHED)
811		ret = 0;
812	else
813		ret = -1;
814	rpci->wait_init = FALSE;
815	RPC_OSL_UNLOCK(rpci->rpc_osh);
816
817#ifdef BCMDBG_RPC
818	bcm_rpc_pktlog_init(rpci);
819#endif
820
821	return ret;
822}
823
824int
825bcm_rpc_is_asleep(struct rpc_info *rpci)
826{
827	return (rpci->state == ASLEEP);
828}
829
/* Put the RPC link to sleep for host suspend.  No-op (TRUE) when suspend
 * handling is not enabled.  On NDIS the link is fully downed; otherwise the
 * session id is bumped so frames from the old session are rejected on wake.
 * Always returns TRUE.
 */
bool
bcm_rpc_sleep(struct rpc_info *rpci)
{
	if (!rpci->suspend_enable)
		return TRUE;
	bcm_rpc_tp_sleep(rpci->rpc_th);
	rpci->state = ASLEEP;
	/* Ignore anything coming after this */
#ifdef NDIS
	bcm_rpc_down(rpci);
#else
	rpci->session++;
#endif
	return TRUE;
}
845
846#ifdef NDIS
847int
848bcm_rpc_shutdown(struct rpc_info *rpci)
849{
850	int ret = -1;
851
852	if (rpci) {
853		ret = bcm_rpc_tp_shutdown(rpci->rpc_th);
854		rpci->state = DISCONNECTED;
855	}
856	return ret;
857}
858#endif /* NDIS */
859
860bool
861bcm_rpc_resume(struct rpc_info *rpci, int *fw_reload)
862{
863	if (!rpci->suspend_enable)
864		return TRUE;
865
866	bcm_rpc_tp_resume(rpci->rpc_th, fw_reload);
867#ifdef NDIS
868	if (fw_reload) {
869		rpci->trans = 0;
870		rpci->oe_trans = 0;
871		bcm_rpc_hello(rpci);
872		bcm_rpc_up(rpci);
873	}
874	else
875		rpci->state = ESTABLISHED;
876#else
877	if (bcm_rpc_resume_oe(rpci) == 0) {
878		rpci->trans = 0;
879		rpci->oe_trans = 0;
880	}
881#endif
882	RPC_TRACE(("bcm_rpc_resume done, state %d\n", rpci->state));
883	return (rpci->state == ESTABLISHED);
884}
885
886static int
887bcm_rpc_resume_oe(struct rpc_info *rpci)
888{
889	rpc_buf_t *rpc_buf;
890	rpc_header_t *header;
891	int ret;
892
893	/* Allocate a frame, prep it, send and wait */
894	rpc_buf = bcm_rpc_tp_buf_alloc(rpci->rpc_th, RPC_HDR_LEN + RPC_ACN_LEN + RPC_VER_LEN);
895
896	if (!rpc_buf)
897		return -1;
898
899	header = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
900
901	rpc_header_prep(rpci, header, RPC_TYPE_MGN, RPC_RESET);
902
903	RPC_OSL_LOCK(rpci->rpc_osh);
904	rpci->state = WAIT_RESUME;
905	rpci->wait_init = TRUE;
906	RPC_OSL_UNLOCK(rpci->rpc_osh);
907
908	/* Don't care for the return value */
909	if (bcm_rpc_tp_buf_send(rpci->rpc_th, rpc_buf)) {
910		RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));
911		return -1;
912	}
913
914	/* Wait for state to change to established. The receive thread knows what to do */
915	RPC_ERR(("%s: waiting to be resumed\n", __FUNCTION__));
916
917	ret = RPC_OSL_WAIT(rpci->rpc_osh, RPC_INIT_WAIT_TIMEOUT_MSEC, NULL);
918
919	RPC_TRACE(("%s: wait done, ret = %d\n", __FUNCTION__, ret));
920
921	if (ret < 0) {
922		rpci->wait_init = FALSE;
923		return ret;
924	}
925
926	/* See if we timed out or actually initialized */
927	RPC_OSL_LOCK(rpci->rpc_osh);
928	if (rpci->state == ESTABLISHED)
929		ret = 0;
930	else
931		ret = -1;
932	rpci->wait_init = FALSE;
933	RPC_OSL_UNLOCK(rpci->rpc_osh);
934
935	return ret;
936}
937#else
/* LOW (dongle): nothing to send -- just enter WAIT_INITIALIZING and wait
 * for the host's CONNECT.  Also registers console dump commands.
 * Always returns 0.
 */
static int
bcm_rpc_up(struct rpc_info *rpci)
{
	rpci->state = WAIT_INITIALIZING;

#ifdef BCMDBG_RPC
	bcm_rpc_pktlog_init(rpci);
	hndrte_cons_addcmd("rpcpktdump", bcm_rpc_dump_pktlog_low, (uint32)rpci);
#endif
	hndrte_cons_addcmd("rpcdump", bcm_rpc_dump_state, (uint32)rpci);
	return 0;
}
950
951static int
952bcm_rpc_connect_resp(struct rpc_info *rpci, rpc_acn_t acn, uint32 reason)
953{
954	rpc_buf_t *rpc_buf;
955	rpc_header_t *header;
956
957	/* Allocate a frame, prep it, send and wait */
958	rpc_buf = bcm_rpc_tp_buf_alloc(rpci->rpc_th, RPC_HDR_LEN + RPC_ACN_LEN +
959	                               RPC_RC_LEN + RPC_VER_LEN + RPC_CHIPID_LEN);
960	if (!rpc_buf) {
961		RPC_ERR(("%s: bcm_rpc_tp_buf_alloc() failed\n", __FUNCTION__));
962		return FALSE;
963	}
964
965	header = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
966
967	rpc_header_prep(rpci, header, RPC_TYPE_MGN, acn);
968
969	*(header + 2) = ltoh32(rpci->version);
970	*(header + 3) = ltoh32(reason);
971#ifdef BCMCHIPID
972	*(header + 4) = ltoh32(BCMCHIPID);
973#endif /* BCMCHIPID */
974	if (bcm_rpc_tp_buf_send(rpci->rpc_th, rpc_buf)) {
975		RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));
976		return FALSE;
977	}
978
979	return TRUE;
980}
981#endif /* WLC_HIGH */
982
/* Periodic housekeeping: prints an uptime message once a minute and kicks
 * the transport watchdog.  NOTE: uptime/count are function-statics, so this
 * assumes a single RPC instance per driver image.
 */
void
bcm_rpc_watchdog(struct rpc_info *rpci)
{
	static uint32 uptime = 0;

#ifdef WLC_LOW
/* rpc watchdog is called every 5 msec in the low driver */
	static uint32 count = 0;
	count++;
	if (count % 200 == 0) {	/* 200 * 5 msec == 1 second */
		 count = 0;
		 uptime++;
		if (uptime % 60 == 0)
			RPC_ERR(("rpc uptime %d minutes\n", (uptime / 60)));
	}
#else
	/* assumes one call per second on the high side -- TODO confirm the
	 * caller's period
	 */
	uptime++;
	if (uptime % 60 == 0) {
		RPC_ERR(("rpc uptime %d minutes\n", (uptime / 60)));
	}
#endif
	bcm_rpc_tp_watchdog(rpci->rpc_th);
}
1006
/* Take the RPC link down exactly once: dump debug state, mark the state
 * DISCONNECTED, notify the registered down callback and down the transport.
 * Calls while already DISCONNECTED or ASLEEP are no-ops.
 */
void
bcm_rpc_down(struct rpc_info *rpci)
{
	RPC_ERR(("%s\n", __FUNCTION__));

#ifdef BCMDBG_RPC
	bcm_rpc_pktlog_deinit(rpci);
#endif

	RPC_OSL_LOCK(rpci->rpc_osh);
	if (rpci->state != DISCONNECTED && rpci->state != ASLEEP) {
		/* lock dropped while dumping state, re-acquired to flip state */
		RPC_OSL_UNLOCK(rpci->rpc_osh);
#ifdef WLC_HIGH
		bcm_rpc_fatal_dump(rpci);
#else
		bcm_rpc_dump_state((uint32)rpci, 0, NULL);
#endif
		RPC_OSL_LOCK(rpci->rpc_osh);
		rpci->state = DISCONNECTED;
		RPC_OSL_UNLOCK(rpci->rpc_osh);
		if (rpci->dncb)
			(rpci->dncb)(rpci->dnctx);
		bcm_rpc_tp_down(rpci->rpc_th);
		return;
	}
	RPC_OSL_UNLOCK(rpci->rpc_osh);
}
1034
1035#if defined(USBAP) && (defined(WLC_HIGH) && !defined(WLC_LOW))
1036/* For USBAP external image, reboot system upon RPC error instead of just turning RPC down */
1037#include <siutils.h>
/* USBAP external image: a fatal RPC error reboots the whole system by
 * arming the chip watchdog with a 1-tick timeout, instead of merely taking
 * the RPC link down.  The rpci argument is unused on this path.
 */
void
bcm_rpc_err_down(struct rpc_info *rpci)
{
	si_t *sih = si_kattach(SI_OSH);

	RPC_ERR(("%s: rebooting system due to RPC error.\n", __FUNCTION__));
	si_watchdog(sih, 1);
}
1046#else
1047#define bcm_rpc_err_down	bcm_rpc_down
1048#endif
1049
1050static void
1051bcm_rpc_tx_complete(void *ctx, rpc_buf_t *buf, int status)
1052{
1053	struct rpc_info *rpci = (struct rpc_info *)ctx;
1054
1055	RPC_TRACE(("%s: status 0x%x\n", __FUNCTION__, status));
1056
1057	ASSERT(rpci && rpci->rpc_th);
1058
1059	if (buf) {
1060		if (rpci->txdone_cb) {
1061			/* !!must pull off the rpc/tp header after dbus is done for wl driver */
1062			rpci->txdone_cb(rpci->ctx, buf);
1063		} else
1064			bcm_rpc_tp_buf_free(rpci->rpc_th, buf);
1065	}
1066}
1067
/* Asynchronous (fire-and-forget) RPC data call.  Ownership of b always
 * passes to this function: on success it goes to the transport (reclaimed
 * later via bcm_rpc_tx_complete); on failure it is freed or handed to
 * txdone_cb.  Returns 0 on success, -1 when the link is not ESTABLISHED or
 * the transport send fails (the latter also takes the link down).
 */
int
bcm_rpc_call(struct rpc_info *rpci, rpc_buf_t *b)
{
	rpc_header_t *header;
	int err = 0;
#ifdef BCMDBG_RPC
	struct rpc_pktlog cur;
#endif

	RPC_TRACE(("%s:\n", __FUNCTION__));

	RPC_OSL_LOCK(rpci->rpc_osh);
	if (rpci->state != ESTABLISHED) {
		/* link not up: fail the call but still consume the buffer */
		err = -1;
		RPC_OSL_UNLOCK(rpci->rpc_osh);
#ifdef BCM_RPC_TOC

		/* TOC: frame the call anyway and let txdone_cb reclaim it */
		header = (rpc_header_t *)bcm_rpc_buf_push(rpci->rpc_th, b, RPC_HDR_LEN);
		rpc_header_prep(rpci, header, RPC_TYPE_DATA, 0);
		bcm_rpc_tp_tx_encap(rpci, b);
		if (rpci->txdone_cb) {
			/* !!must pull off the rpc/tp header after dbus is done for wl driver */
			rpci->txdone_cb(rpci->ctx, b);
		} else

#endif
			bcm_rpc_buf_free(rpci, b);

		goto done;
	}
	RPC_OSL_UNLOCK(rpci->rpc_osh);

#ifdef BCMDBG_RPC
	/* Prepare the current log entry but add only if the TX was successful */
	/* This is done here before DATA pointer gets modified */
	if (RPC_PKTLOG_ON())
		bcm_rpc_prep_entry(rpci, b, &cur, TRUE);
#endif

	header = (rpc_header_t *)bcm_rpc_buf_push(rpci->rpc_th, b, RPC_HDR_LEN);

	rpc_header_prep(rpci, header, RPC_TYPE_DATA, 0);

#ifdef BCMDBG_RPC
	if (RPC_PKTTRACE_ON()) {
#ifdef BCMDBG
		prhex("RPC Call ", bcm_rpc_buf_data(rpci->rpc_th, b),
		      bcm_rpc_buf_len_get(rpci->rpc_th, b));
#endif
	}
#endif	/* BCMDBG_RPC */

	if (bcm_rpc_tp_buf_send(rpci->rpc_th, b)) {
		RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));

		/* reclaim the buffer, then treat the link as broken */
		if (rpci->txdone_cb) {
			rpci->txdone_cb(rpci->ctx, b);
		} else
			bcm_rpc_tp_buf_free(rpci->rpc_th, b);

		bcm_rpc_err_down(rpci);
		return -1;
	}

	/* sent: advance our transaction id for the next call */
	RPC_OSL_LOCK(rpci->rpc_osh);
	rpci->trans++;
	RPC_OSL_UNLOCK(rpci->rpc_osh);

#ifdef BCMDBG_RPC	/* Since successful add the entry */
	if (RPC_PKTLOG_ON()) {
		bcm_rpc_add_entry_tx(rpci, &cur);
	}
#endif
done:
	return err;
}
1144
1145#ifdef WLC_HIGH
1146rpc_buf_t *
1147bcm_rpc_call_with_return(struct rpc_info *rpci, rpc_buf_t *b)
1148{
1149	rpc_header_t *header;
1150	rpc_buf_t *retb = NULL;
1151	int ret;
1152#ifdef BCMDBG_RPC
1153	struct rpc_pktlog cur;
1154#endif
1155	bool timedout = FALSE;
1156	uint32 start_wait_time;
1157
1158	RPC_TRACE(("%s:\n", __FUNCTION__));
1159
1160	RPC_OSL_LOCK(rpci->rpc_osh);
1161	if (rpci->state != ESTABLISHED) {
1162		RPC_OSL_UNLOCK(rpci->rpc_osh);
1163		RPC_ERR(("%s: RPC call before ESTABLISHED state\n", __FUNCTION__));
1164		bcm_rpc_buf_free(rpci, b);
1165		return NULL;
1166	}
1167	RPC_OSL_UNLOCK(rpci->rpc_osh);
1168
1169#ifdef BCMDBG_RPC
1170	/* Prepare the current log entry but add only if the TX was successful */
1171	/* This is done here before DATA pointer gets modified */
1172	if (RPC_PKTLOG_ON())
1173		bcm_rpc_prep_entry(rpci, b, &cur, TRUE);
1174#endif
1175
1176	header = (rpc_header_t *)bcm_rpc_buf_push(rpci->rpc_th, b, RPC_HDR_LEN);
1177
1178	rpc_header_prep(rpci, header, RPC_TYPE_RTN, 0);
1179
1180	RPC_OSL_LOCK(rpci->rpc_osh);
1181	rpci->trans++;
1182	ASSERT(rpci->rtn_rpcbuf == NULL);
1183	rpci->wait_return = TRUE;
1184	RPC_OSL_UNLOCK(rpci->rpc_osh);
1185
1186	/* Prep the return packet BEFORE sending the buffer and also within spinlock
1187	 * within raised IRQ
1188	 */
1189	if ((ret = bcm_rpc_tp_recv_rtn(rpci->rpc_th)) != BCME_OK) {
1190		RPC_ERR(("%s: bcm_rpc_tp_recv_rtn() failed\n", __FUNCTION__));
1191
1192		RPC_OSL_LOCK(rpci->rpc_osh);
1193		rpci->wait_return = FALSE;
1194		RPC_OSL_UNLOCK(rpci->rpc_osh);
1195		if ((ret != BCME_NORESOURCE) && (ret != BCME_BUSY))
1196			bcm_rpc_err_down(rpci);
1197		return NULL;
1198	}
1199
1200
1201#ifdef BCMDBG_RPC
1202	if (RPC_PKTTRACE_ON()) {
1203#ifdef BCMDBG
1204		prhex("RPC Call With Return Buf", bcm_rpc_buf_data(rpci->rpc_th, b),
1205		      bcm_rpc_buf_len_get(rpci->rpc_th, b));
1206#endif
1207	}
1208#endif	/* BCMDBG_RPC */
1209
1210	if (bcm_rpc_tp_buf_send(rpci->rpc_th, b)) {
1211		RPC_ERR(("%s: bcm_rpc_bus_buf_send() failed\n", __FUNCTION__));
1212
1213		RPC_OSL_LOCK(rpci->rpc_osh);
1214		rpci->wait_return = FALSE;
1215		RPC_OSL_UNLOCK(rpci->rpc_osh);
1216		bcm_rpc_err_down(rpci);
1217		return NULL;
1218	}
1219
1220	bcm_rpc_tp_agg_set(rpci->rpc_th, BCM_RPC_TP_HOST_AGG_AMPDU, FALSE);
1221
1222	start_wait_time = OSL_SYSUPTIME();
1223	ret = RPC_OSL_WAIT(rpci->rpc_osh, RPC_RETURN_WAIT_TIMEOUT_MSEC, &timedout);
1224
1225	/* When RPC_OSL_WAIT returns because of signal pending. wait for the signal to
1226	 * be processed
1227	 */
1228	RPC_OSL_LOCK(rpci->rpc_osh);
1229	while ((ret < 0) && ((OSL_SYSUPTIME() - start_wait_time) <= RPC_RETURN_WAIT_TIMEOUT_MSEC)) {
1230		RPC_OSL_UNLOCK(rpci->rpc_osh);
1231		ret = RPC_OSL_WAIT(rpci->rpc_osh, RPC_RETURN_WAIT_TIMEOUT_MSEC, &timedout);
1232		RPC_OSL_LOCK(rpci->rpc_osh);
1233	}
1234
1235	if (ret || timedout) {
1236		RPC_ERR(("%s: RPC call trans 0x%x return wait err %d timedout %d limit %d(ms)\n",
1237		         __FUNCTION__, (rpci->trans - 1), ret, timedout,
1238		         RPC_RETURN_WAIT_TIMEOUT_MSEC));
1239		rpci->wait_return = FALSE;
1240		RPC_OSL_UNLOCK(rpci->rpc_osh);
1241#ifdef BCMDBG_RPC
1242		printf("Failed trans 0x%x len %d data 0x%x\n",
1243		       cur.trans,
1244		       cur.len,
1245		       cur.data[0]);
1246		bcm_rpc_dump_pktlog_high(rpci);
1247#endif
1248		bcm_rpc_err_down(rpci);
1249		return NULL;
1250	}
1251
1252	/* See if we timed out or actually initialized */
1253	ASSERT(rpci->rtn_rpcbuf != NULL);	/* Make sure we've got the response */
1254	retb = rpci->rtn_rpcbuf;
1255	rpci->rtn_rpcbuf = NULL;
1256	rpci->wait_return = FALSE; /* Could have woken up by timeout */
1257	RPC_OSL_UNLOCK(rpci->rpc_osh);
1258
1259#ifdef BCMDBG_RPC	/* Since successful add the entry */
1260	if (RPC_PKTLOG_ON())
1261		bcm_rpc_add_entry_tx(rpci, &cur);
1262#endif
1263
1264	return retb;
1265}
1266#endif /* WLC_HIGH */
1267
1268#ifdef WLC_LOW
1269int
1270bcm_rpc_call_return(struct rpc_info *rpci, rpc_buf_t *b)
1271{
1272	rpc_header_t *header;
1273
1274	RPC_TRACE(("%s\n", __FUNCTION__));
1275
1276	header = (rpc_header_t *)bcm_rpc_buf_push(rpci->rpc_th, b, RPC_HDR_LEN);
1277
1278	rpc_header_prep(rpci, header, RPC_TYPE_RTN, 0);
1279
1280#ifdef BCMDBG_RPC
1281	if (RPC_PKTTRACE_ON()) {
1282#ifdef BCMDBG
1283		prhex("RPC Call Return Buf", bcm_rpc_buf_data(rpci->rpc_th, b),
1284		      bcm_rpc_buf_len_get(rpci->rpc_th, b));
1285#endif
1286	}
1287#endif	/* BCMDBG_RPC */
1288
1289	/* If the TX fails, it's sender's responsibilty */
1290	if (bcm_rpc_tp_send_callreturn(rpci->rpc_th, b)) {
1291		RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));
1292		bcm_rpc_err_down(rpci);
1293		return -1;
1294	}
1295
1296	rpci->rtn_trans++;
1297	return 0;
1298}
1299#endif /* WLC_LOW */
1300
/* This is expected to be called at DPC of the bus driver ? */
/* RX entry point from the transport: validate the RPC header, then either
 * process the buffer in order or park it on the reorder queue when only
 * its transaction ID mismatches (packet arrived ahead of the expected one).
 */
static void
bcm_rpc_buf_recv(void *context, rpc_buf_t *rpc_buf)
{
	uint xaction;
	struct rpc_info *rpci = (struct rpc_info *)context;
	mbool hdr_invalid = 0;
	ASSERT(rpci && rpci->rpc_th);

	RPC_TRACE(("%s:\n", __FUNCTION__));

	RPC_RO_LOCK(rpci);

	/* Only if the header itself checks out , and only xaction does not */
	hdr_invalid = bcm_rpc_hdr_validate(rpci, rpc_buf, &xaction, TRUE);

	if (mboolisset(hdr_invalid, HDR_XACTION_MISMATCH) &&
	    !mboolisset(hdr_invalid, ~HDR_XACTION_MISMATCH)) {
		/* Out of order but otherwise valid: prepend to the singly
		 * linked reorder queue and track depth statistics.
		 */
		rpc_buf_t *node = rpci->reorder_pktq;
		rpci->cnt_xidooo++;
		rpci->reorder_depth++;
		if (rpci->reorder_depth > rpci->reorder_depth_max)
			rpci->reorder_depth_max = rpci->reorder_depth;

		/* Catch roll-over or retries */
		rpci->reorder_pktq = rpc_buf;

		if (node != NULL)
			bcm_rpc_buf_next_set(rpci->rpc_th, rpc_buf, node);

		/* if we have held too many packets, move past the hole */
		if (rpci->reorder_depth > BCM_RPC_REORDER_LIMIT) {
			uint16 next_xid = bcm_rpc_reorder_next_xid(rpci);

			RPC_ERR(("%s: reorder queue depth %d, skipping ID 0x%x to 0x%x\n",
			         __FUNCTION__, rpci->reorder_depth,
			         rpci->oe_trans, next_xid));
			rpci->cnt_reorder_overflow++;
			rpci->cnt_rx_drop_hole += (uint)(next_xid - rpci->oe_trans);
			rpci->oe_trans = next_xid;
			bcm_rpc_process_reorder_queue(rpci);
		}

		goto done;
	}

	/* Bail out if failed */
	if (!bcm_rpc_buf_recv_inorder(rpci, rpc_buf, hdr_invalid))
		goto done;

	/* see if we can make progress on the reorder backlog */
	bcm_rpc_process_reorder_queue(rpci);

done:
	RPC_RO_UNLOCK(rpci);
}
1357
/* Drain the reorder queue: repeatedly scan for the buffer matching the
 * currently expected transaction ID, deliver it in order, and restart the
 * scan (delivery advances the expected ID, so an earlier-queued buffer may
 * now match). Stop when a full pass finds no match, i.e. the hole has not
 * been filled yet.
 */
static void
bcm_rpc_process_reorder_queue(rpc_info_t *rpci)
{
	uint32 xaction;
	mbool hdr_invalid = 0;

	while (rpci->reorder_pktq) {
		bool found = FALSE;
		rpc_buf_t *buf = rpci->reorder_pktq;
		rpc_buf_t *prev = rpci->reorder_pktq;
		while (buf != NULL) {
			rpc_buf_t *next = bcm_rpc_buf_next_get(rpci->rpc_th, buf);
			hdr_invalid = bcm_rpc_hdr_validate(rpci, buf, &xaction, FALSE);

			if (!mboolisset(hdr_invalid, HDR_XACTION_MISMATCH)) {
				/* Unlink |buf| from the singly linked queue */
				bcm_rpc_buf_next_set(rpci->rpc_th, buf, NULL);

				if (buf == rpci->reorder_pktq)
					rpci->reorder_pktq = next;
				else
					bcm_rpc_buf_next_set(rpci->rpc_th, prev, next);
				rpci->reorder_depth--;

				/* Bail out if failed */
				if (!bcm_rpc_buf_recv_inorder(rpci, buf, hdr_invalid))
					return;

				/* Terminate the inner scan; outer loop restarts it */
				buf = NULL;
				found = TRUE;
			} else {
				prev = buf;
				buf = next;
			}
		}

		/* bail if not found */
		if (!found)
			break;
	}

	return;
}
1400
/* Deliver one in-order buffer: strip the RPC header, advance the expected
 * transaction counters, and hand the payload to the HIGH/LOW receive path.
 * Returns FALSE when the buffer was consumed due to an error (invalid
 * header, or a header-only buffer with no chained payload), TRUE otherwise.
 */
static bool
bcm_rpc_buf_recv_inorder(rpc_info_t *rpci, rpc_buf_t *rpc_buf, mbool hdr_invalid)
{
	rpc_header_t header;
	rpc_acn_t acn = RPC_NULL;

	ASSERT(rpci && rpci->rpc_th);

	RPC_TRACE(("%s: got rpc_buf %p len %d data %p\n", __FUNCTION__,
	           rpc_buf, bcm_rpc_buf_len_get(rpci->rpc_th, rpc_buf),
	           bcm_rpc_buf_data(rpci->rpc_th, rpc_buf)));

#ifdef BCMDBG_RPC
	if (RPC_PKTTRACE_ON()) {
#ifdef BCMDBG
		prhex("RPC Rx Buf", bcm_rpc_buf_data(rpci->rpc_th, rpc_buf),
		      bcm_rpc_buf_len_get(rpci->rpc_th, rpc_buf));
#endif
	}
#endif	/* BCMDBG_RPC */

	header = bcm_rpc_header(rpci, rpc_buf);

	RPC_OSL_LOCK(rpci->rpc_osh);

	if (hdr_invalid) {
		/* Invalid header: drop the packet. In the no-copy variants,
		 * RTN-type buffers are not freed here -- presumably owned by
		 * the transport's return path; confirm against bcm_rpc_tp.
		 */
		RPC_ERR(("%s: bcm_rpc_hdr_validate failed on 0x%08x 0x%x\n", __FUNCTION__,
		         header, hdr_invalid));
#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC)
		if (RPC_HDR_TYPE(header) != RPC_TYPE_RTN) {
#if defined(USBAP)
			PKTFRMNATIVE(rpci->osh, rpc_buf);
#endif
			PKTFREE(rpci->osh, rpc_buf, FALSE);
		}
#else
		bcm_rpc_tp_buf_free(rpci->rpc_th, rpc_buf);
#endif /* defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC) */
		RPC_OSL_UNLOCK(rpci->rpc_osh);
		return FALSE;
	}

	RPC_TRACE(("%s state:0x%x type:0x%x session:0x%x xacn:0x%x\n", __FUNCTION__, rpci->state,
		RPC_HDR_TYPE(header), RPC_HDR_SESSION(header), RPC_HDR_XACTION(header)));

	if (bcm_rpc_buf_len_get(rpci->rpc_th, rpc_buf) > RPC_HDR_LEN)
		bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_HDR_LEN);
	else {
		/* if the head packet ends with rpc_hdr, free and advance to next packet in chain */
		rpc_buf_t *next_p;

		ASSERT(bcm_rpc_buf_len_get(rpci->rpc_th, rpc_buf) == RPC_HDR_LEN);
		next_p = (rpc_buf_t*)PKTNEXT(rpci->osh, rpc_buf);

		RPC_TRACE(("%s: following pkt chain to pkt %p len %d\n", __FUNCTION__,
		           next_p, bcm_rpc_buf_len_get(rpci->rpc_th, next_p)));

		PKTSETNEXT(rpci->osh, rpc_buf, NULL);
		bcm_rpc_tp_buf_free(rpci->rpc_th, rpc_buf);
		rpc_buf = next_p;
		if (rpc_buf == NULL) {
			RPC_OSL_UNLOCK(rpci->rpc_osh);
			return FALSE;
		}
	}

	switch (RPC_HDR_TYPE(header)) {
	case RPC_TYPE_MGN:
		acn = bcm_rpc_mgn_acn(rpci, rpc_buf);
		bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_ACN_LEN);
		RPC_TRACE(("Mgn: %x\n", acn));
		break;
	case RPC_TYPE_RTN:
#ifdef WLC_HIGH
		rpci->oe_rtn_trans = RPC_HDR_XACTION(header) + 1;
		break;
#endif
		/* FALLTHROUGH in WLC_LOW builds: RTN advances oe_trans like DATA */
	case RPC_TYPE_DATA:
		rpci->oe_trans = RPC_HDR_XACTION(header) + 1;
		break;
	default:
		ASSERT(0);
	};

#ifdef WLC_HIGH
	rpc_buf = bcm_rpc_buf_recv_high(rpci, RPC_HDR_TYPE(header), acn, rpc_buf);
#else
	rpc_buf = bcm_rpc_buf_recv_low(rpci, header, acn, rpc_buf);
#endif
	RPC_OSL_UNLOCK(rpci->rpc_osh);

	/* A non-NULL return means the receive path did not consume the buffer */
	if (rpc_buf)
		bcm_rpc_tp_buf_free(rpci->rpc_th, rpc_buf);
	return TRUE;
}
1496
1497#ifdef WLC_HIGH
/* Handle a management-type packet on the HIGH (host) side: connection
 * ACK/NACK, HELLO (NDIS only) and link-down notification. Called with the
 * RPC lock held; it is dropped temporarily around bcm_rpc_down().
 */
static void
bcm_rpc_buf_recv_mgn_high(struct rpc_info *rpci, rpc_acn_t acn, rpc_buf_t *rpc_buf)
{
	rpc_rc_t reason = RPC_RC_ACK;
	uint32 version = 0;

	RPC_ERR(("%s: Recvd:%x Version: 0x%x\nState: %x Session:%d\n", __FUNCTION__,
	         acn, rpci->version, rpci->state, rpci->session));

	/* These actions carry a version and reason code ahead of the payload */
#ifndef NDIS
	if (acn == RPC_CONNECT_ACK || acn == RPC_CONNECT_NACK) {
#else
	if (acn == RPC_HELLO || acn == RPC_CONNECT_ACK || acn == RPC_CONNECT_NACK) {
#endif
		version = bcm_rpc_mgn_ver(rpci, rpc_buf);
		bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_VER_LEN);

		reason = bcm_rpc_mgn_reason(rpci, rpc_buf);
		bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_RC_LEN);

		RPC_ERR(("%s: Reason: %x Dongle Version: 0x%x\n", __FUNCTION__,
		         reason, version));
	}

	switch (acn) {
#ifdef NDIS
	case RPC_HELLO:
		/* If the original thread has not given up,
		 * then change the state and wake it up
		 */
		if (rpci->state == WAIT_HELLO) {
			rpci->state = HELLO_RECEIVED;

			RPC_ERR(("%s: Hello Received!\n", __FUNCTION__));
			if (rpci->wait_init)
				RPC_OSL_WAKE(rpci->rpc_osh);
		}
		break;
#endif
	case RPC_CONNECT_ACK:
		/* If the original thread has not given up,
		 * then change the state and wake it up
		 */
		if (rpci->state != UNINITED) {
			rpci->state = ESTABLISHED;
			rpci->chipid = bcm_rpc_mgn_chipid(rpci, rpc_buf);
			bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_CHIPID_LEN);

			RPC_ERR(("%s: Connected!\n", __FUNCTION__));
			if (rpci->wait_init)
				RPC_OSL_WAKE(rpci->rpc_osh);
		}
		ASSERT(reason != RPC_RC_VER_MISMATCH);
		break;

	case RPC_CONNECT_NACK:
		/* Connect failed. Just bail out by waking the thread */
		RPC_ERR(("%s: Connect failed !!!\n", __FUNCTION__));
		if (rpci->wait_init)
			RPC_OSL_WAKE(rpci->rpc_osh);
		break;

	case RPC_DOWN:
		/* Drop the caller-held RPC lock around the teardown --
		 * presumably bcm_rpc_down() acquires it itself; confirm.
		 */
		RPC_OSL_UNLOCK(rpci->rpc_osh);
		bcm_rpc_down(rpci);

		RPC_OSL_LOCK(rpci->rpc_osh);
		break;

	default:
		ASSERT(0);
		break;
	}
}
1572
/* Dispatch a received buffer on the HIGH side by RPC type. Returns NULL
 * when the buffer was consumed (handed off or freed); otherwise returns
 * the buffer so the caller frees it.
 */
static rpc_buf_t *
bcm_rpc_buf_recv_high(struct rpc_info *rpci, rpc_type_t type, rpc_acn_t acn, rpc_buf_t *rpc_buf)
{
	RPC_TRACE(("%s: acn %d\n", __FUNCTION__, acn));

	switch (type) {
	case RPC_TYPE_RTN:
		/* Hand the return buffer to the caller blocked in
		 * bcm_rpc_call_with_return() and wake it.
		 */
		if (rpci->wait_return) {
			rpci->rtn_rpcbuf = rpc_buf;
			/* This buffer will be freed in bcm_rpc_tp_recv_rtn() */
			rpc_buf = NULL;
			RPC_OSL_WAKE(rpci->rpc_osh);
		} else if (rpci->state != DISCONNECTED)
			RPC_ERR(("%s: Received return buffer but no one waiting\n", __FUNCTION__));
		break;

	case RPC_TYPE_MGN:
#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC)
		/* No-copy variants: management buffers are freed here directly */
		bcm_rpc_buf_recv_mgn_high(rpci, acn, rpc_buf);
#if defined(USBAP)
		PKTFRMNATIVE(rpci->osh, rpc_buf);
#endif
		PKTFREE(rpci->osh, rpc_buf, FALSE);
		rpc_buf = NULL;
#else
		bcm_rpc_buf_recv_mgn_high(rpci, acn, rpc_buf);
#endif /* defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC) */
		break;

	case RPC_TYPE_DATA:
		ASSERT(rpci->state == ESTABLISHED);
#ifdef BCMDBG_RPC
		/* Prepare the current log entry but add only if the TX was successful */
		/* This is done here before DATA pointer gets modified */
		if (RPC_PKTLOG_ON()) {
			struct rpc_pktlog cur;
			bcm_rpc_prep_entry(rpci, rpc_buf, &cur, FALSE);
			bcm_rpc_add_entry_rx(rpci, &cur);
		}
#endif /* BCMDBG_RPC */
		if (rpci->dispatchcb) {
#if !defined(USBAP)
#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC)
			PKTTONATIVE(rpci->osh, rpc_buf);
#endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY || BCM_RPC_ROC */
#endif /* !USBAP */
			(rpci->dispatchcb)(rpci->ctx, rpc_buf);
			/* The dispatch routine will free the buffer */
			rpc_buf = NULL;
		} else {
			RPC_ERR(("%s: no rpcq callback, drop the pkt\n", __FUNCTION__));
		}
		break;

	default:
		ASSERT(0);
	}

	return (rpc_buf);
}
1633#else
/* Handle a management-type packet on the LOW (dongle) side: respond to
 * HELLO, establish/re-establish the session on CONNECT/RESET (after a
 * version check), and tear down on RPC_DOWN.
 */
static void
bcm_rpc_buf_recv_mgn_low(struct rpc_info *rpci, uint8 session, rpc_acn_t acn, rpc_buf_t *rpc_buf)
{
	uint32 reason = 0;
	uint32 version = 0;

	RPC_TRACE(("%s: Recvd:%x Version: 0x%x\nState: %x Session:%d\n", __FUNCTION__,
	         acn,
	         rpci->version, rpci->state, rpci->session));

	if (acn == RPC_HELLO) {
		bcm_rpc_connect_resp(rpci, RPC_HELLO, RPC_RC_HELLO);
	} else if (acn == RPC_CONNECT || acn == RPC_RESET) {
		version = bcm_rpc_mgn_ver(rpci, rpc_buf);

		RPC_ERR(("%s: Host Version: 0x%x\n", __FUNCTION__, version));

		ASSERT(rpci->state != UNINITED);

		/* Refuse to connect across mismatched RPC protocol versions */
		if (version != rpci->version) {
			RPC_ERR(("RPC Establish failed due to version mismatch\n"));
			RPC_ERR(("Expected: 0x%x Got: 0x%x\n", rpci->version, version));
			RPC_ERR(("Connect failed !!!\n"));

			rpci->state = WAIT_INITIALIZING;
			bcm_rpc_connect_resp(rpci, RPC_CONNECT_NACK, RPC_RC_VER_MISMATCH);
			return;
		}

		/* When receiving CONNECT/RESET from HIGH, just
		 * resync to the HIGH's session and reset the transactions
		 */
		if ((acn == RPC_CONNECT) && (rpci->state == ESTABLISHED))
			reason = RPC_RC_RECONNECT;

		rpci->session = session;

		if (bcm_rpc_connect_resp(rpci, RPC_CONNECT_ACK, reason)) {
			/* call the resync callback if already established */
			if ((acn == RPC_CONNECT) && (rpci->state == ESTABLISHED) &&
			    (rpci->resync_cb)) {
				(rpci->resync_cb)(rpci->dnctx);
			}
			rpci->state = ESTABLISHED;
		} else {
			RPC_ERR(("%s: RPC Establish failed !!!\n", __FUNCTION__));
		}

		RPC_ERR(("Connected Session:%x!\n", rpci->session));
		/* Fresh session: restart all transaction counters */
		rpci->oe_trans = 0;
		rpci->trans = 0;
		rpci->rtn_trans = 0;
	} else if (acn == RPC_DOWN) {
		bcm_rpc_down(rpci);
	}
}
1690
1691static rpc_buf_t *
1692bcm_rpc_buf_recv_low(struct rpc_info *rpci, rpc_header_t header,
1693                     rpc_acn_t acn, rpc_buf_t *rpc_buf)
1694{
1695	switch (RPC_HDR_TYPE(header)) {
1696	case RPC_TYPE_MGN:
1697		bcm_rpc_buf_recv_mgn_low(rpci, RPC_HDR_SESSION(header), acn, rpc_buf);
1698		break;
1699
1700	case RPC_TYPE_RTN:
1701	case RPC_TYPE_DATA:
1702		ASSERT(rpci->state == ESTABLISHED);
1703#ifdef BCMDBG_RPC
1704		/* Prepare the current log entry but add only if the TX was successful */
1705		/* This is done here before DATA pointer gets modified */
1706		if (RPC_PKTLOG_ON()) {
1707			struct rpc_pktlog cur;
1708			bcm_rpc_prep_entry(rpci, rpc_buf, &cur, FALSE);
1709			bcm_rpc_add_entry_rx(rpci, &cur);
1710		}
1711#endif /* BCMDBG_RPC */
1712
1713		if (rpci->dispatchcb) {
1714			(rpci->dispatchcb)(rpci->ctx, rpc_buf);
1715			rpc_buf = NULL;
1716		} else {
1717			RPC_ERR(("%s: no rpcq callback, drop the pkt\n", __FUNCTION__));
1718			ASSERT(0);
1719		}
1720		break;
1721
1722	default:
1723		ASSERT(0);
1724	}
1725
1726	return (rpc_buf);
1727}
1728#endif /* WLC_HIGH */
1729
1730#ifdef BCMDBG_RPC
1731static void
1732bcm_rpc_pktlog_init(rpc_info_t *rpci)
1733{
1734	rpc_msg_level |= RPC_PKTLOG_VAL;
1735
1736	if (RPC_PKTLOG_ON()) {
1737		if ((rpci->send_log = MALLOC(rpci->osh,
1738			sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE)) == NULL)
1739			goto err;
1740		bzero(rpci->send_log, sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE);
1741		if ((rpci->recv_log = MALLOC(rpci->osh,
1742			sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE)) == NULL)
1743			goto err;
1744		bzero(rpci->recv_log, sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE);
1745		return;
1746	}
1747	RPC_ERR(("pktlog is on\n"));
1748err:
1749	bcm_rpc_pktlog_deinit(rpci);
1750}
1751
/* Free the TX/RX pktlog buffers (if allocated) and clear the pktlog
 * message-level bit. Safe to call with partially-initialized state,
 * which is how bcm_rpc_pktlog_init() uses it on allocation failure.
 */
static void
bcm_rpc_pktlog_deinit(rpc_info_t *rpci)
{
	if (rpci->send_log) {
		MFREE(rpci->osh, rpci->send_log, sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE);
		rpci->send_log = NULL;
	}
	if (rpci->recv_log) {
		MFREE(rpci->osh, rpci->recv_log, sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE);
		rpci->recv_log = NULL;
	}
	rpc_msg_level &= ~RPC_PKTLOG_VAL; /* Turn off logging on failure */
}
1765
1766static struct rpc_pktlog *
1767bcm_rpc_prep_entry(struct rpc_info * rpci, rpc_buf_t *b, struct rpc_pktlog *cur, bool tx)
1768{
1769	bzero(cur, sizeof(struct rpc_pktlog));
1770	if (tx) {
1771		cur->trans = rpci->trans;
1772	} else {
1773		/* this function is called after match, so the oe_trans is already advanced */
1774		cur->trans = rpci->oe_trans - 1;
1775	}
1776	cur->len = bcm_rpc_buf_len_get(rpci->rpc_th, b);
1777	bcopy(bcm_rpc_buf_data(rpci->rpc_th, b), cur->data, RPC_PKTLOG_DATASIZE);
1778	return cur;
1779}
1780
1781static void
1782bcm_rpc_add_entry_tx(struct rpc_info * rpci, struct rpc_pktlog *cur)
1783{
1784	RPC_OSL_LOCK(rpci->rpc_osh);
1785	bcopy(cur, &rpci->send_log[rpci->send_log_idx], sizeof(struct rpc_pktlog));
1786	rpci->send_log_idx = (rpci->send_log_idx + 1) % RPC_PKTLOG_SIZE;
1787
1788	if (rpci->send_log_num < RPC_PKTLOG_SIZE)
1789		rpci->send_log_num++;
1790
1791	RPC_OSL_UNLOCK(rpci->rpc_osh);
1792}
1793
1794static void
1795bcm_rpc_add_entry_rx(struct rpc_info * rpci, struct rpc_pktlog *cur)
1796{
1797	bcopy(cur, &rpci->recv_log[rpci->recv_log_idx], sizeof(struct rpc_pktlog));
1798	rpci->recv_log_idx = (rpci->recv_log_idx + 1) % RPC_PKTLOG_SIZE;
1799
1800	if (rpci->recv_log_num < RPC_PKTLOG_SIZE)
1801		rpci->recv_log_num++;
1802}
1803#endif /* BCMDBG_RPC */
1804
1805#ifdef WLC_HIGH
/* Dump host-side RPC state (session, transaction counters, reorder stats)
 * into |b|, then append the transport-layer dump. No-op (returns 0) in
 * non-BCMDBG builds.
 */
int
bcm_rpc_dump(rpc_info_t *rpci, struct bcmstrbuf *b)
{
#ifdef BCMDBG

	bcm_bprintf(b, "\nHOST rpc dump:\n");
	RPC_OSL_LOCK(rpci->rpc_osh);
	bcm_bprintf(b, "Version: 0x%x State: %x\n", rpci->version, rpci->state);
	bcm_bprintf(b, "session %d trans 0x%x oe_trans 0x%x rtn_trans 0x%x oe_rtn_trans 0x%x\n",
		rpci->session, rpci->trans, rpci->oe_trans,
		rpci->rtn_trans, rpci->oe_rtn_trans);
	bcm_bprintf(b, "xactionID out of order %d\n", rpci->cnt_xidooo);
	bcm_bprintf(b, "reorder queue depth %u first ID 0x%x, max depth %u, tossthreshold %u\n",
		rpci->reorder_depth, bcm_rpc_reorder_next_xid(rpci), rpci->reorder_depth_max,
		BCM_RPC_REORDER_LIMIT);

	RPC_OSL_UNLOCK(rpci->rpc_osh);
	return bcm_rpc_tp_dump(rpci->rpc_th, b);
#else
	return 0;
#endif	/* BCMDBG */
}
1828
/* Copy the TX (send=TRUE) or RX pktlog into |buf|, oldest entry first, as
 * RPC_PKTLOG_RD_LEN-word records of (first data word, transaction id, len).
 * Returns the number of entries copied, BCME_BUFTOOSHORT, or -1 when
 * BCMDBG_RPC is compiled out.
 * NOTE(review): |buf_size| is zeroed as a byte count but compared against
 * ret * RPC_PKTLOG_RD_LEN with no sizeof(uint32) factor -- confirm which
 * unit callers pass.
 */
int
bcm_rpc_pktlog_get(struct rpc_info *rpci, uint32 *buf, uint buf_size, bool send)
{
	int ret = -1;

#ifdef BCMDBG_RPC
	int start, i, tot;

	/* Clear the whole buffer */
	bzero(buf, buf_size);
	RPC_OSL_LOCK(rpci->rpc_osh);
	/* A wrapped log's oldest entry sits just past the write index */
	if (send) {
		ret = rpci->send_log_num;
		if (ret < RPC_PKTLOG_SIZE)
			start = 0;
		else
			start = (rpci->send_log_idx + 1) % RPC_PKTLOG_SIZE;
	} else {
		ret = rpci->recv_log_num;
		if (ret < RPC_PKTLOG_SIZE)
			start = 0;
		else
			start = (rpci->recv_log_idx + 1) % RPC_PKTLOG_SIZE;
	}

	/* Ensure the caller's buffer can hold every record */
	if (buf_size < (uint) (ret * RPC_PKTLOG_RD_LEN)) {
		RPC_OSL_UNLOCK(rpci->rpc_osh);
		RPC_ERR(("%s buf too short\n", __FUNCTION__));
		return BCME_BUFTOOSHORT;
	}

	if (ret == 0) {
		RPC_OSL_UNLOCK(rpci->rpc_osh);
		RPC_ERR(("%s no record\n", __FUNCTION__));
		return ret;
	}

	tot = ret;
	for (i = 0; tot > 0; tot--, i++) {
		if (send) {
			buf[i*RPC_PKTLOG_RD_LEN] = rpci->send_log[start].data[0];
			buf[i*RPC_PKTLOG_RD_LEN+1] = rpci->send_log[start].trans;
			buf[i*RPC_PKTLOG_RD_LEN+2] = rpci->send_log[start].len;
			start++;
		} else {
			buf[i*RPC_PKTLOG_RD_LEN] = rpci->recv_log[start].data[0];
			buf[i*RPC_PKTLOG_RD_LEN+1] = rpci->recv_log[start].trans;
			buf[i*RPC_PKTLOG_RD_LEN+2] = rpci->recv_log[start].len;
			start++;
		}
		start = (start % RPC_PKTLOG_SIZE);
	}
	RPC_OSL_UNLOCK(rpci->rpc_osh);

#endif	/* BCMDBG_RPC */
	return ret;
}
1887#endif	/* WLC_HIGH */
1888
1889
1890#ifdef BCMDBG_RPC
1891
1892static void
1893_bcm_rpc_dump_pktlog(rpc_info_t *rpci)
1894{
1895	int ret = -1;
1896	int start, i;
1897
1898	RPC_OSL_LOCK(rpci->rpc_osh);
1899	ret = rpci->send_log_num;
1900	if (ret == 0)
1901		goto done;
1902
1903	if (ret < RPC_PKTLOG_SIZE)
1904		start = 0;
1905	else
1906		start = (rpci->send_log_idx + 1) % RPC_PKTLOG_SIZE;
1907
1908	printf("send %d\n", ret);
1909	for (i = 0; ret > 0; ret--, i++) {
1910		printf("[%d] trans 0x%x len %d data 0x%x\n", i,
1911		       rpci->send_log[start].trans,
1912		       rpci->send_log[start].len,
1913		       rpci->send_log[start].data[0]);
1914		start++;
1915		start = (start % RPC_PKTLOG_SIZE);
1916	}
1917
1918	ret = rpci->recv_log_num;
1919	if (ret == 0)
1920		goto done;
1921
1922	if (ret < RPC_PKTLOG_SIZE)
1923		start = 0;
1924	else
1925		start = (rpci->recv_log_idx + 1) % RPC_PKTLOG_SIZE;
1926
1927	printf("recv %d\n", ret);
1928	for (i = 0; ret > 0; ret--, i++) {
1929		printf("[%d] trans 0x%x len %d data 0x%x\n", i,
1930		       rpci->recv_log[start].trans,
1931		       rpci->recv_log[start].len,
1932		       rpci->recv_log[start].data[0]);
1933		start++;
1934		start = (start % RPC_PKTLOG_SIZE);
1935	}
1936
1937done:
1938	RPC_OSL_UNLOCK(rpci->rpc_osh);
1939}
1940
1941#ifdef WLC_HIGH
/* Host-side wrapper: label and dump both pktlogs. */
static void
bcm_rpc_dump_pktlog_high(rpc_info_t *rpci)
{
	printf("HOST rpc pktlog dump:\n");
	_bcm_rpc_dump_pktlog(rpci);
}
1948
1949#else
1950
/* Dongle-side wrapper with a console-command style signature -- presumably
 * registered as a debug command handler; |argc|/|argv| unused, |arg| is the
 * rpc_info pointer.
 */
static void
bcm_rpc_dump_pktlog_low(uint32 arg, uint argc, char *argv[])
{
	rpc_info_t *rpci;

	rpci = (rpc_info_t *)(uintptr)arg;

	printf("DONGLE rpc pktlog dump:\n");
	_bcm_rpc_dump_pktlog(rpci);
}
1961#endif /* WLC_HIGH */
1962#endif /* BCMDBG_RPC */
1963
1964#ifdef WLC_LOW
/* Dump RPC state and counters. Built as bcm_rpc_dump_state() (console
 * command signature) in WLC_LOW, or bcm_rpc_fatal_dump() (void *arg) in
 * WLC_HIGH; the body is shared.
 */
static void
bcm_rpc_dump_state(uint32 arg, uint argc, char *argv[])
#else
static void
bcm_rpc_fatal_dump(void *arg)
#endif
{
#ifdef BCMDBG_RPC
#ifndef WLC_LOW
	struct bcmstrbuf b;
	char *buf, *t, *p;
	uint size = 1024*1024;
#endif /* WLC_LOW */
#endif /* BCMDBG_RPC */
	rpc_info_t *rpci = (rpc_info_t *)(uintptr)arg;
	/* NOTE(review): this header prints "DONGLE" even in the WLC_HIGH
	 * (host fatal-dump) build of this function.
	 */
	printf("DONGLE rpc dump:\n");
	printf("Version: 0x%x State: %x\n", rpci->version, rpci->state);
	printf("session %d trans 0x%x oe_trans 0x%x rtn_trans 0x%x\n",
	       rpci->session, rpci->trans, rpci->oe_trans,
	       rpci->rtn_trans);
	printf("xactionID out of order %u reorder ovfl %u dropped hole %u\n",
	       rpci->cnt_xidooo, rpci->cnt_reorder_overflow, rpci->cnt_rx_drop_hole);
	printf("reorder queue depth %u first ID 0x%x reorder_q_depth_max %d, tossthreshold %u\n",
	       rpci->reorder_depth, bcm_rpc_reorder_next_xid(rpci), rpci->reorder_depth_max,
	       BCM_RPC_REORDER_LIMIT);

#ifdef BCMDBG_RPC
#ifdef WLC_LOW
	bcm_rpc_tp_dump(rpci->rpc_th);
#else
	/* HIGH side: render the transport dump into a temporary buffer and
	 * print it line by line (printf on this path cannot take long strings).
	 */
	buf = (char *)MALLOC(rpci->osh, size);

	if (buf != NULL) {
		bzero(buf, size);
		bcm_binit(&b, buf, size);
		bcm_rpc_tp_dump(rpci->rpc_th, &b);
		p = buf;
		while (p != NULL) {
			/* Skip NUL padding between rendered lines */
			while ((((uintptr)p) < (((uintptr)buf) + size)) && (*p == '\0'))
					p++;
			if (((uintptr)p) >= (((uintptr)buf) + size))
					break;
			if ((t = strchr(p, '\n')) != NULL) {
				*t++ = '\0';
				printf("%s\n", p);
			}

			p = t;
		}
		MFREE(rpci->osh, buf, size);
	}
#endif /* WLC_LOW */
#endif /* BCMDBG_RPC */
}
2019
2020void
2021bcm_rpc_msglevel_set(struct rpc_info *rpci, uint16 msglevel, bool high)
2022{
2023#ifdef WLC_HIGH
2024	ASSERT(high == TRUE);
2025	/* high 8 bits are for rpc, low 8 bits are for tp */
2026	rpc_msg_level = msglevel >> 8;
2027	bcm_rpc_tp_msglevel_set(rpci->rpc_th, (uint8)(msglevel & 0xff), TRUE);
2028	return;
2029#else
2030	ASSERT(high == FALSE);
2031	/* high 8 bits are for rpc, low 8 bits are for tp */
2032	rpc_msg_level = msglevel >> 8;
2033	bcm_rpc_tp_msglevel_set(rpci->rpc_th, (uint8)(msglevel & 0xff), FALSE);
2034	return;
2035#endif
2036}
2037
/* Record the dongle-suspend enable flag (nonzero |val| enables). */
void
bcm_rpc_dngl_suspend_enable_set(rpc_info_t *rpc, uint32 val)
{
	rpc->suspend_enable = val;
}
2043
/* Read back the dongle-suspend enable flag into |*pval|. */
void
bcm_rpc_dngl_suspend_enable_get(rpc_info_t *rpc, uint32 *pval)
{
	*pval = rpc->suspend_enable;
}
2049