/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <bfa.h>
#include <bfa_ioc.h>
#include <bfa_fwimg_priv.h>
#include <cna/bfa_cna_trcmod.h>
#include <cs/bfa_debug.h>
#include <bfi/bfi_ioc.h>
#include <bfi/bfi_ctreg.h>
#include <aen/bfa_aen_ioc.h>
#include <aen/bfa_aen.h>
#include <log/bfa_log_hal.h>
#include <defs/bfa_defs_pci.h>

BFA_TRC_FILE(CNA, IOC);

/**
 * IOC local definitions
 */
#define BFA_IOC_TOV		2000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_HWINIT_MAX	2
#define BFA_IOC_FWIMG_MINSZ	(16 * 1024)
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)

#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
#define BFA_DBG_FWTRC_LEN					\
	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
	 (sizeof(struct bfa_trc_mod_s) -			\
	  BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
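
/*
 * Reading of BFA_DBG_FWTRC_LEN above: each function's saved trace is
 * BFA_DBG_FWTRC_ENTS trace entries plus the bfa_trc_mod_s bookkeeping
 * header -- the (sizeof(struct bfa_trc_mod_s) - BFA_TRC_MAX *
 * sizeof(struct bfa_trc_s)) term is exactly that header, i.e. the
 * module struct minus its embedded BFA_TRC_MAX-entry array. Per
 * BFA_DBG_FWTRC_OFF(), the per-function buffers are laid out
 * back-to-back in smem starting at BFI_IOC_TRC_OFF.
 */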

/**
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)                    \
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)                  \
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_hbfail(__ioc)                    \
			((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
#define bfa_ioc_is_optrom(__ioc)        \
	(bfi_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)

bfa_boolean_t   bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void     bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void     bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
static void     bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void     bfa_ioc_timeout(void *ioc);
static void     bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void     bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void     bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void     bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void     bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
static void     bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void     bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void     bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void     bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void	bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void     bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void     bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);

/**
 *  bfa_ioc_sm
 */

/**
 * IOC state machine events
 */
enum ioc_event {
	IOC_E_ENABLE = 1,	/*  IOC enable request */
	IOC_E_DISABLE = 2,	/*  IOC disable request */
	IOC_E_TIMEOUT = 3,	/*  f/w response timeout */
	IOC_E_FWREADY = 4,	/*  f/w initialization done */
	IOC_E_FWRSP_GETATTR = 5,	/*  IOC get attribute response */
	IOC_E_FWRSP_ENABLE = 6,	/*  enable f/w response */
	IOC_E_FWRSP_DISABLE = 7,	/*  disable f/w response */
	IOC_E_HBFAIL = 8,	/*  heartbeat failure */
	IOC_E_HWERROR = 9,	/*  hardware error interrupt */
	IOC_E_SEMLOCKED = 10,	/*  h/w semaphore is locked */
	IOC_E_DETACH = 11,	/*  driver detach cleanup */
};

bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
	{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
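
/*
 * Dispatch model behind the declarations above (illustrative sketch;
 * the real macros live in the cs/ state-machine headers): each
 * bfa_fsm_state_decl() declares a <state>_entry() action and a
 * <state>() event handler, and roughly:
 *
 *	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
 *		// ioc->fsm = bfa_ioc_sm_reset; bfa_ioc_sm_reset_entry(ioc);
 *	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
 *		// ioc->fsm(ioc, IOC_E_ENABLE);
 *
 * ioc_sm_table[] maps each handler back to the external bfa_ioc_state
 * value so bfa_ioc_get_state() can report it via bfa_sm_to_state().
 */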

/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	ioc->retry_count = 0;
	ioc->auto_recover = bfa_auto_recover;
}

/**
 * Beginning state. IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			ioc->retry_count = 0;
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		} else {
			bfa_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
		}
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/*
		 * fall through
		 */

	case IOC_E_DETACH:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Notify enable completion callback and generate mismatch AEN.
 */
static void
bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification only once.
	 */
	if (ioc->retry_count == 0) {
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
	}
	ioc->retry_count++;
	bfa_ioc_timer_start(ioc);
}

/**
 * Awaiting firmware version match.
 */
static void
bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/*
		 * fall through
		 */

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Request for semaphore.
 */
static void
bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_SEMLOCKED:
		ioc->retry_count = 0;
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_reset(ioc, BFA_FALSE);
}

/**
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWREADY:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_ioc_timer_start(ioc);
			bfa_ioc_reset(ioc, BFA_TRUE);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_release(ioc);
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_enable(ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_ENABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
				      BFI_IOC_UNINIT);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_FWREADY:
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_HWERROR:
	case IOC_E_FWREADY:
		/**
		 * Hard error or IOC recovery by other function.
		 * Treat it same as heartbeat failure.
		 */
		bfa_ioc_hb_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_HBFAIL:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_disable(ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_TIMEOUT:
		bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_FWREADY:
		break;

	case IOC_E_DETACH:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	bfa_ioc_timer_start(ioc);
}

/**
 * Hardware initialization failed.
 */
static void
bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;

	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(ioc);
	bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);

	/**
	 * Notify other functions on HB failure.
	 */
	bfa_ioc_notify_hbfail(ioc);

	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
		notify->cbfn(notify->cbarg);
	}

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);

	/**
	 * Trigger auto-recovery after a delay.
	 */
	if (ioc->auto_recover) {
		bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer,
				bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER);
	}
}

/**
 * IOC heartbeat failure.
 */
static void
bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		if (ioc->auto_recover)
			bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_FWREADY:
		/**
		 * Recovery is already initiated by other function.
		 */
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}



/**
 *  bfa_ioc_pvt BFA IOC private functions
 */

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/**
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
		notify->cbfn(notify->cbarg);
	}
}

void
bfa_ioc_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}

bfa_boolean_t
bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT 3000

	r32 = bfa_reg_read(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		bfa_os_udelay(2);
		r32 = bfa_reg_read(sem_reg);
	}

	if (r32 == 0)
		return BFA_TRUE;

	bfa_assert(cnt < BFA_SEM_SPINCNT);
	return BFA_FALSE;
}

void
bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
{
	bfa_reg_write(sem_reg, 1);
}
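
/*
 * Illustrative pairing of bfa_ioc_sem_get()/bfa_ioc_sem_release() (the
 * in-tree caller is bfa_ioc_debug_fwtrc() below): spin for the init
 * semaphore, do the serialized work, then release.
 *
 *	if (bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == BFA_TRUE) {
 *		// ... touch resources shared between PCI functions ...
 *		bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
 *	}
 */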

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32        r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
		return;
	}

	bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout,
			ioc, BFA_IOC_HWSEM_TOV);
}

void
bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
{
	bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
{
	bfa_timer_stop(&ioc->sem_timer);
}
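
/*
 * Acquisition pattern implemented above: a read of ioc_sem_reg that
 * returns 0 means this function now owns the h/w semaphore, and
 * IOC_E_SEMLOCKED is posted to the FSM. A non-zero read means another
 * function holds it, so a BFA_IOC_HWSEM_TOV msec timer re-attempts the
 * read via bfa_ioc_sem_timeout() until the owner releases the
 * semaphore by writing 1.
 */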

/**
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32        pss_ctl;
	int             i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32        pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32        pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

/**
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32        pgnum, pgoff;
	u32        loff = 0;
	int             i;
	u32       *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}

/**
 * Returns TRUE if the two firmware versions (md5 checksums) are the same.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int             i;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
			bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}

/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static          bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	/**
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_is_optrom(ioc))
		return BFA_TRUE;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
			bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	if (fwhdr.exec != drv_fwhdr->exec) {
		bfa_trc(ioc, fwhdr.exec);
		bfa_trc(ioc, drv_fwhdr->exec);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

/**
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32        r32;

	r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
}


static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t   fwvalid;

	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
			BFA_FALSE : bfa_ioc_fwver_valid(ioc);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_trc(ioc, ioc_fwstate);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED ||
		(!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
		bfa_trc(ioc, ioc_fwstate);

		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
}

static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32       *msgp = (u32 *) ioc_msg;
	u32        i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	bfa_assert(len <= BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32),
			      bfa_os_wtole(msgp[i]));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0);

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
	(void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
}

static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s  *ioc = cbarg;
	u32     hb_count;

	hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE,
			hb_count);
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check,
			ioc, BFA_IOC_HB_TOV);
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
			BFA_IOC_HB_TOV);
}

static void
bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
{
	bfa_timer_stop(&ioc->ioc_timer);
}
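
/*
 * Heartbeat monitoring in brief: firmware continually increments the
 * ioc_regs.heartbeat counter, and bfa_ioc_hb_check() samples it every
 * BFA_IOC_HB_TOV msecs. Two equal consecutive samples mean the
 * firmware has stalled, and bfa_ioc_recover() posts IOC_E_HBFAIL. The
 * same tick doubles as the mailbox poll timer via bfa_ioc_mbox_poll().
 */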

/**
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_param)
{
	u32       *fwimg;
	u32        pgnum, pgoff;
	u32        loff = 0;
	u32        chunkno = 0;
	u32        i;

	/**
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	/**
	 * Flash based firmware boot
	 */
	bfa_trc(ioc, bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
	if (bfa_ioc_is_optrom(ioc))
		boot_type = BFI_BOOT_TYPE_FLASH;
	fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {

		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/*
	 * Set boot type and boot param at the end.
	 */
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
			bfa_os_swap32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF,
			bfa_os_swap32(boot_param));
}
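
/*
 * smem paging as used in the download loop above: host_page_num_fn
 * selects which page of shared memory the smem_page_start window maps,
 * and loff is the offset inside that window.
 *
 *	loff = PSS_SMEM_PGOFF(loff);	// wraps to 0 on a page boundary
 *	if (loff == 0)
 *		pgnum++;		// slide the window to the next page
 *
 * bfa_ioc_debug_fwtrc() below walks smem with the same idiom.
 */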

static void
bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	bfa_ioc_hwinit(ioc, force);
}

/**
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s *attr = ioc->attr;

	attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
	attr->card_type = bfa_os_ntohl(attr->card_type);
	attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/**
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	int             mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}

/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;
	u32        stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/**
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}

/**
 *  bfa_ioc_public
 */

/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
{
	bfa_os_addr_t   rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
	} else {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_param);

	/**
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}

/**
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}


bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	u32       *msgp = mbmsg;
	u32        r32;
	int             i;

	/**
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
				   i * sizeof(u32));
		msgp[i] = bfa_os_htonl(r32);
	}

	/**
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
	bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
}

void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u *msg;

	msg = (union bfi_ioc_i2h_msg_u *)m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		bfa_assert(0);
	}
}

/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 * @param[in]	trcmod	kernel trace module
 * @param[in]	aen	kernel aen event module
 * @param[in]	logm	kernel logging module
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod,
	       struct bfa_aen_s *aen, struct bfa_log_mod_s *logm)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->timer_mod = timer_mod;
	ioc->trcmod = trcmod;
	ioc->aen = aen;
	ioc->logm = logm;
	ioc->fcmode = BFA_FALSE;
	ioc->pllinit = BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
}

/**
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}

/**
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc = mc;
	ioc->pcidev = *pcidev;
	ioc->ctdev  = bfa_asic_id_ct(ioc->pcidev.device_id);
	ioc->cna = ioc->ctdev && !ioc->fcmode;

	/**
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->ctdev)
		bfa_ioc_set_ct_hwif(ioc);
	else
		bfa_ioc_set_cb_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}

/**
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/**
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *)dm_kva;
}

/**
 * Return size of dma memory required.
 */
u32
bfa_ioc_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
}

void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}

/**
 * Returns memory required for saving firmware trace in case of crash.
 * Driver must call this interface to allocate memory required for
 * automatic saving of firmware trace. Driver should call
 * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to setup this
 * trace memory.
 */
int
bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
{
	return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}

/**
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before calling bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave = dbg_fwsave;
	ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover);
}
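
/*
 * A sketch of the attach-time sequence implied by the comments above
 * (allocation and error handling elided; the message class and buffer
 * names are examples):
 *
 *	bfa_ioc_attach(ioc, bfa, &cbfn, timer_mod, trcmod, aen, logm);
 *	bfa_ioc_pci_init(ioc, pcidev, BFI_MC_IOCFC);
 *	bfa_ioc_mem_claim(ioc, dm_kva, dm_pa);	// bfa_ioc_meminfo() bytes
 *	bfa_ioc_debug_memclaim(ioc, fwsave);	// bfa_ioc_debug_trcsz() bytes
 *	bfa_ioc_enable(ioc);
 */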

u32
bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

u32
bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGOFF(fmaddr);
}

/**
 * Register mailbox message handler functions
 *
 * @param[in]	ioc		IOC instance
 * @param[in]	mcfuncs		message class handler functions
 */
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	int             mc;

	for (mc = 0; mc < BFI_MC_MAX; mc++)
		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}

/**
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}
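
/*
 * Illustrative registration by a common module (the handler name and
 * cbarg are hypothetical):
 *
 *	static void port_isr(void *cbarg, struct bfi_mbmsg_s *m);
 *	...
 *	bfa_ioc_mbox_regisr(ioc, BFI_MC_PORT, port_isr, port);
 *
 * bfa_ioc_mbox_isr() below then routes incoming messages of that class
 * to the registered handler.
 */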

/**
 * Queue a mailbox command request to firmware. Waits if the mailbox is
 * busy. It is the caller's responsibility to serialize access.
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	u32        stat;

	/**
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
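
/*
 * Illustrative caller of bfa_ioc_mbox_queue(), reusing the enable
 * request layout from bfa_ioc_send_enable() above:
 *
 *	struct bfa_mbox_cmd_s cmd;
 *	struct bfi_ioc_ctrl_req_s *req =
 *			(struct bfi_ioc_ctrl_req_s *)cmd.msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
 *		    bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_queue(ioc, &cmd);
 */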

/**
 * Handle mailbox interrupts
 */
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfi_mbmsg_s m;
	int             mc;

	bfa_ioc_msgget(ioc, &m);

	/**
	 * Treat IOC message class as special.
	 */
	mc = m.mh.msg_class;
	if (mc == BFI_MC_IOC) {
		bfa_ioc_isr(ioc, &m);
		return;
	}

	if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
		return;

	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}

void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
	ioc->fcmode  = BFA_TRUE;
	ioc->port_id = bfa_ioc_pcifn(ioc);
}

#ifndef BFA_BIOS_BUILD

/**
 * return true if IOC is disabled
 */
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}

/**
 * return true if IOC firmware is different.
 */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch);
}

#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))

/**
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
	u32        ioc_state;
	bfa_os_addr_t   rb = ioc->pcidev.pci_bar_kva;

	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
		return BFA_FALSE;

	ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	return BFA_TRUE;
}

/**
 * Add to the IOC heartbeat failure notification queue. To be used by
 * common modules.
 */
void
bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
			struct bfa_ioc_hbfail_notify_s *notify)
{
	list_add_tail(&notify->qe, &ioc->hb_notify_q);
}

#define BFA_MFG_NAME "Brocade"
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
{
	struct bfi_ioc_attr_s *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		      sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
}

enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;
	else if (ioc->ioc_mc == BFI_MC_LL)
		return BFA_IOC_TYPE_LL;
	else {
		bfa_assert(ioc->ioc_mc == BFI_MC_LL);
		return BFA_IOC_TYPE_LL;
	}
}

void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
	bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	bfa_os_memcpy((void *)serial_num,
			(void *)ioc->attr->brcd_serialnum,
			BFA_ADAPTER_SERIAL_NUM_LEN);
}

void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
	bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN);
	bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
{
	bfa_assert(chip_rev);

	bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
	bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
	bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
		BFA_VERSION_LEN);
}

void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
	bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
	struct bfi_ioc_attr_s   *ioc_attr;

	bfa_assert(model);
	bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	/**
	 * model name
	 */
	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
			BFA_MFG_NAME, ioc_attr->card_type);
}

enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
}

void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}

/**
 *  bfa_wwn_public
 */
wwn_t
bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->pwwn;
}

wwn_t
bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->nwwn;
}

u64
bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_pwwn;
}

mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
	/*
	 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
	 */
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
		return bfa_ioc_get_mfg_mac(ioc);
	else
		return ioc->attr->mac;
}

wwn_t
bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_pwwn;
}

wwn_t
bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_nwwn;
}

mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
{
	mac_t   mac;

	mac = ioc->attr->mfg_mac;
	mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);

	return mac;
}
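
/*
 * Worked example of the derivation above: if the manufactured base MAC
 * ends in 0x10 and this IOC is PCI function 1, the returned per-port
 * MAC ends in 0x11 (base MAC plus PCI function number in the last
 * octet).
 */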

bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
}

/**
 * Send AEN notification
 */
void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
	union bfa_aen_data_u aen_data;
	struct bfa_log_mod_s *logmod = ioc->logm;
	s32         inst_num = 0;
	enum bfa_ioc_type_e ioc_type;

	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num);

	memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
	memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
	ioc_type = bfa_ioc_get_type(ioc);
	switch (ioc_type) {
	case BFA_IOC_TYPE_FC:
		aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
		break;
	case BFA_IOC_TYPE_FCoE:
		aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
		aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	case BFA_IOC_TYPE_LL:
		aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	default:
		bfa_assert(ioc_type == BFA_IOC_TYPE_FC);
		break;
	}
	aen_data.ioc.ioc_type = ioc_type;
}

/**
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	int             tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}

/**
 * Clear saved firmware trace
 */
void
bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
{
	ioc->dbg_fwsave_once = BFA_TRUE;
}

/**
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	u32        pgnum;
	u32        loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
	int             i, tlen;
	u32       *tbuf = trcdata, r32;

	bfa_trc(ioc, *trclen);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	loff = bfa_ioc_smem_pgoff(ioc, loff);

	/*
	 *  Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
		return BFA_STATUS_FAILED;

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	tlen = *trclen;
	if (tlen > BFA_DBG_FWTRC_LEN)
		tlen = BFA_DBG_FWTRC_LEN;
	tlen /= sizeof(u32);

	bfa_trc(ioc, tlen);

	for (i = 0; i < tlen; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		tbuf[i] = bfa_os_ntohl(r32);
		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/*
	 *  release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);

	*trclen = tlen * sizeof(u32);
	return BFA_STATUS_OK;
}

/**
 * Save firmware trace if configured.
 */
static void
bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
{
	int             tlen;

	if (ioc->dbg_fwsave_len) {
		tlen = ioc->dbg_fwsave_len;
		bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
	}
}

/**
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = BFA_FALSE;
		bfa_ioc_debug_save(ioc);
	}

	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;

	if (ioc->attr->nwwn == 0)
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
	if (ioc->attr->pwwn == 0)
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
}

#endif