• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/scsi/bfa/
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_svc.h>
20#include <cs/bfa_debug.h>
21#include <bfi/bfi_rport.h>
22#include "bfa_intr_priv.h"
23
24BFA_TRC_FILE(HAL, RPORT);
25BFA_MODULE(rport);
26
/**
 * Notify the driver that rport __rp went offline: synchronously when
 * running in FCS context (bfa->fcs set), otherwise deferred through the
 * hal callback queue.
 */
#define bfa_rport_offline_cb(__rp) do {				\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);      \
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));      \
	}								\
} while (0)
35
/**
 * Notify the driver that rport __rp came online: synchronously when
 * running in FCS context (bfa->fcs set), otherwise deferred through the
 * hal callback queue.
 */
#define bfa_rport_online_cb(__rp) do {				\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);      \
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				  __bfa_cb_rport_online, (__rp));      \
		}							\
} while (0)
44
45/*
46 * forward declarations
47 */
48static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
49static void bfa_rport_free(struct bfa_rport_s *rport);
50static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
51static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
52static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
53static void __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete);
54static void __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete);
55
56/**
57 *  bfa_rport_sm BFA rport state machine
58 */
59
60
/**
 * Events posted to the rport state machine.
 */
enum bfa_rport_event {
	BFA_RPORT_SM_CREATE	= 1,	/*  rport create event		*/
	BFA_RPORT_SM_DELETE	= 2,	/*  deleting an existing rport	*/
	BFA_RPORT_SM_ONLINE	= 3,	/*  rport is online		*/
	BFA_RPORT_SM_OFFLINE	= 4,	/*  rport is offline		*/
	BFA_RPORT_SM_FWRSP	= 5,	/*  firmware response		*/
	BFA_RPORT_SM_HWFAIL	= 6,	/*  IOC h/w failure		*/
	BFA_RPORT_SM_QOS_SCN	= 7,	/*  QoS SCN from firmware	*/
	BFA_RPORT_SM_SET_SPEED	= 8,	/*  set rport speed		*/
	BFA_RPORT_SM_QRESUME	= 9,	/*  space in request queue	*/
};
72
73static void	bfa_rport_sm_uninit(struct bfa_rport_s *rp,
74					enum bfa_rport_event event);
75static void	bfa_rport_sm_created(struct bfa_rport_s *rp,
76					 enum bfa_rport_event event);
77static void	bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
78					  enum bfa_rport_event event);
79static void	bfa_rport_sm_online(struct bfa_rport_s *rp,
80					enum bfa_rport_event event);
81static void	bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
82					  enum bfa_rport_event event);
83static void	bfa_rport_sm_offline(struct bfa_rport_s *rp,
84					 enum bfa_rport_event event);
85static void	bfa_rport_sm_deleting(struct bfa_rport_s *rp,
86					  enum bfa_rport_event event);
87static void	bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
88					  enum bfa_rport_event event);
89static void	bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
90					  enum bfa_rport_event event);
91static void	bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
92					    enum bfa_rport_event event);
93static void	bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
94					  enum bfa_rport_event event);
95static void	bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
96					  enum bfa_rport_event event);
97static void	bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
98					  enum bfa_rport_event event);
99
100/**
101 * Beginning state, only online event expected.
102 */
103static void
104bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
105{
106	bfa_trc(rp->bfa, rp->rport_tag);
107	bfa_trc(rp->bfa, event);
108
109	switch (event) {
110	case BFA_RPORT_SM_CREATE:
111		bfa_stats(rp, sm_un_cr);
112		bfa_sm_set_state(rp, bfa_rport_sm_created);
113		break;
114
115	default:
116		bfa_stats(rp, sm_un_unexp);
117		bfa_sm_fault(rp->bfa, event);
118	}
119}
120
/**
 * Rport is created but not yet reported to firmware. ONLINE pushes a
 * firmware create request (or waits for queue space); DELETE frees the
 * rport without any firmware interaction.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* qfull state retries the send when the request queue drains */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
152
153/**
154 * Waiting for rport create response from firmware.
155 */
156static void
157bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
158{
159	bfa_trc(rp->bfa, rp->rport_tag);
160	bfa_trc(rp->bfa, event);
161
162	switch (event) {
163	case BFA_RPORT_SM_FWRSP:
164		bfa_stats(rp, sm_fwc_rsp);
165		bfa_sm_set_state(rp, bfa_rport_sm_online);
166		bfa_rport_online_cb(rp);
167		break;
168
169	case BFA_RPORT_SM_DELETE:
170		bfa_stats(rp, sm_fwc_del);
171		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
172		break;
173
174	case BFA_RPORT_SM_OFFLINE:
175		bfa_stats(rp, sm_fwc_off);
176		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
177		break;
178
179	case BFA_RPORT_SM_HWFAIL:
180		bfa_stats(rp, sm_fwc_hwf);
181		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
182		break;
183
184	default:
185		bfa_stats(rp, sm_fwc_unexp);
186		bfa_sm_fault(rp->bfa, event);
187	}
188}
189
190/**
191 * Request queue is full, awaiting queue resume to send create request.
192 */
193static void
194bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
195{
196	bfa_trc(rp->bfa, rp->rport_tag);
197	bfa_trc(rp->bfa, event);
198
199	switch (event) {
200	case BFA_RPORT_SM_QRESUME:
201		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
202		bfa_rport_send_fwcreate(rp);
203		break;
204
205	case BFA_RPORT_SM_DELETE:
206		bfa_stats(rp, sm_fwc_del);
207		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
208		bfa_reqq_wcancel(&rp->reqq_wait);
209		bfa_rport_free(rp);
210		break;
211
212	case BFA_RPORT_SM_OFFLINE:
213		bfa_stats(rp, sm_fwc_off);
214		bfa_sm_set_state(rp, bfa_rport_sm_offline);
215		bfa_reqq_wcancel(&rp->reqq_wait);
216		bfa_rport_offline_cb(rp);
217		break;
218
219	case BFA_RPORT_SM_HWFAIL:
220		bfa_stats(rp, sm_fwc_hwf);
221		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
222		bfa_reqq_wcancel(&rp->reqq_wait);
223		break;
224
225	default:
226		bfa_stats(rp, sm_fwc_unexp);
227		bfa_sm_fault(rp->bfa, event);
228	}
229}
230
231/**
232 * Online state - normal parking state.
233 */
234static void
235bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
236{
237	struct bfi_rport_qos_scn_s *qos_scn;
238
239	bfa_trc(rp->bfa, rp->rport_tag);
240	bfa_trc(rp->bfa, event);
241
242	switch (event) {
243	case BFA_RPORT_SM_OFFLINE:
244		bfa_stats(rp, sm_on_off);
245		if (bfa_rport_send_fwdelete(rp))
246			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
247		else
248			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
249		break;
250
251	case BFA_RPORT_SM_DELETE:
252		bfa_stats(rp, sm_on_del);
253		if (bfa_rport_send_fwdelete(rp))
254			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
255		else
256			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
257		break;
258
259	case BFA_RPORT_SM_HWFAIL:
260		bfa_stats(rp, sm_on_hwf);
261		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
262		break;
263
264	case BFA_RPORT_SM_SET_SPEED:
265		bfa_rport_send_fwspeed(rp);
266		break;
267
268	case BFA_RPORT_SM_QOS_SCN:
269		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
270		rp->qos_attr = qos_scn->new_qos_attr;
271		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
272		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
273		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
274		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
275
276		qos_scn->old_qos_attr.qos_flow_id  =
277			bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id);
278		qos_scn->new_qos_attr.qos_flow_id  =
279			bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id);
280		qos_scn->old_qos_attr.qos_priority =
281			bfa_os_ntohl(qos_scn->old_qos_attr.qos_priority);
282		qos_scn->new_qos_attr.qos_priority =
283			bfa_os_ntohl(qos_scn->new_qos_attr.qos_priority);
284
285		if (qos_scn->old_qos_attr.qos_flow_id !=
286			qos_scn->new_qos_attr.qos_flow_id)
287			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
288						    qos_scn->old_qos_attr,
289						    qos_scn->new_qos_attr);
290		if (qos_scn->old_qos_attr.qos_priority !=
291			qos_scn->new_qos_attr.qos_priority)
292			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
293						  qos_scn->old_qos_attr,
294						  qos_scn->new_qos_attr);
295		break;
296
297	default:
298		bfa_stats(rp, sm_on_unexp);
299		bfa_sm_fault(rp->bfa, event);
300	}
301}
302
303/**
304 * Firmware rport is being deleted - awaiting f/w response.
305 */
306static void
307bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
308{
309	bfa_trc(rp->bfa, rp->rport_tag);
310	bfa_trc(rp->bfa, event);
311
312	switch (event) {
313	case BFA_RPORT_SM_FWRSP:
314		bfa_stats(rp, sm_fwd_rsp);
315		bfa_sm_set_state(rp, bfa_rport_sm_offline);
316		bfa_rport_offline_cb(rp);
317		break;
318
319	case BFA_RPORT_SM_DELETE:
320		bfa_stats(rp, sm_fwd_del);
321		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
322		break;
323
324	case BFA_RPORT_SM_HWFAIL:
325		bfa_stats(rp, sm_fwd_hwf);
326		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
327		bfa_rport_offline_cb(rp);
328		break;
329
330	default:
331		bfa_stats(rp, sm_fwd_unexp);
332		bfa_sm_fault(rp->bfa, event);
333	}
334}
335
/**
 * Firmware delete is pending on request-queue space; retried on QRESUME.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
365
366/**
367 * Offline state.
368 */
369static void
370bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
371{
372	bfa_trc(rp->bfa, rp->rport_tag);
373	bfa_trc(rp->bfa, event);
374
375	switch (event) {
376	case BFA_RPORT_SM_DELETE:
377		bfa_stats(rp, sm_off_del);
378		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
379		bfa_rport_free(rp);
380		break;
381
382	case BFA_RPORT_SM_ONLINE:
383		bfa_stats(rp, sm_off_on);
384		if (bfa_rport_send_fwcreate(rp))
385			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
386		else
387			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
388		break;
389
390	case BFA_RPORT_SM_HWFAIL:
391		bfa_stats(rp, sm_off_hwf);
392		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
393		break;
394
395	default:
396		bfa_stats(rp, sm_off_unexp);
397		bfa_sm_fault(rp->bfa, event);
398	}
399}
400
401/**
402 * Rport is deleted, waiting for firmware response to delete.
403 */
404static void
405bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
406{
407	bfa_trc(rp->bfa, rp->rport_tag);
408	bfa_trc(rp->bfa, event);
409
410	switch (event) {
411	case BFA_RPORT_SM_FWRSP:
412		bfa_stats(rp, sm_del_fwrsp);
413		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
414		bfa_rport_free(rp);
415		break;
416
417	case BFA_RPORT_SM_HWFAIL:
418		bfa_stats(rp, sm_del_hwf);
419		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
420		bfa_rport_free(rp);
421		break;
422
423	default:
424		bfa_sm_fault(rp->bfa, event);
425	}
426}
427
/**
 * Delete request is pending on request-queue space; retried on QRESUME.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* NOTE(review): counts sm_del_fwrsp here, though no f/w
		 * response was received — looks like a copy-paste; confirm
		 * against the stats definitions. */
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
452
453/**
454 * Waiting for rport create response from firmware. A delete is pending.
455 */
456static void
457bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
458				enum bfa_rport_event event)
459{
460	bfa_trc(rp->bfa, rp->rport_tag);
461	bfa_trc(rp->bfa, event);
462
463	switch (event) {
464	case BFA_RPORT_SM_FWRSP:
465		bfa_stats(rp, sm_delp_fwrsp);
466		if (bfa_rport_send_fwdelete(rp))
467			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
468		else
469			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
470		break;
471
472	case BFA_RPORT_SM_HWFAIL:
473		bfa_stats(rp, sm_delp_hwf);
474		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
475		bfa_rport_free(rp);
476		break;
477
478	default:
479		bfa_stats(rp, sm_delp_unexp);
480		bfa_sm_fault(rp->bfa, event);
481	}
482}
483
484/**
485 * Waiting for rport create response from firmware. Rport offline is pending.
486 */
487static void
488bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
489				 enum bfa_rport_event event)
490{
491	bfa_trc(rp->bfa, rp->rport_tag);
492	bfa_trc(rp->bfa, event);
493
494	switch (event) {
495	case BFA_RPORT_SM_FWRSP:
496		bfa_stats(rp, sm_offp_fwrsp);
497		if (bfa_rport_send_fwdelete(rp))
498			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
499		else
500			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
501		break;
502
503	case BFA_RPORT_SM_DELETE:
504		bfa_stats(rp, sm_offp_del);
505		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
506		break;
507
508	case BFA_RPORT_SM_HWFAIL:
509		bfa_stats(rp, sm_offp_hwf);
510		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
511		break;
512
513	default:
514		bfa_stats(rp, sm_offp_unexp);
515		bfa_sm_fault(rp->bfa, event);
516	}
517}
518
519/**
520 * IOC h/w failed.
521 */
522static void
523bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
524{
525	bfa_trc(rp->bfa, rp->rport_tag);
526	bfa_trc(rp->bfa, event);
527
528	switch (event) {
529	case BFA_RPORT_SM_OFFLINE:
530		bfa_stats(rp, sm_iocd_off);
531		bfa_rport_offline_cb(rp);
532		break;
533
534	case BFA_RPORT_SM_DELETE:
535		bfa_stats(rp, sm_iocd_del);
536		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
537		bfa_rport_free(rp);
538		break;
539
540	case BFA_RPORT_SM_ONLINE:
541		bfa_stats(rp, sm_iocd_on);
542		if (bfa_rport_send_fwcreate(rp))
543			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
544		else
545			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
546		break;
547
548	case BFA_RPORT_SM_HWFAIL:
549		break;
550
551	default:
552		bfa_stats(rp, sm_iocd_unexp);
553		bfa_sm_fault(rp->bfa, event);
554	}
555}
556
557
558
559/**
560 *  bfa_rport_private BFA rport private functions
561 */
562
563static void
564__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
565{
566	struct bfa_rport_s *rp = cbarg;
567
568	if (complete)
569		bfa_cb_rport_online(rp->rport_drv);
570}
571
572static void
573__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
574{
575	struct bfa_rport_s *rp = cbarg;
576
577	if (complete)
578		bfa_cb_rport_offline(rp->rport_drv);
579}
580
581static void
582bfa_rport_qresume(void *cbarg)
583{
584	struct bfa_rport_s	*rp = cbarg;
585
586	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
587}
588
/**
 * Report kernel-virtual memory needed by this module: one bfa_rport_s per
 * configured rport, with the count clamped up to BFA_RPORT_MIN. No DMA
 * memory is used, so *dm_len is left untouched.
 */
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
}
598
/**
 * Module attach: carve the rport array out of the kva region reserved by
 * bfa_rport_meminfo(), initialize each rport, and advance the meminfo
 * cursor past the consumed memory.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		     struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16        i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);

	rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* num_rports must be non-zero and a power of two */
	bfa_assert(mod->num_rports
		   && !(mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/**
		 * rport tag 0 is reserved (never allocated); only tags
		 * 1..n-1 go on the free list.
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/**
	 * consume memory
	 */
	bfa_meminfo_kva(meminfo) = (u8 *) rp;
}
637
/* Module detach hook — nothing to release; memory is owned by meminfo. */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
642
/* Module start hook — no per-start work required. */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
647
/* Module stop hook — no per-stop work required. */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
652
/**
 * IOC disable: fan HWFAIL out to every active rport. The _safe iterator
 * is required because HWFAIL may free the rport and unlink it from
 * rp_active_q (e.g. from the deleting states).
 */
static void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rport;
	struct list_head        *qe, *qen;

	list_for_each_safe(qe, qen, &mod->rp_active_q) {
		rport = (struct bfa_rport_s *) qe;
		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
	}
}
665
/**
 * Allocate an rport from the module free list and move it to the active
 * list. Returns NULL when the free list is exhausted.
 */
static struct bfa_rport_s *
bfa_rport_alloc(struct bfa_rport_mod_s *mod)
{
	struct bfa_rport_s *rport;

	bfa_q_deq(&mod->rp_free_q, &rport);
	if (rport)
		list_add_tail(&rport->qe, &mod->rp_active_q);

	return rport;
}
677
/**
 * Return an rport to the module free list. The rport must currently be
 * on the active list (asserted).
 */
static void
bfa_rport_free(struct bfa_rport_s *rport)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);

	bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
	list_del(&rport->qe);
	list_add_tail(&rport->qe, &mod->rp_free_q);
}
687
/**
 * Build and queue a BFI_RPORT_H2I_CREATE_REQ to firmware from the cached
 * rport_info. Returns BFA_FALSE (and arms reqq_wait for a QRESUME retry)
 * when the request queue is full.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_lpuid(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	/* only max_frmsz needs byte-swapping; the rest are byte fields or
	 * already in wire order — TODO confirm against bfi_rport.h */
	m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_tag = rp->rport_info.lp_tag;
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
720
/**
 * Build and queue a BFI_RPORT_H2I_DELETE_REQ for the firmware handle of
 * this rport. Returns BFA_FALSE (and arms reqq_wait for a QRESUME retry)
 * when the request queue is full.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
745
/**
 * Build and queue a BFI_RPORT_H2I_SET_SPEED_REQ. Unlike create/delete,
 * this is best-effort: on a full request queue it only traces and gives
 * up (no reqq_wait retry is armed).
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
771
772
773
774/**
775 *  bfa_rport_public
776 */
777
778/**
779 * 		Rport interrupt processing.
780 */
781void
782bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
783{
784	union bfi_rport_i2h_msg_u msg;
785	struct bfa_rport_s *rp;
786
787	bfa_trc(bfa, m->mhdr.msg_id);
788
789	msg.msg = m;
790
791	switch (m->mhdr.msg_id) {
792	case BFI_RPORT_I2H_CREATE_RSP:
793		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
794		rp->fw_handle = msg.create_rsp->fw_handle;
795		rp->qos_attr = msg.create_rsp->qos_attr;
796		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
797		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
798		break;
799
800	case BFI_RPORT_I2H_DELETE_RSP:
801		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
802		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
803		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
804		break;
805
806	case BFI_RPORT_I2H_QOS_SCN:
807		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
808		rp->event_arg.fw_msg = msg.qos_scn_evt;
809		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
810		break;
811
812	default:
813		bfa_trc(bfa, m->mhdr.msg_id);
814		bfa_assert(0);
815	}
816}
817
818
819
820/**
821 *  bfa_rport_api
822 */
823
/**
 * Allocate and initialize an rport, bind it to the driver's rport object
 * and kick its state machine into the "created" state. Returns NULL if
 * no free rport is available.
 */
struct bfa_rport_s *
bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
{
	struct bfa_rport_s *rp;

	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));

	if (rp == NULL)
		return NULL;

	rp->bfa = bfa;
	rp->rport_drv = rport_drv;
	bfa_rport_clear_stats(rp);

	bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);

	return rp;
}
843
/* Delete an rport; the state machine handles any in-flight f/w requests. */
void
bfa_rport_delete(struct bfa_rport_s *rport)
{
	bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
}
849
850void
851bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
852{
853	bfa_assert(rport_info->max_frmsz != 0);
854
855	/**
856	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
857	 * responses. Default to minimum size.
858	 */
859	if (rport_info->max_frmsz == 0) {
860		bfa_trc(rport->bfa, rport->rport_tag);
861		rport_info->max_frmsz = FC_MIN_PDUSZ;
862	}
863
864	bfa_os_assign(rport->rport_info, *rport_info);
865	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
866}
867
/* Take an rport offline; triggers the firmware rport delete. */
void
bfa_rport_offline(struct bfa_rport_s *rport)
{
	bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
}
873
/**
 * Set the operating speed of an rport. Callers must pass a concrete,
 * negotiated speed — zero and AUTO are rejected by assertion.
 */
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_pport_speed speed)
{
	bfa_assert(speed != 0);
	bfa_assert(speed != BFA_PPORT_SPEED_AUTO);

	rport->rport_info.speed = speed;
	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
883
/* Copy the rport's hal statistics into caller-provided storage. */
void
bfa_rport_get_stats(struct bfa_rport_s *rport,
	struct bfa_rport_hal_stats_s *stats)
{
	*stats = rport->stats;
}
890
/**
 * Return the rport's QoS attributes converted from wire (network) order
 * to host order. rport->qos_attr itself is kept in wire order as received
 * from firmware (see bfa_rport_isr / the QOS_SCN handler).
 */
void
bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
					struct bfa_rport_qos_attr_s *qos_attr)
{
	qos_attr->qos_priority  = bfa_os_ntohl(rport->qos_attr.qos_priority);
	qos_attr->qos_flow_id  = bfa_os_ntohl(rport->qos_attr.qos_flow_id);

}
899
/* Reset all hal statistics counters of an rport to zero. */
void
bfa_rport_clear_stats(struct bfa_rport_s *rport)
{
	bfa_os_memset(&rport->stats, 0, sizeof(rport->stats));
}
905