1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c)  2003-2014 QLogic Corporation
5 */
6
7#include "qla_target.h"
8/**
9 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
10 * Continuation Type 1 IOCBs to allocate.
11 *
12 * @vha: HA context
13 * @dsds: number of data segment descriptors needed
14 *
15 * Returns the number of IOCB entries needed to store @dsds.
16 */
17static inline uint16_t
18qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
19{
20	uint16_t iocbs;
21
22	iocbs = 1;
23	if (dsds > 1) {
24		iocbs += (dsds - 1) / 5;
25		if ((dsds - 1) % 5)
26			iocbs++;
27	}
28	return iocbs;
29}
30
31/*
32 * qla2x00_debounce_register
33 *      Debounce register.
34 *
35 * Input:
36 *      port = register address.
37 *
38 * Returns:
39 *      register value.
40 */
41static __inline__ uint16_t
42qla2x00_debounce_register(volatile __le16 __iomem *addr)
43{
44	volatile uint16_t first;
45	volatile uint16_t second;
46
47	do {
48		first = rd_reg_word(addr);
49		barrier();
50		cpu_relax();
51		second = rd_reg_word(addr);
52	} while (first != second);
53
54	return (first);
55}
56
57static inline void
58qla2x00_poll(struct rsp_que *rsp)
59{
60	struct qla_hw_data *ha = rsp->hw;
61
62	if (IS_P3P_TYPE(ha))
63		qla82xx_poll(0, rsp);
64	else
65		ha->isp_ops->intr_handler(0, rsp);
66}
67
68static inline uint8_t *
69host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
70{
71       uint32_t *ifcp = (uint32_t *) fcp;
72       uint32_t *ofcp = (uint32_t *) fcp;
73       uint32_t iter = bsize >> 2;
74
75       for (; iter ; iter--)
76               *ofcp++ = swab32(*ifcp++);
77
78       return fcp;
79}
80
81static inline void
82host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
83{
84	uint32_t *isrc = (uint32_t *) src;
85	__le32 *odest = (__le32 *) dst;
86	uint32_t iter = bsize >> 2;
87
88	for ( ; iter--; isrc++)
89		*odest++ = cpu_to_le32(*isrc);
90}
91
/*
 * qla2x00_clean_dsd_pool - Free every DSD buffer attached to a CRC context.
 * @ha: HA context owning the dl_dma_pool
 * @ctx: CRC context whose dsd_list is emptied
 *
 * Returns each entry's DMA buffer to the pool, unlinks the entry and
 * frees it, leaving ctx->dsd_list re-initialized (empty).
 */
static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
{
	struct dsd_dma *dsd, *tdsd;

	/* clean up allocated prev pool */
	list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
		dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
		    dsd->dsd_list_dma);
		list_del(&dsd->list);
		kfree(dsd);
	}
	INIT_LIST_HEAD(&ctx->dsd_list);
}
106
107static inline void
108qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
109{
110	int old_val;
111	uint8_t shiftbits, mask;
112	uint8_t port_dstate_str_sz;
113
114	/* This will have to change when the max no. of states > 16 */
115	shiftbits = 4;
116	mask = (1 << shiftbits) - 1;
117
118	port_dstate_str_sz = sizeof(port_dstate_str) / sizeof(char *);
119	fcport->disc_state = state;
120	while (1) {
121		old_val = atomic_read(&fcport->shadow_disc_state);
122		if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state,
123		    old_val, (old_val << shiftbits) | state)) {
124			ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
125			    "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
126			    fcport->port_name, (old_val & mask) < port_dstate_str_sz ?
127				    port_dstate_str[old_val & mask] : "Unknown",
128			    port_dstate_str[state], fcport->d_id.b24);
129			return;
130		}
131	}
132}
133
134static inline int
135qla2x00_hba_err_chk_enabled(srb_t *sp)
136{
137	/*
138	 * Uncomment when corresponding SCSI changes are done.
139	 *
140	if (!sp->cmd->prot_chk)
141		return 0;
142	 *
143	 */
144	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
145	case SCSI_PROT_READ_STRIP:
146	case SCSI_PROT_WRITE_INSERT:
147		if (ql2xenablehba_err_chk >= 1)
148			return 1;
149		break;
150	case SCSI_PROT_READ_PASS:
151	case SCSI_PROT_WRITE_PASS:
152		if (ql2xenablehba_err_chk >= 2)
153			return 1;
154		break;
155	case SCSI_PROT_READ_INSERT:
156	case SCSI_PROT_WRITE_STRIP:
157		return 1;
158	}
159	return 0;
160}
161
162static inline int
163qla2x00_reset_active(scsi_qla_host_t *vha)
164{
165	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
166
167	/* Test appropriate base-vha and vha flags. */
168	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
169	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
170	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
171	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
172	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
173}
174
175static inline int
176qla2x00_chip_is_down(scsi_qla_host_t *vha)
177{
178	return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started);
179}
180
181static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha,
182			    struct qla_qpair *qpair, fc_port_t *fcport)
183{
184	memset(sp, 0, sizeof(*sp));
185	sp->fcport = fcport;
186	sp->iocbs = 1;
187	sp->vha = vha;
188	sp->qpair = qpair;
189	sp->cmd_type = TYPE_SRB;
190	/* ref : INIT - normal flow */
191	kref_init(&sp->cmd_kref);
192	INIT_LIST_HEAD(&sp->elem);
193}
194
195static inline srb_t *
196qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
197    fc_port_t *fcport, gfp_t flag)
198{
199	srb_t *sp = NULL;
200	uint8_t bail;
201
202	QLA_QPAIR_MARK_BUSY(qpair, bail);
203	if (unlikely(bail))
204		return NULL;
205
206	sp = mempool_alloc(qpair->srb_mempool, flag);
207	if (sp)
208		qla2xxx_init_sp(sp, vha, qpair, fcport);
209	else
210		QLA_QPAIR_MARK_NOT_BUSY(qpair);
211	return sp;
212}
213
/*
 * Warning stubs installed on sp->done/sp->free when an sp is returned to
 * the mempool (see qla2xxx_rel_qpair_sp); they flag use-after-release.
 */
void qla2xxx_rel_done_warning(srb_t *sp, int res);
void qla2xxx_rel_free_warning(srb_t *sp);
216
/*
 * qla2xxx_rel_qpair_sp - Return an srb to its qpair's mempool.
 *
 * sp->done/sp->free are first pointed at warning stubs so that any
 * callback invoked after this release is caught, then the srb is freed
 * and the qpair busy mark taken at allocation time is dropped.
 */
static inline void
qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
{
	sp->qpair = NULL;
	sp->done = qla2xxx_rel_done_warning;
	sp->free = qla2xxx_rel_free_warning;
	mempool_free(sp, qpair->srb_mempool);
	QLA_QPAIR_MARK_NOT_BUSY(qpair);
}
226
227static inline srb_t *
228qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
229{
230	srb_t *sp = NULL;
231	struct qla_qpair *qpair;
232
233	if (unlikely(qla_vha_mark_busy(vha)))
234		return NULL;
235
236	qpair = vha->hw->base_qpair;
237	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag);
238	if (!sp)
239		goto done;
240
241	sp->vha = vha;
242done:
243	if (!sp)
244		QLA_VHA_MARK_NOT_BUSY(vha);
245	return sp;
246}
247
/*
 * qla2x00_rel_sp - Release an srb obtained via qla2x00_get_sp().
 *
 * Drops the vha busy mark taken at allocation, then returns the srb to
 * its qpair mempool (which drops the qpair busy mark as well).
 */
static inline void
qla2x00_rel_sp(srb_t *sp)
{
	QLA_VHA_MARK_NOT_BUSY(sp->vha);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
254
255static inline int
256qla2x00_gid_list_size(struct qla_hw_data *ha)
257{
258	if (IS_QLAFX00(ha))
259		return sizeof(uint32_t) * 32;
260	else
261		return sizeof(struct gid_list_info) * ha->max_fibre_devices;
262}
263
/*
 * qla2x00_handle_mbx_completion - Wake a mailbox-command waiter.
 * @ha: HA context
 * @status: interrupt status, tested for MBX_INTERRUPT
 *
 * If someone is sleeping on mbx_intr_comp (MBX_INTR_WAIT set) and this
 * interrupt carries a mailbox completion with mailbox interrupts
 * enabled, record the completion bit, clear the wait bit (so the waiter
 * is completed at most once) and wake the waiter.
 */
static inline void
qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
{
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
}
274
/*
 * qla2x00_set_retry_delay_timestamp - Honor a SCSI status-qualifier
 * retry-delay hint from the target.
 * @fcport: port that returned the status
 * @sts_qual: raw status qualifier (scope in the top 2 bits, delay in
 *            the low 14 bits, in 100 ms units per SAM-6)
 *
 * Sets fcport->retry_delay_timestamp (jiffies) so I/O to this port is
 * held off for the requested interval, capped at SQ_MAX_WAIT_SEC.
 */
static inline void
qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t sts_qual)
{
	u8 scope;
	u16 qual;
#define SQ_SCOPE_MASK		0xc000 /* SAM-6 rev5 5.3.2 */
#define SQ_SCOPE_SHIFT		14
#define SQ_QUAL_MASK		0x3fff

#define SQ_MAX_WAIT_SEC		60 /* Max I/O hold off time in seconds. */
#define SQ_MAX_WAIT_TIME	(SQ_MAX_WAIT_SEC * 10) /* in 100ms. */

	if (!sts_qual) /* Common case. */
		return;

	scope = (sts_qual & SQ_SCOPE_MASK) >> SQ_SCOPE_SHIFT;
	/* Handle only scope 1 or 2, which is for I-T nexus. */
	if (scope != 1 && scope != 2)
		return;

	/* Skip processing, if retry delay timer is already in effect. */
	if (fcport->retry_delay_timestamp &&
	    time_before(jiffies, fcport->retry_delay_timestamp))
		return;

	/* 0 and 0x3ff0..0x3fff are reserved values; ignore them. */
	qual = sts_qual & SQ_QUAL_MASK;
	if (qual < 1 || qual > 0x3fef)
		return;
	qual = min(qual, (u16)SQ_MAX_WAIT_TIME);

	/* qual is expressed in 100ms increments. */
	fcport->retry_delay_timestamp = jiffies + (qual * HZ / 10);

	ql_log(ql_log_warn, fcport->vha, 0x5101,
	       "%8phC: I/O throttling requested (status qualifier = %04xh), holding off I/Os for %ums.\n",
	       fcport->port_name, sts_qual, qual * 100);
}
312
313static inline bool
314qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
315{
316	if (qla_ini_mode_enabled(vha) &&
317	    (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
318		return true;
319	else if (qla_tgt_mode_enabled(vha) &&
320	    (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
321		return true;
322	else if (qla_dual_mode_enabled(vha) &&
323	    ((vha->ql2xiniexchg + vha->ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
324		return true;
325	else
326		return false;
327}
328
329static inline void
330qla_cpu_update(struct qla_qpair *qpair, uint16_t cpuid)
331{
332	qpair->cpuid = cpuid;
333
334	if (!list_empty(&qpair->hints_list)) {
335		struct qla_qpair_hint *h;
336
337		list_for_each_entry(h, &qpair->hints_list, hint_elem)
338			h->cpuid = qpair->cpuid;
339	}
340}
341
342static inline struct qla_qpair_hint *
343qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair)
344{
345	struct qla_qpair_hint *h;
346	u16 i;
347
348	for (i = 0; i < tgt->ha->max_qpairs + 1; i++) {
349		h = &tgt->qphints[i];
350		if (h->qpair == qpair)
351			return h;
352	}
353
354	return NULL;
355}
356
357static inline void
358qla_83xx_start_iocbs(struct qla_qpair *qpair)
359{
360	struct req_que *req = qpair->req;
361
362	req->ring_index++;
363	if (req->ring_index == req->length) {
364		req->ring_index = 0;
365		req->ring_ptr = req->ring;
366	} else
367		req->ring_ptr++;
368
369	wrt_reg_dword(req->req_q_in, req->ring_index);
370}
371
372static inline int
373qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
374{
375	uint32_t data;
376
377	data =
378	    ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET];
379
380
381	return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
382}
383
/* Firmware resource types charged by qla_get_fw_resources(). */
enum {
	RESOURCE_NONE,
	RESOURCE_IOCB = BIT_0,
	RESOURCE_EXCH = BIT_1,  /* exchange */
	RESOURCE_FORCE = BIT_2, /* skip limit checks, charge unconditionally */
	RESOURCE_HA = BIT_3,    /* adapter-wide atomic counters were charged */
};
391
/*
 * qla_get_fw_resources - Reserve firmware IOCB/exchange resources for a
 * request.
 * @qp: queue pair the request will be posted on
 * @iores: in: res_type flags and iocb_cnt/exch_cnt to reserve;
 *         out: res_type is cleared to RESOURCE_NONE on failure, or gains
 *         RESOURCE_HA when the adapter-wide counters were charged
 *
 * Returns 0 on success, -ENOSPC when a limit would be exceeded.
 * Enforcement is controlled by ql2xenforce_iocb_limit: 0 = off,
 * >= 1 = per-qpair/summed limits, 2 = additionally the adapter-wide
 * atomic counters.
 */
static inline int
qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
	u16 iocbs_used, i;
	u16 exch_used;
	struct qla_hw_data *ha = qp->hw;

	if (!ql2xenforce_iocb_limit) {
		iores->res_type = RESOURCE_NONE;
		return 0;
	}
	if (iores->res_type & RESOURCE_FORCE)
		goto force;

	/* Fast path: only sum all qpairs when this qpair looks full. */
	if ((iores->iocb_cnt + qp->fwres.iocbs_used) >= qp->fwres.iocbs_qp_limit) {
		/* no need to acquire qpair lock. It's just rough calculation */
		iocbs_used = ha->base_qpair->fwres.iocbs_used;
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i])
				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
		}

		if ((iores->iocb_cnt + iocbs_used) >= qp->fwres.iocbs_limit) {
			iores->res_type = RESOURCE_NONE;
			return -ENOSPC;
		}
	}

	/* Same rough summation for exchanges, when requested. */
	if (iores->res_type & RESOURCE_EXCH) {
		exch_used = ha->base_qpair->fwres.exch_used;
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i])
				exch_used += ha->queue_pair_map[i]->fwres.exch_used;
		}

		if ((exch_used + iores->exch_cnt) >= qp->fwres.exch_limit) {
			iores->res_type = RESOURCE_NONE;
			return -ENOSPC;
		}
	}

	/* Strict mode: also enforce the adapter-wide atomic counters. */
	if (ql2xenforce_iocb_limit == 2) {
		if ((iores->iocb_cnt + atomic_read(&ha->fwres.iocb_used)) >=
		    ha->fwres.iocb_limit) {
			iores->res_type = RESOURCE_NONE;
			return -ENOSPC;
		}

		if (iores->res_type & RESOURCE_EXCH) {
			if ((iores->exch_cnt + atomic_read(&ha->fwres.exch_used)) >=
			    ha->fwres.exch_limit) {
				iores->res_type = RESOURCE_NONE;
				return -ENOSPC;
			}
		}
	}

force:
	qp->fwres.iocbs_used += iores->iocb_cnt;
	qp->fwres.exch_used += iores->exch_cnt;
	if (ql2xenforce_iocb_limit == 2) {
		atomic_add(iores->iocb_cnt, &ha->fwres.iocb_used);
		atomic_add(iores->exch_cnt, &ha->fwres.exch_used);
		iores->res_type |= RESOURCE_HA;
	}
	return 0;
}
459
460/*
461 * decrement to zero.  This routine will not decrement below zero
462 * @v:  pointer of type atomic_t
463 * @amount: amount to decrement from v
464 */
465static void qla_atomic_dtz(atomic_t *v, int amount)
466{
467	int c, old, dec;
468
469	c = atomic_read(v);
470	for (;;) {
471		dec = c - amount;
472		if (unlikely(dec < 0))
473			dec = 0;
474
475		old = atomic_cmpxchg((v), c, dec);
476		if (likely(old == c))
477			break;
478		c = old;
479	}
480}
481
482static inline void
483qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
484{
485	struct qla_hw_data *ha = qp->hw;
486
487	if (iores->res_type & RESOURCE_HA) {
488		if (iores->res_type & RESOURCE_IOCB)
489			qla_atomic_dtz(&ha->fwres.iocb_used, iores->iocb_cnt);
490
491		if (iores->res_type & RESOURCE_EXCH)
492			qla_atomic_dtz(&ha->fwres.exch_used, iores->exch_cnt);
493	}
494
495	if (iores->res_type & RESOURCE_IOCB) {
496		if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
497			qp->fwres.iocbs_used -= iores->iocb_cnt;
498		} else {
499			/* should not happen */
500			qp->fwres.iocbs_used = 0;
501		}
502	}
503
504	if (iores->res_type & RESOURCE_EXCH) {
505		if (qp->fwres.exch_used >= iores->exch_cnt) {
506			qp->fwres.exch_used -= iores->exch_cnt;
507		} else {
508			/* should not happen */
509			qp->fwres.exch_used = 0;
510		}
511	}
512	iores->res_type = RESOURCE_NONE;
513}
514
515#define ISP_REG_DISCONNECT 0xffffffffU
516/**************************************************************************
517 * qla2x00_isp_reg_stat
518 *
519 * Description:
520 *        Read the host status register of ISP before aborting the command.
521 *
522 * Input:
523 *       ha = pointer to host adapter structure.
524 *
525 *
526 * Returns:
527 *       Either true or false.
528 *
529 * Note: Return true if there is register disconnect.
530 **************************************************************************/
531static inline
532uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
533{
534	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
535	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
536
537	if (IS_P3P_TYPE(ha))
538		return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT);
539	else
540		return ((rd_reg_dword(&reg->host_status)) ==
541			ISP_REG_DISCONNECT);
542}
543
544static inline
545bool qla_pci_disconnected(struct scsi_qla_host *vha,
546			  struct device_reg_24xx __iomem *reg)
547{
548	uint32_t stat;
549	bool ret = false;
550
551	stat = rd_reg_dword(&reg->host_status);
552	if (stat == 0xffffffff) {
553		ql_log(ql_log_info, vha, 0x8041,
554		       "detected PCI disconnect.\n");
555		qla_schedule_eeh_work(vha);
556		ret = true;
557	}
558	return ret;
559}
560
561static inline bool
562fcport_is_smaller(fc_port_t *fcport)
563{
564	if (wwn_to_u64(fcport->port_name) <
565		wwn_to_u64(fcport->vha->port_name))
566		return true;
567	else
568		return false;
569}
570
/* Complement of fcport_is_smaller(): remote WWPN >= local WWPN. */
static inline bool
fcport_is_bigger(fc_port_t *fcport)
{
	return !fcport_is_smaller(fcport);
}
576
577static inline struct qla_qpair *
578qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair)
579{
580	int cpuid = raw_smp_processor_id();
581
582	if (qpair->cpuid != cpuid &&
583	    ha->qp_cpu_map[cpuid]) {
584		qpair = ha->qp_cpu_map[cpuid];
585	}
586	return qpair;
587}
588
/*
 * qla_mapq_init_qp_cpu_map - Bind a qpair to the CPUs of its MSI-X vector.
 * @ha: HA context owning qp_cpu_map
 * @msix: MSI-X entry whose affinity mask determines the CPUs
 * @qpair: queue pair being mapped
 *
 * Points qp_cpu_map[cpu] at @qpair for every CPU in the vector's
 * affinity mask, records the first such CPU as the qpair's home CPU,
 * and marks the qpair as CPU-mapped. A missing map or affinity mask
 * leaves the qpair unmapped.
 */
static inline void
qla_mapq_init_qp_cpu_map(struct qla_hw_data *ha,
			 struct qla_msix_entry *msix,
			 struct qla_qpair *qpair)
{
	const struct cpumask *mask;
	unsigned int cpu;

	if (!ha->qp_cpu_map)
		return;
	mask = pci_irq_get_affinity(ha->pdev, msix->vector_base0);
	if (!mask)
		return;
	qpair->cpuid = cpumask_first(mask);
	for_each_cpu(cpu, mask) {
		ha->qp_cpu_map[cpu] = qpair;
	}
	msix->cpuid = qpair->cpuid;
	qpair->cpu_mapped = true;
}
609
610static inline void
611qla_mapq_free_qp_cpu_map(struct qla_hw_data *ha)
612{
613	if (ha->qp_cpu_map) {
614		kfree(ha->qp_cpu_map);
615		ha->qp_cpu_map = NULL;
616	}
617}
618
619static inline int qla_mapq_alloc_qp_cpu_map(struct qla_hw_data *ha)
620{
621	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
622
623	if (!ha->qp_cpu_map) {
624		ha->qp_cpu_map = kcalloc(NR_CPUS, sizeof(struct qla_qpair *),
625					 GFP_KERNEL);
626		if (!ha->qp_cpu_map) {
627			ql_log(ql_log_fatal, vha, 0x0180,
628			       "Unable to allocate memory for qp_cpu_map ptrs.\n");
629			return -1;
630		}
631	}
632	return 0;
633}
634