// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Debug traces for zfcp.
 *
 * Copyright IBM Corp. 2002, 2023
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
#include "zfcp_fc.h"

static u32 dbfsize = 4;

module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
		 "number of pages for each debug feature area (default 4)");

static u32 dbflevel = 3;

module_param(dbflevel, uint, 0400);
MODULE_PARM_DESC(dbflevel,
		 "log level for each debug feature area "
		 "(default 3, range 0..6)");

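/* length of a payload trace record carrying 'offset' bytes of data: the
 * fixed part of struct zfcp_dbf_pay plus the used portion of its
 * ZFCP_DBF_PAY_MAX_REC byte data array
 */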
static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
	return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}

static inline
void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
		       u64 req_id)
{
	struct zfcp_dbf_pay *pl = &dbf->pay_buf;
	u16 offset = 0, rec_length;

	spin_lock(&dbf->pay_lock);
	memset(pl, 0, sizeof(*pl));
	pl->fsf_req_id = req_id;
	memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);

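	/* split the payload into chunks of at most ZFCP_DBF_PAY_MAX_REC
	 * bytes; each chunk becomes its own trace record, numbered by
	 * pl->counter
	 */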
	while (offset < length) {
		rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
				 (u16) (length - offset));
		memcpy(pl->data, data + offset, rec_length);
		debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));

		offset += rec_length;
		pl->counter++;
	}

	spin_unlock(&dbf->pay_lock);
}

/**
 * zfcp_dbf_hba_fsf_res - trace event for fsf responses
 * @tag: tag indicating which kind of FSF response has been received
 * @level: trace level to be used for event
 * @req: request for which a response was received
 */
void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
	struct fsf_qtcb_header *q_head = &req->qtcb->header;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_RES;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = q_head->fsf_command;
	rec->fsf_seq_no = q_pref->req_seq_no;
	rec->u.res.req_issued = req->issued;
	rec->u.res.prot_status = q_pref->prot_status;
	rec->u.res.fsf_status = q_head->fsf_status;
	rec->u.res.port_handle = q_head->port_handle;
	rec->u.res.lun_handle = q_head->lun_handle;

	memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
	       FSF_PROT_STATUS_QUAL_SIZE);
	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
	       FSF_STATUS_QUALIFIER_SIZE);

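	/* any log data appended to the QTCB (log_start/log_length) goes to
	 * the payload trace area, correlated by the request ID
	 */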
	rec->pl_len = q_head->log_length;
	zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
			  rec->pl_len, "fsf_res", req->req_id);

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_fsf_fces - trace event for fsf responses related to
 *			   FC Endpoint Security (FCES)
 * @tag: tag indicating which kind of FC Endpoint Security event has occurred
 * @req: request for which a response was received
 * @wwpn: remote port or ZFCP_DBF_INVALID_WWPN
 * @fc_security_old: old FC Endpoint Security of FCP device or connection
 * @fc_security_new: new FC Endpoint Security of FCP device or connection
 */
void zfcp_dbf_hba_fsf_fces(char *tag, const struct zfcp_fsf_req *req, u64 wwpn,
			   u32 fc_security_old, u32 fc_security_new)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
	struct fsf_qtcb_header *q_head = &req->qtcb->header;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	static int const level = 3;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_FCES;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = q_head->fsf_command;
	rec->fsf_seq_no = q_pref->req_seq_no;
	rec->u.fces.req_issued = req->issued;
	rec->u.fces.fsf_status = q_head->fsf_status;
	rec->u.fces.port_handle = q_head->port_handle;
	rec->u.fces.wwpn = wwpn;
	rec->u.fces.fc_security_old = fc_security_old;
	rec->u.fces.fc_security_new = fc_security_new;

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_fsf_reqid - trace only the tag and a request ID
 * @tag: tag documenting the source
 * @level: trace level
 * @adapter: adapter instance the request ID belongs to
 * @req_id: the request ID to trace
 */
void zfcp_dbf_hba_fsf_reqid(const char *const tag, const int level,
			    struct zfcp_adapter *const adapter,
			    const u64 req_id)
{
	struct zfcp_dbf *const dbf = adapter->dbf;
	struct zfcp_dbf_hba *const rec = &dbf->hba_buf;
	struct zfcp_dbf_hba_res *const res = &rec->u.res;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);

	rec->id = ZFCP_DBF_HBA_RES;
	rec->fsf_req_id = req_id;
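	/* only the tag and the request ID are known here; mark all remaining
	 * record fields as unknown/invalid
	 */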
	rec->fsf_req_status = ~0u;
	rec->fsf_cmd = ~0u;
	rec->fsf_seq_no = ~0u;

	res->req_issued = ~0ull;
	res->prot_status = ~0u;
	memset(res->prot_status_qual, 0xff, sizeof(res->prot_status_qual));
	res->fsf_status = ~0u;
	memset(res->fsf_status_qual, 0xff, sizeof(res->fsf_status_qual));
	res->port_handle = ~0u;
	res->lun_handle = ~0u;

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request providing the unsolicited status
 */
void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_status_read_buffer *srb = req->data;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	static int const level = 2;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_USS;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;

	if (!srb)
		goto log;

	rec->u.uss.status_type = srb->status_type;
	rec->u.uss.status_subtype = srb->status_subtype;
	rec->u.uss.d_id = ntoh24(srb->d_id);
	rec->u.uss.lun = srb->fcp_lun;
	memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
	       sizeof(rec->u.uss.queue_designator));

	/* status read buffer payload length */
	rec->pl_len = (!srb->length) ? 0 : srb->length -
			offsetof(struct fsf_status_read_buffer, payload);

	if (rec->pl_len)
		zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
				  "fsf_uss", req->req_id);
log:
	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_bit_err - trace event for bit error conditions
 * @tag: tag indicating which kind of bit error unsolicited status was received
 * @req: request which caused the bit_error condition
 */
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	struct fsf_status_read_buffer *sr_buf = req->data;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BIT;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
	       sizeof(struct fsf_bit_error_payload));

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_def_err - trace event for deferred error messages
 * @adapter: pointer to struct zfcp_adapter
 * @req_id: request id which caused the deferred error message
 * @scount: number of sbals incl. the signaling sbal
 * @pl: array of all involved sbals
 */
void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
			  void **pl)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	unsigned long flags;
	static int const level = 1;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->pay, level)))
		return;

	if (!pl)
		return;

	spin_lock_irqsave(&dbf->pay_lock, flags);
	memset(payload, 0, sizeof(*payload));

	memcpy(payload->area, "def_err", 7);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	length = min((u16)sizeof(struct qdio_buffer),
		     (u16)ZFCP_DBF_PAY_MAX_REC);

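	/* one payload trace record per SBAL; payload->counter doubles as the
	 * index into pl[]
	 */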
	while (payload->counter < scount && (char *)pl[payload->counter]) {
		memcpy(payload->data, (char *)pl[payload->counter], length);
		debug_event(dbf->pay, level, payload, zfcp_dbf_plen(length));
		payload->counter++;
	}

	spin_unlock_irqrestore(&dbf->pay_lock, flags);
}

static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
				struct zfcp_adapter *adapter,
				struct zfcp_port *port,
				struct scsi_device *sdev)
{
	rec->adapter_status = atomic_read(&adapter->status);
	if (port) {
		rec->port_status = atomic_read(&port->status);
		rec->wwpn = port->wwpn;
		rec->d_id = port->d_id;
	}
	if (sdev) {
		rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
		rec->lun = zfcp_scsi_dev_lun(sdev);
	} else
		rec->lun = ZFCP_DBF_INVALID_LUN;
}

/**
 * zfcp_dbf_rec_trig - trace event related to triggered recovery
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock has to be held.
 */
void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
		       struct zfcp_port *port, struct scsi_device *sdev,
		       u8 want, u8 need)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	static int const level = 1;
	struct list_head *entry;
	unsigned long flags;

	lockdep_assert_held(&adapter->erp_lock);

	if (unlikely(!debug_level_enabled(dbf->rec, level)))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_TRIG;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, adapter, port, sdev);

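	/* record the number of queued and currently running ERP actions at
	 * the time of the trigger
	 */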
	list_for_each(entry, &adapter->erp_ready_head)
		rec->u.trig.ready++;

	list_for_each(entry, &adapter->erp_running_head)
		rec->u.trig.running++;

	rec->u.trig.want = want;
	rec->u.trig.need = need;

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock must not be held.
 */
void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
			    struct zfcp_port *port, struct scsi_device *sdev,
			    u8 want, u8 need)
{
	unsigned long flags;

	read_lock_irqsave(&adapter->erp_lock, flags);
	zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
	read_unlock_irqrestore(&adapter->erp_lock, flags);
}

/**
 * zfcp_dbf_rec_run_lvl - trace event related to running recovery
 * @level: trace level to be used for event
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
	struct zfcp_dbf *dbf = erp->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	unsigned long flags;

	if (!debug_level_enabled(dbf->rec, level))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);

	rec->u.run.fsf_req_id = erp->fsf_req_id;
	rec->u.run.rec_status = erp->status;
	rec->u.run.rec_step = erp->step;
	rec->u.run.rec_action = erp->type;

	if (erp->sdev)
		rec->u.run.rec_count =
			atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
	else if (erp->port)
		rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
	else
		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_run - trace event related to running recovery
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
	zfcp_dbf_rec_run_lvl(1, tag, erp);
}

/**
 * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
 * @tag: identifier for event
 * @wka_port: well known address port
 * @req_id: request ID to correlate with potential HBA trace record
 */
void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
			  u64 req_id)
{
	struct zfcp_dbf *dbf = wka_port->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->rec, level)))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->port_status = wka_port->status;
	rec->d_id = wka_port->d_id;
	rec->lun = ZFCP_DBF_INVALID_LUN;

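	/* WKA ports have no associated erp_action; apart from the request ID,
	 * mark the run-specific fields as invalid
	 */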
	rec->u.run.fsf_req_id = req_id;
	rec->u.run.rec_status = ~0;
	rec->u.run.rec_step = ~0;
	rec->u.run.rec_action = ~0;
	rec->u.run.rec_count = ~0;

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

#define ZFCP_DBF_SAN_LEVEL 1

static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
		  char *paytag, struct scatterlist *sg, u8 id, u16 len,
		  u64 req_id, u32 d_id, u16 cap_len)
{
	struct zfcp_dbf_san *rec = &dbf->san_buf;
	u16 rec_len;
	unsigned long flags;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	u16 pay_sum = 0;

	spin_lock_irqsave(&dbf->san_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = id;
	rec->fsf_req_id = req_id;
	rec->d_id = d_id;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->pl_len = len; /* full length even if we cap pay below */
	if (!sg)
		goto out;
	rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
	memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
	if (len <= rec_len)
		goto out; /* skip pay record if full content in rec->payload */

	/* if (len > rec_len):
	 * dump data up to cap_len ignoring small duplicate in rec->payload
	 */
	spin_lock(&dbf->pay_lock);
	memset(payload, 0, sizeof(*payload));
	memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
		u16 pay_len, offset = 0;

		while (offset < sg->length && pay_sum < cap_len) {
			pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
				      (u16)(sg->length - offset));
			/* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
			memcpy(payload->data, sg_virt(sg) + offset, pay_len);
			debug_event(dbf->pay, ZFCP_DBF_SAN_LEVEL, payload,
				    zfcp_dbf_plen(pay_len));
			payload->counter++;
			offset += pay_len;
			pay_sum += pay_len;
		}
	}
	spin_unlock(&dbf->pay_lock);

out:
	debug_event(dbf->san, ZFCP_DBF_SAN_LEVEL, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->san_lock, flags);
}

/**
 * zfcp_dbf_san_req - trace event for issued SAN request
 * @tag: identifier for event
 * @fsf: request containing issued CT or ELS data
 * @d_id: N_Port_ID where SAN request is sent to
 */
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)zfcp_qdio_real_bytes(ct_els->req);
	zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
		     length, fsf->req_id, d_id, length);
}

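/* For a GPN_FT accept response, cap the traced length after the last port
 * entry so the unused rest of the response buffer is not dumped into the
 * payload trace; responses to other commands are returned uncapped.
 */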
static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
					      struct zfcp_fsf_req *fsf,
					      u16 len)
{
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
	struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
	struct scatterlist *resp_entry = ct_els->resp;
	struct fc_ct_hdr *resph;
	struct fc_gpn_ft_resp *acc;
	int max_entries, x, last = 0;

	if (!(memcmp(tag, "fsscth2", 7) == 0
	      && ct_els->d_id == FC_FID_DIR_SERV
	      && reqh->ct_rev == FC_CT_REV
	      && reqh->ct_in_id[0] == 0
	      && reqh->ct_in_id[1] == 0
	      && reqh->ct_in_id[2] == 0
	      && reqh->ct_fs_type == FC_FST_DIR
	      && reqh->ct_fs_subtype == FC_NS_SUBTYPE
	      && reqh->ct_options == 0
	      && reqh->_ct_resvd1 == 0
	      && reqh->ct_cmd == cpu_to_be16(FC_NS_GPN_FT)
	      /* reqh->ct_mr_size can vary so do not match but read below */
	      && reqh->_ct_resvd2 == 0
	      && reqh->ct_reason == 0
	      && reqh->ct_explan == 0
	      && reqh->ct_vendor == 0
	      && reqn->fn_resvd == 0
	      && reqn->fn_domain_id_scope == 0
	      && reqn->fn_area_id_scope == 0
	      && reqn->fn_fc4_type == FC_TYPE_FCP))
		return len; /* not GPN_FT response so do not cap */

	acc = sg_virt(resp_entry);

	/* cap all but accept CT responses to at least the CT header */
	resph = (struct fc_ct_hdr *)acc;
	if ((ct_els->status) ||
	    (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
		return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);

	max_entries = (be16_to_cpu(reqh->ct_mr_size) * 4 /
		       sizeof(struct fc_gpn_ft_resp))
		+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
		     * to account for header as 1st pseudo "entry" */;

	/* the basic CT_IU preamble is the same size as one entry in the GPN_FT
	 * response, allowing us to skip special handling for it - just skip it
	 */
	for (x = 1; x < max_entries && !last; x++) {
		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
			acc++;
		else
			acc = sg_virt(++resp_entry);

		last = acc->fp_flags & FC_NS_FID_LAST;
	}
	len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
	return len; /* cap after last entry */
}

/**
 * zfcp_dbf_san_res - trace event for a received SAN response
 * @tag: identifier for event
 * @fsf: request containing received CT or ELS data
 */
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
	zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
		     length, fsf->req_id, ct_els->d_id,
		     zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}

/**
 * zfcp_dbf_san_in_els - trace event for incoming ELS
 * @tag: identifier for event
 * @fsf: request containing received ELS data
 */
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct fsf_status_read_buffer *srb =
		(struct fsf_status_read_buffer *) fsf->data;
	u16 length;
	struct scatterlist sg;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)(srb->length -
			offsetof(struct fsf_status_read_buffer, payload));
	sg_init_one(&sg, srb->payload.data, length);
	zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
		     fsf->req_id, ntoh24(srb->d_id), length);
}

/**
 * zfcp_dbf_scsi_common() - Common trace event helper for scsi.
 * @tag: Identifier for event.
 * @level: trace level of event.
 * @sdev: Pointer to SCSI device as context for this event.
 * @sc: Pointer to SCSI command, or NULL with task management function (TMF).
 * @fsf: Pointer to FSF request, or NULL.
 */
void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev,
			  struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
{
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *) sdev->host->hostdata[0];
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
	struct fcp_resp_with_ext *fcp_rsp;
	struct fcp_resp_rsp_info *fcp_rsp_info;
	unsigned long flags;

	spin_lock_irqsave(&dbf->scsi_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_SCSI_CMND;
	if (sc) {
		rec->scsi_result = sc->result;
		rec->scsi_retries = sc->retries;
		rec->scsi_allowed = sc->allowed;
		rec->scsi_id = sc->device->id;
		rec->scsi_lun = (u32)sc->device->lun;
		rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
		rec->host_scribble = (u64)sc->host_scribble;

		memcpy(rec->scsi_opcode, sc->cmnd,
		       min_t(int, sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
	} else {
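		/* no SCSI command available (TMF case): take IDs from the
		 * SCSI device and mark command-specific fields as invalid
		 */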
		rec->scsi_result = ~0;
		rec->scsi_retries = ~0;
		rec->scsi_allowed = ~0;
		rec->scsi_id = sdev->id;
		rec->scsi_lun = (u32)sdev->lun;
		rec->scsi_lun_64_hi = (u32)(sdev->lun >> 32);
		rec->host_scribble = ~0;

		memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
	}

	if (fsf) {
		rec->fsf_req_id = fsf->req_id;
		rec->pl_len = FCP_RESP_WITH_EXT;
		fcp_rsp = &(fsf->qtcb->bottom.io.fcp_rsp.iu);
		/* mandatory parts of FCP_RSP IU in this SCSI record */
		memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
			fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
			rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
		}
		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL)
			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
		/* complete FCP_RSP IU in associated PAYload record
		 * but only if there are optional parts
		 */
		if (fcp_rsp->resp.fr_flags != 0)
			zfcp_dbf_pl_write(
				dbf, fcp_rsp,
				/* at least one full PAY record
				 * but not beyond hardware response field
				 */
				min_t(u16, max_t(u16, rec->pl_len,
						 ZFCP_DBF_PAY_MAX_REC),
				      FSF_FCP_RSP_SIZE),
				"fcp_riu", fsf->req_id);
	}

	debug_event(dbf->scsi, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}

/**
 * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
 * @tag: Identifier for event.
 * @adapter: Pointer to zfcp adapter as context for this event.
 * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
 * @ret: Return value of calling function.
 *
 * This SCSI trace variant does not depend on any of:
 * scsi_cmnd, zfcp_fsf_req, scsi_device.
 */
void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
		      unsigned int scsi_id, int ret)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
	unsigned long flags;
	static int const level = 1;

	if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
		return;

	spin_lock_irqsave(&dbf->scsi_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_SCSI_CMND;
	rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
	rec->scsi_retries = ~0;
	rec->scsi_allowed = ~0;
	rec->fcp_rsp_info = ~0;
	rec->scsi_id = scsi_id;
	rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
	rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
	rec->host_scribble = ~0;
	memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);

	debug_event(dbf->scsi, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}

static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
	struct debug_info *d;

	d = debug_register(name, size, 1, rec_size);
	if (!d)
		return NULL;

	debug_register_view(d, &debug_hex_ascii_view);
	debug_set_level(d, dbflevel);

	return d;
}

static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
	if (!dbf)
		return;

	debug_unregister(dbf->scsi);
	debug_unregister(dbf->san);
	debug_unregister(dbf->hba);
	debug_unregister(dbf->pay);
	debug_unregister(dbf->rec);
	kfree(dbf);
}

/**
 * zfcp_dbf_adapter_register - registers debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be registered
 * Return: -ENOMEM on error, 0 otherwise
 */
int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
	char name[DEBUG_MAX_NAME_LEN];
	struct zfcp_dbf *dbf;

	dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
	if (!dbf)
		return -ENOMEM;

	spin_lock_init(&dbf->pay_lock);
	spin_lock_init(&dbf->hba_lock);
	spin_lock_init(&dbf->san_lock);
	spin_lock_init(&dbf->scsi_lock);
	spin_lock_init(&dbf->rec_lock);

	/* debug feature area which records recovery activity */
	sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
	dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
	if (!dbf->rec)
		goto err_out;

	/* debug feature area which records HBA (FSF and QDIO) conditions */
	sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
	dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
	if (!dbf->hba)
		goto err_out;

	/* debug feature area which records payload info */
	sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
	dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
	if (!dbf->pay)
		goto err_out;

	/* debug feature area which records SAN command failures and recovery */
	sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
	dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
	if (!dbf->san)
		goto err_out;

	/* debug feature area which records SCSI command failures and recovery */
	sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
	dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
	if (!dbf->scsi)
		goto err_out;

	adapter->dbf = dbf;

	return 0;
err_out:
	zfcp_dbf_unregister(dbf);
	return -ENOMEM;
}

/**
 * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be unregistered
 */
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct zfcp_dbf *dbf = adapter->dbf;

	adapter->dbf = NULL;
	zfcp_dbf_unregister(dbf);
}