/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <rdma/ib_addr.h>
#include <rdma/ib_pma.h>
#include "ocrdma_stats.h"

static struct dentry *ocrdma_dbgfs_dir;

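/*
 * Format one "name: value" line and append it at @pcur. Returns the
 * number of bytes copied, or 0 if the line would run past the
 * OCRDMA_MAX_DBGFS_MEM buffer that begins at @start; callers simply
 * advance their cursor by the return value.
 */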
static int ocrdma_add_stat(char *start, char *pcur,
			   char *name, u64 count)
{
	char buff[128] = {0};
	int cpy_len = 0;

	snprintf(buff, sizeof(buff), "%s: %llu\n", name, count);
	cpy_len = strlen(buff);

	if (pcur + cpy_len > start + OCRDMA_MAX_DBGFS_MEM) {
		pr_err("%s: No space in stats buff\n", __func__);
		return 0;
	}

	memcpy(pcur, buff, cpy_len);
	return cpy_len;
}

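/*
 * Allocate the DMA-coherent mailbox buffer shared by the stats
 * request/response and the kernel buffer used to format debugfs
 * output. On a partial failure the DMA buffer is left allocated;
 * the caller is expected to clean up via
 * ocrdma_release_stats_resources() (an assumption based on the
 * release helper below, which tolerates a NULL debugfs_mem).
 */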
bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
{
	struct stats_mem *mem = &dev->stats_mem;

	mutex_init(&dev->stats_lock);
	/* Alloc mbox command mem */
	mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
			  sizeof(struct ocrdma_rdma_stats_resp));

	mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
				     &mem->pa, GFP_KERNEL);
	if (!mem->va) {
		pr_err("%s: stats mbox allocation failed\n", __func__);
		return false;
	}

	/* Alloc debugfs mem */
	mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
	if (!mem->debugfs_mem)
		return false;

	return true;
}

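/* Undo ocrdma_alloc_stats_resources(); safe to call after a partial
 * allocation failure, since kfree(NULL) is a no-op and the DMA free
 * is guarded by the va check.
 */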
void ocrdma_release_stats_resources(struct ocrdma_dev *dev)
{
	struct stats_mem *mem = &dev->stats_mem;

	if (mem->va)
		dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
				  mem->va, mem->pa);
	mem->va = NULL;
	kfree(mem->debugfs_mem);
}

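/*
 * The formatting helpers below share a common shape: each renders the
 * most recent firmware stats response (dev->stats_mem.va) as text into
 * the shared debugfs buffer and returns it. They run under
 * dev->stats_lock when called from ocrdma_dbgfs_ops_read().
 */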
static char *ocrdma_resource_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
			(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;

	memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "active_dpp_pds",
				(u64)rsrc_stats->dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "active_non_dpp_pds",
				(u64)rsrc_stats->non_dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "active_rc_dpp_qps",
				(u64)rsrc_stats->rc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_uc_dpp_qps",
				(u64)rsrc_stats->uc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_ud_dpp_qps",
				(u64)rsrc_stats->ud_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_rc_non_dpp_qps",
				(u64)rsrc_stats->rc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_uc_non_dpp_qps",
				(u64)rsrc_stats->uc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_ud_non_dpp_qps",
				(u64)rsrc_stats->ud_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_srqs",
				(u64)rsrc_stats->srqs);
	pcur += ocrdma_add_stat(stats, pcur, "active_rbqs",
				(u64)rsrc_stats->rbqs);
	pcur += ocrdma_add_stat(stats, pcur, "active_64K_nsmr",
				(u64)rsrc_stats->r64K_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_64K_to_2M_nsmr",
				(u64)rsrc_stats->r64K_to_2M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_2M_to_44M_nsmr",
				(u64)rsrc_stats->r2M_to_44M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_44M_to_1G_nsmr",
				(u64)rsrc_stats->r44M_to_1G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_1G_to_4G_nsmr",
				(u64)rsrc_stats->r1G_to_4G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_nsmr_count_4G_to_32G",
				(u64)rsrc_stats->nsmr_count_4G_to_32G);
	pcur += ocrdma_add_stat(stats, pcur, "active_32G_to_64G_nsmr",
				(u64)rsrc_stats->r32G_to_64G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_64G_to_128G_nsmr",
				(u64)rsrc_stats->r64G_to_128G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_128G_to_higher_nsmr",
				(u64)rsrc_stats->r128G_to_higher_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_embedded_nsmr",
				(u64)rsrc_stats->embedded_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_frmr",
				(u64)rsrc_stats->frmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_prefetch_qps",
				(u64)rsrc_stats->prefetch_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_ondemand_qps",
				(u64)rsrc_stats->ondemand_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_phy_mr",
				(u64)rsrc_stats->phy_mr);
	pcur += ocrdma_add_stat(stats, pcur, "active_mw",
				(u64)rsrc_stats->mw);

	/* Print the threshold stats */
	rsrc_stats = &rdma_stats->th_rsrc_stats;

	pcur += ocrdma_add_stat(stats, pcur, "threshold_dpp_pds",
				(u64)rsrc_stats->dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_non_dpp_pds",
				(u64)rsrc_stats->non_dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_dpp_qps",
				(u64)rsrc_stats->rc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_dpp_qps",
				(u64)rsrc_stats->uc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_dpp_qps",
				(u64)rsrc_stats->ud_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_non_dpp_qps",
				(u64)rsrc_stats->rc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_non_dpp_qps",
				(u64)rsrc_stats->uc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_non_dpp_qps",
				(u64)rsrc_stats->ud_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_srqs",
				(u64)rsrc_stats->srqs);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_rbqs",
				(u64)rsrc_stats->rbqs);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_nsmr",
				(u64)rsrc_stats->r64K_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_to_2M_nsmr",
				(u64)rsrc_stats->r64K_to_2M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_2M_to_44M_nsmr",
				(u64)rsrc_stats->r2M_to_44M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_44M_to_1G_nsmr",
				(u64)rsrc_stats->r44M_to_1G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_1G_to_4G_nsmr",
				(u64)rsrc_stats->r1G_to_4G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_nsmr_count_4G_to_32G",
				(u64)rsrc_stats->nsmr_count_4G_to_32G);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_32G_to_64G_nsmr",
				(u64)rsrc_stats->r32G_to_64G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_64G_to_128G_nsmr",
				(u64)rsrc_stats->r64G_to_128G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_128G_to_higher_nsmr",
				(u64)rsrc_stats->r128G_to_higher_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_embedded_nsmr",
				(u64)rsrc_stats->embedded_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_frmr",
				(u64)rsrc_stats->frmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_prefetch_qps",
				(u64)rsrc_stats->prefetch_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_ondemand_qps",
				(u64)rsrc_stats->ondemand_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_phy_mr",
				(u64)rsrc_stats->phy_mr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_mw",
				(u64)rsrc_stats->mw);
	return stats;
}

static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;

	memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_bytes",
			convert_to_64bit(rx_stats->roce_frame_bytes_lo,
					 rx_stats->roce_frame_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_icrc_drops",
				(u64)rx_stats->roce_frame_icrc_drops);
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_payload_len_drops",
				(u64)rx_stats->roce_frame_payload_len_drops);
	pcur += ocrdma_add_stat(stats, pcur, "ud_drops",
				(u64)rx_stats->ud_drops);
	pcur += ocrdma_add_stat(stats, pcur, "qp1_drops",
				(u64)rx_stats->qp1_drops);
	pcur += ocrdma_add_stat(stats, pcur, "psn_error_request_packets",
				(u64)rx_stats->psn_error_request_packets);
	pcur += ocrdma_add_stat(stats, pcur, "psn_error_resp_packets",
				(u64)rx_stats->psn_error_resp_packets);
	pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_timeouts",
				(u64)rx_stats->rnr_nak_timeouts);
	pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_receives",
				(u64)rx_stats->rnr_nak_receives);
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_rxmt_drops",
				(u64)rx_stats->roce_frame_rxmt_drops);
	pcur += ocrdma_add_stat(stats, pcur, "nak_count_psn_sequence_errors",
				(u64)rx_stats->nak_count_psn_sequence_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rc_drop_count_lookup_errors",
				(u64)rx_stats->rc_drop_count_lookup_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rq_rnr_naks",
				(u64)rx_stats->rq_rnr_naks);
	pcur += ocrdma_add_stat(stats, pcur, "srq_rnr_naks",
				(u64)rx_stats->srq_rnr_naks);
	pcur += ocrdma_add_stat(stats, pcur, "roce_frames",
				convert_to_64bit(rx_stats->roce_frames_lo,
						 rx_stats->roce_frames_hi));

	return stats;
}

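/* Total packets received: good RoCE frames plus the ICRC and payload
 * length drops, so dropped frames still show up in the PMA counter.
 */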
static u64 ocrdma_sysfs_rcv_pkts(struct ocrdma_dev *dev)
{
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;

	return convert_to_64bit(rx_stats->roce_frames_lo,
		rx_stats->roce_frames_hi) + (u64)rx_stats->roce_frame_icrc_drops
		+ (u64)rx_stats->roce_frame_payload_len_drops;
}

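/*
 * PortRcvData in the IB PMA PortCounters attribute is counted in
 * 32-bit words, so the byte count from firmware is divided by 4.
 */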
static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev)
{
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;

	return (convert_to_64bit(rx_stats->roce_frame_bytes_lo,
		rx_stats->roce_frame_bytes_hi)) / 4;
}

static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;

	memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "send_pkts",
				convert_to_64bit(tx_stats->send_pkts_lo,
						 tx_stats->send_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "write_pkts",
				convert_to_64bit(tx_stats->write_pkts_lo,
						 tx_stats->write_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_pkts",
				convert_to_64bit(tx_stats->read_pkts_lo,
						 tx_stats->read_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_rsp_pkts",
				convert_to_64bit(tx_stats->read_rsp_pkts_lo,
						 tx_stats->read_rsp_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "ack_pkts",
				convert_to_64bit(tx_stats->ack_pkts_lo,
						 tx_stats->ack_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "send_bytes",
				convert_to_64bit(tx_stats->send_bytes_lo,
						 tx_stats->send_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "write_bytes",
				convert_to_64bit(tx_stats->write_bytes_lo,
						 tx_stats->write_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_req_bytes",
				convert_to_64bit(tx_stats->read_req_bytes_lo,
						 tx_stats->read_req_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_rsp_bytes",
				convert_to_64bit(tx_stats->read_rsp_bytes_lo,
						 tx_stats->read_rsp_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "ack_timeouts",
				(u64)tx_stats->ack_timeouts);

	return stats;
}

static u64 ocrdma_sysfs_xmit_pkts(struct ocrdma_dev *dev)
{
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;

	return (convert_to_64bit(tx_stats->send_pkts_lo,
				 tx_stats->send_pkts_hi) +
	convert_to_64bit(tx_stats->write_pkts_lo, tx_stats->write_pkts_hi) +
	convert_to_64bit(tx_stats->read_pkts_lo, tx_stats->read_pkts_hi) +
	convert_to_64bit(tx_stats->read_rsp_pkts_lo,
			 tx_stats->read_rsp_pkts_hi) +
	convert_to_64bit(tx_stats->ack_pkts_lo, tx_stats->ack_pkts_hi));
}

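/* As on the receive side, PortXmitData is reported in 32-bit words,
 * hence the division by 4.
 */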
static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev)
{
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;

	return (convert_to_64bit(tx_stats->send_bytes_lo,
				 tx_stats->send_bytes_hi) +
		convert_to_64bit(tx_stats->write_bytes_lo,
				 tx_stats->write_bytes_hi) +
		convert_to_64bit(tx_stats->read_req_bytes_lo,
				 tx_stats->read_req_bytes_hi) +
		convert_to_64bit(tx_stats->read_rsp_bytes_lo,
				 tx_stats->read_rsp_bytes_hi)) / 4;
}

static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_wqe_stats *wqe_stats = &rdma_stats->wqe_stats;

	memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "large_send_rc_wqes",
		convert_to_64bit(wqe_stats->large_send_rc_wqes_lo,
				 wqe_stats->large_send_rc_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "large_write_rc_wqes",
		convert_to_64bit(wqe_stats->large_write_rc_wqes_lo,
				 wqe_stats->large_write_rc_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_wqes",
				convert_to_64bit(wqe_stats->read_wqes_lo,
						 wqe_stats->read_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "frmr_wqes",
				convert_to_64bit(wqe_stats->frmr_wqes_lo,
						 wqe_stats->frmr_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "mw_bind_wqes",
				convert_to_64bit(wqe_stats->mw_bind_wqes_lo,
						 wqe_stats->mw_bind_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "invalidate_wqes",
		convert_to_64bit(wqe_stats->invalidate_wqes_lo,
				 wqe_stats->invalidate_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "dpp_wqe_drops",
				(u64)wqe_stats->dpp_wqe_drops);
	return stats;
}

static char *ocrdma_db_errstats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_db_err_stats *db_err_stats = &rdma_stats->db_err_stats;

	memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "sq_doorbell_errors",
				(u64)db_err_stats->sq_doorbell_errors);
	pcur += ocrdma_add_stat(stats, pcur, "cq_doorbell_errors",
				(u64)db_err_stats->cq_doorbell_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rq_srq_doorbell_errors",
				(u64)db_err_stats->rq_srq_doorbell_errors);
	pcur += ocrdma_add_stat(stats, pcur, "cq_overflow_errors",
				(u64)db_err_stats->cq_overflow_errors);
	return stats;
}

static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_qp_err_stats *rx_qp_err_stats =
		 &rdma_stats->rx_qp_err_stats;

	memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_request_errors",
			(u64)rx_qp_err_stats->nak_invalid_request_errors);
	pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors",
			(u64)rx_qp_err_stats->nak_remote_operation_errors);
	pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors",
			(u64)rx_qp_err_stats->nak_count_remote_access_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
			(u64)rx_qp_err_stats->local_length_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
			(u64)rx_qp_err_stats->local_protection_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
			(u64)rx_qp_err_stats->local_qp_operation_errors);
	return stats;
}

static char *ocrdma_txqp_errstats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_qp_err_stats *tx_qp_err_stats =
		&rdma_stats->tx_qp_err_stats;

	memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
			(u64)tx_qp_err_stats->local_length_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
			(u64)tx_qp_err_stats->local_protection_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
			(u64)tx_qp_err_stats->local_qp_operation_errors);
	pcur += ocrdma_add_stat(stats, pcur, "retry_count_exceeded_errors",
			(u64)tx_qp_err_stats->retry_count_exceeded_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rnr_retry_count_exceeded_errors",
			(u64)tx_qp_err_stats->rnr_retry_count_exceeded_errors);
	return stats;
}

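/*
 * The raw debug blocks are dumped as hex DWORDs rather than named
 * counters; their layout is firmware-defined and is not decoded here.
 */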
static char *ocrdma_tx_dbg_stats(struct ocrdma_dev *dev)
{
	int i;
	char *pstats = dev->stats_mem.debugfs_mem;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_dbg_stats *tx_dbg_stats =
		&rdma_stats->tx_dbg_stats;

	memset(pstats, 0, OCRDMA_MAX_DBGFS_MEM);

	for (i = 0; i < 100; i++)
		pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
				   tx_dbg_stats->data[i]);

	return dev->stats_mem.debugfs_mem;
}

static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
{
	int i;
	char *pstats = dev->stats_mem.debugfs_mem;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_dbg_stats *rx_dbg_stats =
		&rdma_stats->rx_dbg_stats;

	memset(pstats, 0, OCRDMA_MAX_DBGFS_MEM);

	for (i = 0; i < 200; i++)
		pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
				   rx_dbg_stats->data[i]);

	return dev->stats_mem.debugfs_mem;
}

static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;

	memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "async_cq_err",
				(u64)dev->async_err_stats
				[OCRDMA_CQ_ERROR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_cq_overrun_err",
				(u64)dev->async_err_stats
				[OCRDMA_CQ_OVERRUN_ERROR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_cq_qpcat_err",
				(u64)dev->async_err_stats
				[OCRDMA_CQ_QPCAT_ERROR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_qp_access_err",
				(u64)dev->async_err_stats
				[OCRDMA_QP_ACCESS_ERROR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_qp_comm_est_evt",
				(u64)dev->async_err_stats
				[OCRDMA_QP_COMM_EST_EVENT].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_sq_drained_evt",
				(u64)dev->async_err_stats
				[OCRDMA_SQ_DRAINED_EVENT].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_dev_fatal_evt",
				(u64)dev->async_err_stats
				[OCRDMA_DEVICE_FATAL_EVENT].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_srqcat_err",
				(u64)dev->async_err_stats
				[OCRDMA_SRQCAT_ERROR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_srq_limit_evt",
				(u64)dev->async_err_stats
				[OCRDMA_SRQ_LIMIT_EVENT].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_qp_last_wqe_evt",
				(u64)dev->async_err_stats
				[OCRDMA_QP_LAST_WQE_EVENT].counter);

	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_len_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_LEN_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_qp_op_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_QP_OP_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_eec_op_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_EEC_OP_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_prot_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_PROT_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_wr_flush_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_WR_FLUSH_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_mw_bind_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_MW_BIND_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_bad_resp_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_BAD_RESP_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_access_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_ACCESS_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_req_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_REM_INV_REQ_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_access_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_REM_ACCESS_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_op_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_REM_OP_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_retry_exc_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_RETRY_EXC_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rnr_retry_exc_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_RNR_RETRY_EXC_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_rdd_viol_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_RDD_VIOL_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_rd_req_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_REM_INV_RD_REQ_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_abort_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_REM_ABORT_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eecn_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_INV_EECN_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eec_state_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_INV_EEC_STATE_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_fatal_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_FATAL_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_resp_timeout_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_RESP_TIMEOUT_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_general_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_GENERAL_ERR].counter);
	return stats;
}

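/*
 * Refresh the stats mailbox at most once per second, so that rapid
 * debugfs reads do not flood the firmware with mailbox commands.
 */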
static void ocrdma_update_stats(struct ocrdma_dev *dev)
{
	ulong now = jiffies, secs;
	int status;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		      (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;

	secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
	if (secs) {
		/* Refresh the stats from firmware */
		status = ocrdma_mbx_rdma_stats(dev, false);
		if (status)
			pr_err("%s: stats mbox failed with status = %d\n",
			       __func__, status);
		/* Update PD counters from PD resource manager */
		if (dev->pd_mgr->pd_prealloc_valid) {
			rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_count;
			rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_count;
			/* Threshold stats */
			rsrc_stats = &rdma_stats->th_rsrc_stats;
			rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_thrsh;
			rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_thrsh;
		}
		dev->last_stats_time = jiffies;
	}
}

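/*
 * Handle writes to the reset_stats debugfs file. Any non-zero integer
 * triggers a firmware-side counter reset, e.g. (assuming debugfs is
 * mounted at the usual /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/ocrdma/<pci-dev>/reset_stats
 */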
static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
				      const char __user *buffer,
				      size_t count, loff_t *ppos)
{
	char tmp_str[32];
	long reset;
	int status;
	struct ocrdma_stats *pstats = filp->private_data;
	struct ocrdma_dev *dev = pstats->dev;

	if (*ppos != 0 || count == 0 || count > sizeof(tmp_str))
		goto err;

	if (copy_from_user(tmp_str, buffer, count))
		goto err;

	tmp_str[count - 1] = '\0';
	if (kstrtol(tmp_str, 10, &reset))
		goto err;

	switch (pstats->type) {
	case OCRDMA_RESET_STATS:
		if (reset) {
			status = ocrdma_mbx_rdma_stats(dev, true);
			if (status) {
				pr_err("Failed to reset stats = %d\n", status);
				goto err;
			}
		}
		break;
	default:
		goto err;
	}

	return count;
err:
	return -EFAULT;
}

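/*
 * Fill the IB PMA PortCounters attribute of a MAD response. The
 * attribute payload starts 40 bytes into the MAD data area (the same
 * offset other RoCE drivers use for this attribute).
 */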
void ocrdma_pma_counters(struct ocrdma_dev *dev, struct ib_mad *out_mad)
{
	struct ib_pma_portcounters *pma_cnt;

	pma_cnt = (void *)(out_mad->data + 40);
	ocrdma_update_stats(dev);

	pma_cnt->port_xmit_data    = cpu_to_be32(ocrdma_sysfs_xmit_data(dev));
	pma_cnt->port_rcv_data     = cpu_to_be32(ocrdma_sysfs_rcv_data(dev));
	pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev));
	pma_cnt->port_rcv_packets  = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev));
}

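/*
 * Common read handler for all stats debugfs files: refresh the
 * firmware stats, format the buffer that matches the file's type, and
 * copy it out. Partial reads are not supported; a non-zero offset
 * returns 0 (EOF). dev->stats_lock serializes readers, which share a
 * single formatting buffer.
 */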
static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
				     size_t usr_buf_len, loff_t *ppos)
{
	struct ocrdma_stats *pstats = filp->private_data;
	struct ocrdma_dev *dev = pstats->dev;
	ssize_t status = 0;
	char *data = NULL;

	/* No partial reads */
	if (*ppos != 0)
		return 0;

	mutex_lock(&dev->stats_lock);

	ocrdma_update_stats(dev);

	switch (pstats->type) {
	case OCRDMA_RSRC_STATS:
		data = ocrdma_resource_stats(dev);
		break;
	case OCRDMA_RXSTATS:
		data = ocrdma_rx_stats(dev);
		break;
	case OCRDMA_WQESTATS:
		data = ocrdma_wqe_stats(dev);
		break;
	case OCRDMA_TXSTATS:
		data = ocrdma_tx_stats(dev);
		break;
	case OCRDMA_DB_ERRSTATS:
		data = ocrdma_db_errstats(dev);
		break;
	case OCRDMA_RXQP_ERRSTATS:
		data = ocrdma_rxqp_errstats(dev);
		break;
	case OCRDMA_TXQP_ERRSTATS:
		data = ocrdma_txqp_errstats(dev);
		break;
	case OCRDMA_TX_DBG_STATS:
		data = ocrdma_tx_dbg_stats(dev);
		break;
	case OCRDMA_RX_DBG_STATS:
		data = ocrdma_rx_dbg_stats(dev);
		break;
	case OCRDMA_DRV_STATS:
		data = ocrdma_driver_dbg_stats(dev);
		break;
	default:
		status = -EFAULT;
		goto exit;
	}

	if (usr_buf_len < strlen(data)) {
		status = -ENOSPC;
		goto exit;
	}

	status = simple_read_from_buffer(buffer, usr_buf_len, ppos, data,
					 strlen(data));
exit:
	mutex_unlock(&dev->stats_lock);
	return status;
}

static const struct file_operations ocrdma_dbg_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ocrdma_dbgfs_ops_read,
	.write = ocrdma_dbgfs_ops_write,
};

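/*
 * Create the per-device debugfs directory (named after the PCI device)
 * and one file per stats group. Most files are read-only;
 * reset_stats is write-only and hooks the reset path above.
 */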
void ocrdma_add_port_stats(struct ocrdma_dev *dev)
{
	const struct pci_dev *pdev = dev->nic_info.pdev;

	if (!ocrdma_dbgfs_dir)
		return;

	/* Create post stats base dir */
	dev->dir = debugfs_create_dir(pci_name(pdev), ocrdma_dbgfs_dir);

	dev->rsrc_stats.type = OCRDMA_RSRC_STATS;
	dev->rsrc_stats.dev = dev;
	debugfs_create_file("resource_stats", S_IRUSR, dev->dir,
			    &dev->rsrc_stats, &ocrdma_dbg_ops);

	dev->rx_stats.type = OCRDMA_RXSTATS;
	dev->rx_stats.dev = dev;
	debugfs_create_file("rx_stats", S_IRUSR, dev->dir, &dev->rx_stats,
			    &ocrdma_dbg_ops);

	dev->wqe_stats.type = OCRDMA_WQESTATS;
	dev->wqe_stats.dev = dev;
	debugfs_create_file("wqe_stats", S_IRUSR, dev->dir, &dev->wqe_stats,
			    &ocrdma_dbg_ops);

	dev->tx_stats.type = OCRDMA_TXSTATS;
	dev->tx_stats.dev = dev;
	debugfs_create_file("tx_stats", S_IRUSR, dev->dir, &dev->tx_stats,
			    &ocrdma_dbg_ops);

	dev->db_err_stats.type = OCRDMA_DB_ERRSTATS;
	dev->db_err_stats.dev = dev;
	debugfs_create_file("db_err_stats", S_IRUSR, dev->dir,
			    &dev->db_err_stats, &ocrdma_dbg_ops);

	dev->tx_qp_err_stats.type = OCRDMA_TXQP_ERRSTATS;
	dev->tx_qp_err_stats.dev = dev;
	debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir,
			    &dev->tx_qp_err_stats, &ocrdma_dbg_ops);

	dev->rx_qp_err_stats.type = OCRDMA_RXQP_ERRSTATS;
	dev->rx_qp_err_stats.dev = dev;
	debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir,
			    &dev->rx_qp_err_stats, &ocrdma_dbg_ops);

	dev->tx_dbg_stats.type = OCRDMA_TX_DBG_STATS;
	dev->tx_dbg_stats.dev = dev;
	debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir,
			    &dev->tx_dbg_stats, &ocrdma_dbg_ops);

	dev->rx_dbg_stats.type = OCRDMA_RX_DBG_STATS;
	dev->rx_dbg_stats.dev = dev;
	debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir,
			    &dev->rx_dbg_stats, &ocrdma_dbg_ops);

	dev->driver_stats.type = OCRDMA_DRV_STATS;
	dev->driver_stats.dev = dev;
	debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir,
			    &dev->driver_stats, &ocrdma_dbg_ops);

	dev->reset_stats.type = OCRDMA_RESET_STATS;
	dev->reset_stats.dev = dev;
	debugfs_create_file("reset_stats", 0200, dev->dir, &dev->reset_stats,
			    &ocrdma_dbg_ops);
}

void ocrdma_rem_port_stats(struct ocrdma_dev *dev)
{
	debugfs_remove_recursive(dev->dir);
}

void ocrdma_init_debugfs(void)
{
	/* Create base dir in debugfs root dir */
	ocrdma_dbgfs_dir = debugfs_create_dir("ocrdma", NULL);
}

void ocrdma_rem_debugfs(void)
{
	debugfs_remove_recursive(ocrdma_dbgfs_dir);
}