// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. */

#include "reporter_vnic.h"
#include "en_stats.h"
#include "devlink.h"

/* Read a 32-bit / 64-bit counter from the QUERY_VNIC_ENV command output. */
#define VNIC_ENV_GET(vnic_env_stats, c) \
	MLX5_GET(query_vnic_env_out, (vnic_env_stats)->query_vnic_env_out, \
		 vport_env.c)

#define VNIC_ENV_GET64(vnic_env_stats, c) \
	MLX5_GET64(query_vnic_env_out, (vnic_env_stats)->query_vnic_env_out, \
		   vport_env.c)

struct mlx5_vnic_diag_stats {
	__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};

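/* Query the vNIC environment counters of @vport_num (or of the issuing
 * function's own vport when @other_vport is false) and report them through
 * the devlink health message @fmsg.
 */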
void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
					  struct devlink_fmsg *fmsg,
					  u16 vport_num, bool other_vport)
{
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_vnic_diag_stats vnic;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, vport_number, vport_num);
	MLX5_SET(query_vnic_env_in, in, other_vport, !!other_vport);

	mlx5_cmd_exec_inout(dev, query_vnic_env, in, &vnic.query_vnic_env_out);

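	/* Group the counters under a named object; only counters whose
	 * capability bit is set below are reported.
	 */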
	devlink_fmsg_pair_nest_start(fmsg, "vNIC env counters");
	devlink_fmsg_obj_nest_start(fmsg);

	if (MLX5_CAP_GEN(dev, vnic_env_queue_counters)) {
		devlink_fmsg_u32_pair_put(fmsg, "total_error_queues",
					  VNIC_ENV_GET(&vnic, total_error_queues));
		devlink_fmsg_u32_pair_put(fmsg, "send_queue_priority_update_flow",
					  VNIC_ENV_GET(&vnic, send_queue_priority_update_flow));
	}
	if (MLX5_CAP_GEN(dev, eq_overrun_count)) {
		devlink_fmsg_u32_pair_put(fmsg, "comp_eq_overrun",
					  VNIC_ENV_GET(&vnic, comp_eq_overrun));
		devlink_fmsg_u32_pair_put(fmsg, "async_eq_overrun",
					  VNIC_ENV_GET(&vnic, async_eq_overrun));
	}
	if (MLX5_CAP_GEN(dev, vnic_env_cq_overrun))
		devlink_fmsg_u32_pair_put(fmsg, "cq_overrun",
					  VNIC_ENV_GET(&vnic, cq_overrun));
	if (MLX5_CAP_GEN(dev, invalid_command_count))
		devlink_fmsg_u32_pair_put(fmsg, "invalid_command",
					  VNIC_ENV_GET(&vnic, invalid_command));
	if (MLX5_CAP_GEN(dev, quota_exceeded_count))
		devlink_fmsg_u32_pair_put(fmsg, "quota_exceeded_command",
					  VNIC_ENV_GET(&vnic, quota_exceeded_command));
	if (MLX5_CAP_GEN(dev, nic_receive_steering_discard))
		devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard",
					  VNIC_ENV_GET64(&vnic, nic_receive_steering_discard));
	if (MLX5_CAP_GEN(dev, vnic_env_cnt_steering_fail)) {
		devlink_fmsg_u64_pair_put(fmsg, "generated_pkt_steering_fail",
					  VNIC_ENV_GET64(&vnic, generated_pkt_steering_fail));
		devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail",
					  VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
	}

	devlink_fmsg_obj_nest_end(fmsg);
	devlink_fmsg_pair_nest_end(fmsg);
}

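/* devlink health "diagnose" callback: report the device's own vport counters. */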
static int mlx5_reporter_vnic_diagnose(struct devlink_health_reporter *reporter,
				       struct devlink_fmsg *fmsg,
				       struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);

	mlx5_reporter_vnic_diagnose_counters(dev, fmsg, 0, false);
	return 0;
}

static const struct devlink_health_reporter_ops mlx5_reporter_vnic_ops = {
	.name = "vnic",
	.diagnose = mlx5_reporter_vnic_diagnose,
};

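/* Register the "vnic" health reporter (graceful period 0, no recover callback).
 * Creation failure is logged but is not treated as fatal.
 */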
void mlx5_reporter_vnic_create(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct devlink *devlink = priv_to_devlink(dev);

	health->vnic_reporter =
		devlink_health_reporter_create(devlink,
					       &mlx5_reporter_vnic_ops,
					       0, dev);
	if (IS_ERR(health->vnic_reporter))
		mlx5_core_warn(dev,
			       "Failed to create vnic reporter, err = %ld\n",
			       PTR_ERR(health->vnic_reporter));
}

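/* Release the reporter; skip if creation failed or never happened. */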
void mlx5_reporter_vnic_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	if (!IS_ERR_OR_NULL(health->vnic_reporter))
		devlink_health_reporter_destroy(health->vnic_reporter);
}