// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
// Copyright (c) 2019 Hisilicon Limited.

#include <rdma/rdma_cm.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"

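/*
 * restrack fill helpers: export driver-specific CQ/QP/MR/SRQ state through
 * the rdma netlink RES_GET interface. The driver hooks these up as the
 * fill_res_*_entry{,_raw} callbacks of its ib_device_ops, roughly:
 *
 *	.fill_res_cq_entry = hns_roce_fill_res_cq_entry,
 *	.fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
 *	...
 *
 * (illustrative sketch only; see the ops table in hns_roce_main.c)
 */

/* Nest driver-specific CQ attributes under RDMA_NLDEV_ATTR_DRIVER. */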
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

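/* Dump the raw CQ context (CQC) queried from hardware as RDMA_NLDEV_ATTR_RES_RAW. */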
int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct hns_roce_v2_cq_context context;
	int ret;

	if (!hr_dev->hw->query_cqc)
		return -EINVAL;

	ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);
	if (ret)
		return -EINVAL;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}

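/* Fill driver-specific QP attributes: SQ/RQ WQE counts, max SGEs and the
 * extended SGE count.
 */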
int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

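/* Dump the raw QP context (QPC), followed by the SCC context when QP flow
 * control is supported by the hardware.
 */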
int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
	struct hns_roce_full_qp_ctx {
		struct hns_roce_v2_qp_context qpc;
		struct hns_roce_v2_scc_context sccc;
	} context = {};
	int ret;

	if (!hr_dev->hw->query_qpc)
		return -EINVAL;

	ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context.qpc);
	if (ret)
		return ret;

	/* If SCC is disabled or the query fails, the queried SCCC will
	 * be all 0.
	 */
	if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) ||
	    !hr_dev->hw->query_sccc)
		goto out;

	ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc);
	if (ret)
		ibdev_warn_ratelimited(&hr_dev->ib_dev,
				       "failed to query SCCC, ret = %d.\n",
				       ret);

out:
	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}

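/* Fill driver-specific MR attributes describing the PBL MTR configuration. */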
int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
{
	struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "pbl_hop_num", hr_mr->pbl_hop_num))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "ba_pg_shift",
				       hr_mr->pbl_mtr.hem_cfg.ba_pg_shift))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "buf_pg_shift",
				       hr_mr->pbl_mtr.hem_cfg.buf_pg_shift))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

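/* Dump the raw MPT entry queried from hardware by MR key. */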
int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
	struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
	struct hns_roce_v2_mpt_entry context;
	int ret;

	if (!hr_dev->hw->query_mpt)
		return -EINVAL;

	ret = hr_dev->hw->query_mpt(hr_dev, hr_mr->key, &context);
	if (ret)
		return -EINVAL;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}

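/* Fill driver-specific SRQ attributes: SRQN, WQE count, max SGEs and XRC
 * domain number.
 */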
int hns_roce_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq)
{
	struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "srqn", hr_srq->srqn))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "wqe_cnt", hr_srq->wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "max_gs", hr_srq->max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "xrcdn", hr_srq->xrcdn))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}

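/* Dump the raw SRQ context (SRQC) queried from hardware. */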
int hns_roce_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
	struct hns_roce_srq_context context;
	int ret;

	if (!hr_dev->hw->query_srqc)
		return -EINVAL;

	ret = hr_dev->hw->query_srqc(hr_dev, hr_srq->srqn, &context);
	if (ret)
		return ret;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}