// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ehea_phyp.h"

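/*
 * get_order_of_qentries() - size encoding handed to the hypervisor
 *
 * Finds the smallest ld with (2^ld - 1) >= queue_entries and returns ld - 1,
 * i.e. log2() of the queue size for the power-of-two sizes the driver uses
 * (16 entries -> 4, 512 entries -> 9). The result is packed into the
 * H_ALL_RES_QP_MAX_* fields of R10 below; reading it as a log2 encoding is
 * an interpretation of the call sites, not something this file documents.
 */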
static inline u16 get_order_of_qentries(u16 queue_entries)
{
	u8 ld = 1;		/* logarithmus dualis (binary logarithm) */
	while (((1U << ld) - 1) < queue_entries)
		ld++;
	return ld - 1;
}

/* Defines for H_CALL H_ALLOC_RESOURCE */
#define H_ALL_RES_TYPE_QP	 1
#define H_ALL_RES_TYPE_CQ	 2
#define H_ALL_RES_TYPE_EQ	 3
#define H_ALL_RES_TYPE_MR	 5
#define H_ALL_RES_TYPE_MW	 6

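/*
 * Retry wrapper around plpar_hcall_norets(): on a "long busy" return the
 * call is repeated up to five times, sleeping for the interval suggested by
 * get_longbusy_msecs() in between; any other return code below H_SUCCESS is
 * logged together with all input registers. Once the retries are exhausted,
 * H_BUSY is returned to the caller.
 */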
static long ehea_plpar_hcall_norets(unsigned long opcode,
				    unsigned long arg1,
				    unsigned long arg2,
				    unsigned long arg3,
				    unsigned long arg4,
				    unsigned long arg5,
				    unsigned long arg6,
				    unsigned long arg7)
{
	long ret;
	int i, sleep_msecs;

	for (i = 0; i < 5; i++) {
		ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
					 arg5, arg6, arg7);

		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		if (ret < H_SUCCESS)
			pr_err("opcode=%lx ret=%lx"
			       " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
			       " arg5=%lx arg6=%lx arg7=%lx\n",
			       opcode, ret,
			       arg1, arg2, arg3, arg4, arg5, arg6, arg7);

		return ret;
	}

	return H_BUSY;
}

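/*
 * Same retry scheme for the nine-output plpar_hcall9() interface; on return,
 * outs[0]..outs[8] hold hypervisor registers R4..R12. One failure is kept
 * out of the error log: H_AUTHORITY from H_MODIFY_HEA_PORT for the CB4
 * jumbo-frame/speed and CB7 DUCQPN attributes, presumably because a
 * partition without the required authority can hit that path in normal
 * operation (the reason is an assumption; the filter itself is what the
 * condition below implements).
 */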
static long ehea_plpar_hcall9(unsigned long opcode,
			      unsigned long *outs, /* array of 9 outputs */
			      unsigned long arg1,
			      unsigned long arg2,
			      unsigned long arg3,
			      unsigned long arg4,
			      unsigned long arg5,
			      unsigned long arg6,
			      unsigned long arg7,
			      unsigned long arg8,
			      unsigned long arg9)
{
	long ret;
	int i, sleep_msecs;
	u8 cb_cat;

	for (i = 0; i < 5; i++) {
		ret = plpar_hcall9(opcode, outs,
				   arg1, arg2, arg3, arg4, arg5,
				   arg6, arg7, arg8, arg9);

		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		cb_cat = EHEA_BMASK_GET(H_MEHEAPORT_CAT, arg2);

		if ((ret < H_SUCCESS) && !(((ret == H_AUTHORITY)
		    && (opcode == H_MODIFY_HEA_PORT))
		    && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO)
		    || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7)
		    && (arg3 == H_PORT_CB7_DUCQPN)))))
			pr_err("opcode=%lx ret=%lx"
			       " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
			       " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
			       " arg9=%lx"
			       " out1=%lx out2=%lx out3=%lx out4=%lx"
			       " out5=%lx out6=%lx out7=%lx out8=%lx"
			       " out9=%lx\n",
			       opcode, ret,
			       arg1, arg2, arg3, arg4, arg5,
			       arg6, arg7, arg8, arg9,
			       outs[0], outs[1], outs[2], outs[3], outs[4],
			       outs[5], outs[6], outs[7], outs[8]);
		return ret;
	}

	return H_BUSY;
}

u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
			 const u64 qp_handle, const u64 sel_mask, void *cb_addr)
{
	return ehea_plpar_hcall_norets(H_QUERY_HEA_QP,
				       adapter_handle,		/* R4 */
				       qp_category,		/* R5 */
				       qp_handle,		/* R6 */
				       sel_mask,		/* R7 */
				       __pa(cb_addr),		/* R8 */
				       0, 0);			/* R9-R10 */
}

/* input param R5 */
#define H_ALL_RES_QP_EQPO	  EHEA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_QPP	  EHEA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_RQR	  EHEA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_EQEG	  EHEA_BMASK_IBM(16, 16)
#define H_ALL_RES_QP_LL_QP	  EHEA_BMASK_IBM(17, 17)
#define H_ALL_RES_QP_DMA128	  EHEA_BMASK_IBM(19, 19)
#define H_ALL_RES_QP_HSM	  EHEA_BMASK_IBM(20, 21)
#define H_ALL_RES_QP_SIGT	  EHEA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_TENURE	  EHEA_BMASK_IBM(48, 55)
#define H_ALL_RES_QP_RES_TYP	  EHEA_BMASK_IBM(56, 63)

/* input param R9 */
#define H_ALL_RES_QP_TOKEN	  EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_PD		  EHEA_BMASK_IBM(32, 63)

/* input param R10 */
#define H_ALL_RES_QP_MAX_SWQE	  EHEA_BMASK_IBM(4, 7)
#define H_ALL_RES_QP_MAX_R1WQE	  EHEA_BMASK_IBM(12, 15)
#define H_ALL_RES_QP_MAX_R2WQE	  EHEA_BMASK_IBM(20, 23)
#define H_ALL_RES_QP_MAX_R3WQE	  EHEA_BMASK_IBM(28, 31)
/* Max Send Scatter Gather Elements */
#define H_ALL_RES_QP_MAX_SSGE	  EHEA_BMASK_IBM(37, 39)
/* Max Receive SG Elements RQ1 */
#define H_ALL_RES_QP_MAX_R1SGE	  EHEA_BMASK_IBM(45, 47)
#define H_ALL_RES_QP_MAX_R2SGE	  EHEA_BMASK_IBM(53, 55)
#define H_ALL_RES_QP_MAX_R3SGE	  EHEA_BMASK_IBM(61, 63)

/* input param R11 */
/* max swqe immediate data length */
#define H_ALL_RES_QP_SWQE_IDL	  EHEA_BMASK_IBM(0, 7)
#define H_ALL_RES_QP_PORT_NUM	  EHEA_BMASK_IBM(48, 63)

/* input param R12 */
/* Threshold RQ2 */
#define H_ALL_RES_QP_TH_RQ2	  EHEA_BMASK_IBM(0, 15)
/* Threshold RQ3 */
#define H_ALL_RES_QP_TH_RQ3	  EHEA_BMASK_IBM(16, 31)

/* output param R6 */
#define H_ALL_RES_QP_ACT_SWQE	  EHEA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_ACT_R1WQE	  EHEA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_R2WQE	  EHEA_BMASK_IBM(32, 47)
#define H_ALL_RES_QP_ACT_R3WQE	  EHEA_BMASK_IBM(48, 63)

/* output param R7 */
#define H_ALL_RES_QP_ACT_SSGE	  EHEA_BMASK_IBM(0, 7)
#define H_ALL_RES_QP_ACT_R1SGE	  EHEA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_R2SGE	  EHEA_BMASK_IBM(16, 23)
#define H_ALL_RES_QP_ACT_R3SGE	  EHEA_BMASK_IBM(24, 31)
#define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39)

/* output param R8,R9 */
#define H_ALL_RES_QP_SIZE_SQ	  EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SIZE_RQ1	  EHEA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_SIZE_RQ2	  EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SIZE_RQ3	  EHEA_BMASK_IBM(32, 63)

/* output param R11,R12 */
#define H_ALL_RES_QP_LIOBN_SQ	  EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_LIOBN_RQ1	  EHEA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_LIOBN_RQ2	  EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_LIOBN_RQ3	  EHEA_BMASK_IBM(32, 63)

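/*
 * ehea_h_alloc_resource_qp() - allocate a QP via H_ALLOC_HEA_RESOURCE.
 *
 * The input registers are assembled from the bitmask fields above, roughly
 * (a sketch of what the code below builds, not an independent spec):
 *
 *	R5  = EQPO/LL_QP (low latency RQ1), RQR = 6, SIGT, RES_TYP = QP
 *	R9  = qp_token | pd
 *	R10 = log2-encoded max WQE counts and the WQE size encodings
 *	R11 = swqe_imm_data_len | port_nr
 *	R12 = RQ2/RQ3 thresholds
 *
 * The actual queue sizes, page counts and LIOBNs reported back by the
 * hypervisor (which may differ from what was requested) are written into
 * *init_attr, and the EPA handles are constructed from outs[6].
 */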
u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
			     struct ehea_qp_init_attr *init_attr, const u32 pd,
			     u64 *qp_handle, struct h_epas *h_epas)
{
	u64 hret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	u64 allocate_controls =
	    EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_QPP, 0)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_RQR, 6)	/* rq1 & rq2 & rq3 */
	    | EHEA_BMASK_SET(H_ALL_RES_QP_EQEG, 0)	/* EQE gen. disabled */
	    | EHEA_BMASK_SET(H_ALL_RES_QP_LL_QP, init_attr->low_lat_rq1)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_DMA128, 0)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_HSM, 0)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_SIGT, init_attr->signalingtype)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_RES_TYP, H_ALL_RES_TYPE_QP);

	u64 r9_reg = EHEA_BMASK_SET(H_ALL_RES_QP_PD, pd)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_TOKEN, init_attr->qp_token);

	u64 max_r10_reg =
	    EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SWQE,
			   get_order_of_qentries(init_attr->max_nr_send_wqes))
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1WQE,
			     get_order_of_qentries(init_attr->max_nr_rwqes_rq1))
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2WQE,
			     get_order_of_qentries(init_attr->max_nr_rwqes_rq2))
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3WQE,
			     get_order_of_qentries(init_attr->max_nr_rwqes_rq3))
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SSGE, init_attr->wqe_size_enc_sq)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1SGE,
			     init_attr->wqe_size_enc_rq1)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2SGE,
			     init_attr->wqe_size_enc_rq2)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3SGE,
			     init_attr->wqe_size_enc_rq3);

	u64 r11_in =
	    EHEA_BMASK_SET(H_ALL_RES_QP_SWQE_IDL, init_attr->swqe_imm_data_len)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_PORT_NUM, init_attr->port_nr);
	u64 threshold =
	    EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ2, init_attr->rq2_threshold)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ3, init_attr->rq3_threshold);

	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
				 outs,
				 adapter_handle,		/* R4 */
				 allocate_controls,		/* R5 */
				 init_attr->send_cq_handle,	/* R6 */
				 init_attr->recv_cq_handle,	/* R7 */
				 init_attr->aff_eq_handle,	/* R8 */
				 r9_reg,			/* R9 */
				 max_r10_reg,			/* R10 */
				 r11_in,			/* R11 */
				 threshold);			/* R12 */

	*qp_handle = outs[0];
	init_attr->qp_nr = (u32)outs[1];

	init_attr->act_nr_send_wqes =
	    (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, outs[2]);
	init_attr->act_nr_rwqes_rq1 =
	    (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, outs[2]);
	init_attr->act_nr_rwqes_rq2 =
	    (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, outs[2]);
	init_attr->act_nr_rwqes_rq3 =
	    (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, outs[2]);

	init_attr->act_wqe_size_enc_sq = init_attr->wqe_size_enc_sq;
	init_attr->act_wqe_size_enc_rq1 = init_attr->wqe_size_enc_rq1;
	init_attr->act_wqe_size_enc_rq2 = init_attr->wqe_size_enc_rq2;
	init_attr->act_wqe_size_enc_rq3 = init_attr->wqe_size_enc_rq3;

	init_attr->nr_sq_pages =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, outs[4]);
	init_attr->nr_rq1_pages =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, outs[4]);
	init_attr->nr_rq2_pages =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, outs[5]);
	init_attr->nr_rq3_pages =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, outs[5]);

	init_attr->liobn_sq =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, outs[7]);
	init_attr->liobn_rq1 =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, outs[7]);
	init_attr->liobn_rq2 =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, outs[8]);
	init_attr->liobn_rq3 =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, outs[8]);

	if (!hret)
		hcp_epas_ctor(h_epas, outs[6], outs[6]);

	return hret;
}

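/*
 * ehea_h_alloc_resource_cq() - allocate a completion queue. On success,
 * outs[0] carries the CQ handle, outs[3] the actual number of CQEs and
 * outs[4] the number of queue pages; outs[5]/outs[6] are fed to
 * hcp_epas_ctor() as the two EPA addresses.
 */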
u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
			     struct ehea_cq_attr *cq_attr,
			     u64 *cq_handle, struct h_epas *epas)
{
	u64 hret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
				 outs,
				 adapter_handle,		/* R4 */
				 H_ALL_RES_TYPE_CQ,		/* R5 */
				 cq_attr->eq_handle,		/* R6 */
				 cq_attr->cq_token,		/* R7 */
				 cq_attr->max_nr_of_cqes,	/* R8 */
				 0, 0, 0, 0);			/* R9-R12 */

	*cq_handle = outs[0];
	cq_attr->act_nr_of_cqes = outs[3];
	cq_attr->nr_pages = outs[4];

	if (!hret)
		hcp_epas_ctor(epas, outs[5], outs[6]);

	return hret;
}

/*  input param R5 */
#define H_ALL_RES_EQ_NEQ	     EHEA_BMASK_IBM(0, 0)
#define H_ALL_RES_EQ_NON_NEQ_ISN     EHEA_BMASK_IBM(6, 7)
#define H_ALL_RES_EQ_INH_EQE_GEN     EHEA_BMASK_IBM(16, 16)
#define H_ALL_RES_EQ_RES_TYPE	     EHEA_BMASK_IBM(56, 63)
/*  input param R6 */
#define H_ALL_RES_EQ_MAX_EQE	     EHEA_BMASK_IBM(32, 63)

/*  output param R6 */
#define H_ALL_RES_EQ_LIOBN	     EHEA_BMASK_IBM(32, 63)

/*  output param R7 */
#define H_ALL_RES_EQ_ACT_EQE	     EHEA_BMASK_IBM(32, 63)

/*  output param R8 */
#define H_ALL_RES_EQ_ACT_PS	     EHEA_BMASK_IBM(32, 63)

/*  output param R9 */
#define H_ALL_RES_EQ_ACT_EQ_IST_C    EHEA_BMASK_IBM(30, 31)
#define H_ALL_RES_EQ_ACT_EQ_IST_1    EHEA_BMASK_IBM(40, 63)

/*  output param R10 */
#define H_ALL_RES_EQ_ACT_EQ_IST_2    EHEA_BMASK_IBM(40, 63)

/*  output param R11 */
#define H_ALL_RES_EQ_ACT_EQ_IST_3    EHEA_BMASK_IBM(40, 63)

/*  output param R12 */
#define H_ALL_RES_EQ_ACT_EQ_IST_4    EHEA_BMASK_IBM(40, 63)

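/*
 * ehea_h_alloc_resource_eq() - allocate an event queue. The NEQ bit is set
 * when eq_attr->type is non-zero and EQE generation is inhibited when
 * eq_attr->eqe_gen is false; the hypervisor returns the actual EQE count,
 * the page count and the interrupt source tokens IST1-IST4 in
 * outs[3]..outs[8].
 */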
u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
			     struct ehea_eq_attr *eq_attr, u64 *eq_handle)
{
	u64 hret, allocate_controls;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	/* resource type */
	allocate_controls =
	    EHEA_BMASK_SET(H_ALL_RES_EQ_RES_TYPE, H_ALL_RES_TYPE_EQ)
	    | EHEA_BMASK_SET(H_ALL_RES_EQ_NEQ, eq_attr->type ? 1 : 0)
	    | EHEA_BMASK_SET(H_ALL_RES_EQ_INH_EQE_GEN, !eq_attr->eqe_gen)
	    | EHEA_BMASK_SET(H_ALL_RES_EQ_NON_NEQ_ISN, 1);

	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
				 outs,
				 adapter_handle,		/* R4 */
				 allocate_controls,		/* R5 */
				 eq_attr->max_nr_of_eqes,	/* R6 */
				 0, 0, 0, 0, 0, 0);		/* R7-R12 */

	*eq_handle = outs[0];
	eq_attr->act_nr_of_eqes = outs[3];
	eq_attr->nr_pages = outs[4];
	eq_attr->ist1 = outs[5];
	eq_attr->ist2 = outs[6];
	eq_attr->ist3 = outs[7];
	eq_attr->ist4 = outs[8];

	return hret;
}

u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
			  const u64 qp_handle, const u64 sel_mask,
			  void *cb_addr, u64 *inv_attr_id, u64 *proc_mask,
			  u16 *out_swr, u16 *out_rwr)
{
	u64 hret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	hret = ehea_plpar_hcall9(H_MODIFY_HEA_QP,
				 outs,
				 adapter_handle,		/* R4 */
				 (u64) cat,			/* R5 */
				 qp_handle,			/* R6 */
				 sel_mask,			/* R7 */
				 __pa(cb_addr),			/* R8 */
				 0, 0, 0, 0);			/* R9-R12 */

	*inv_attr_id = outs[0];
	*out_swr = outs[3];
	*out_rwr = outs[4];
	*proc_mask = outs[5];

	return hret;
}

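/*
 * ehea_h_register_rpage() - hand resource pages to the hypervisor via
 * H_REGISTER_HEA_RPAGES. reg_control packs the page size and queue type;
 * log_pageaddr and count are passed through unchanged.
 */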
u64 ehea_h_register_rpage(const u64 adapter_handle, const u8 pagesize,
			  const u8 queue_type, const u64 resource_handle,
			  const u64 log_pageaddr, u64 count)
{
	u64 reg_control;

	reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize)
		    | EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type);

	return ehea_plpar_hcall_norets(H_REGISTER_HEA_RPAGES,
				       adapter_handle,		/* R4 */
				       reg_control,		/* R5 */
				       resource_handle,		/* R6 */
				       log_pageaddr,		/* R7 */
				       count,			/* R8 */
				       0, 0);			/* R9-R10 */
}

u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
			const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
			struct ehea_mr *mr)
{
	u64 hret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	hret = ehea_plpar_hcall9(H_REGISTER_SMR,
				 outs,
				 adapter_handle,		 /* R4 */
				 orig_mr_handle,		 /* R5 */
				 vaddr_in,			 /* R6 */
				 (((u64)access_ctrl) << 32ULL),	 /* R7 */
				 pd,				 /* R8 */
				 0, 0, 0, 0);			 /* R9-R12 */

	mr->handle = outs[0];
	mr->lkey = (u32)outs[2];

	return hret;
}

u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
{
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
				 outs,
				 adapter_handle,		/* R4 */
				 H_DISABLE_GET_EHEA_WQE_P,	/* R5 */
				 qp_handle,			/* R6 */
				 0, 0, 0, 0, 0, 0);		/* R7-R12 */
}

u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
			 u64 force_bit)
{
	return ehea_plpar_hcall_norets(H_FREE_RESOURCE,
				       adapter_handle,	   /* R4 */
				       res_handle,	   /* R5 */
				       force_bit,	   /* R6 */
				       0, 0, 0, 0);	   /* R7-R10 */
}

u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
			     const u64 length, const u32 access_ctrl,
			     const u32 pd, u64 *mr_handle, u32 *lkey)
{
	u64 hret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
				 outs,
				 adapter_handle,		   /* R4 */
				 H_ALL_RES_TYPE_MR,		   /* R5 */
				 vaddr,				   /* R6 */
				 length,			   /* R7 */
				 (((u64) access_ctrl) << 32ULL),   /* R8 */
				 pd,				   /* R9 */
				 0, 0, 0);			   /* R10-R12 */

	*mr_handle = outs[0];
	*lkey = (u32)outs[2];
	return hret;
}

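/*
 * ehea_h_register_rpage_mr() - register memory-region pages. For multi-page
 * registrations the page-list address must be page aligned; otherwise
 * H_PARAMETER is returned without entering the hypervisor.
 */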
u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
			     const u8 pagesize, const u8 queue_type,
			     const u64 log_pageaddr, const u64 count)
{
	if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
		pr_err("not on page boundary\n");
		return H_PARAMETER;
	}

	return ehea_h_register_rpage(adapter_handle, pagesize,
				     queue_type, mr_handle,
				     log_pageaddr, count);
}

u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
{
	u64 hret, cb_logaddr;

	cb_logaddr = __pa(cb_addr);

	hret = ehea_plpar_hcall_norets(H_QUERY_HEA,
				       adapter_handle,		/* R4 */
				       cb_logaddr,		/* R5 */
				       0, 0, 0, 0, 0);		/* R6-R10 */
#ifdef DEBUG
	ehea_dump(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
#endif
	return hret;
}

u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
			   const u8 cb_cat, const u64 select_mask,
			   void *cb_addr)
{
	u64 port_info;
	u64 cb_logaddr = __pa(cb_addr);
	u64 arr_index = 0;

	port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
		  | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);

	return ehea_plpar_hcall_norets(H_QUERY_HEA_PORT,
				       adapter_handle,		/* R4 */
				       port_info,		/* R5 */
				       select_mask,		/* R6 */
				       arr_index,		/* R7 */
				       cb_logaddr,		/* R8 */
				       0, 0);			/* R9-R10 */
}

u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
			    const u8 cb_cat, const u64 select_mask,
			    void *cb_addr)
{
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	u64 port_info;
	u64 arr_index = 0;
	u64 cb_logaddr = __pa(cb_addr);

	port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
		  | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
#ifdef DEBUG
	ehea_dump(cb_addr, sizeof(struct hcp_ehea_port_cb0), "Before HCALL");
#endif
	return ehea_plpar_hcall9(H_MODIFY_HEA_PORT,
				 outs,
				 adapter_handle,		/* R4 */
				 port_info,			/* R5 */
				 select_mask,			/* R6 */
				 arr_index,			/* R7 */
				 cb_logaddr,			/* R8 */
				 0, 0, 0, 0);			/* R9-R12 */
}

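/*
 * ehea_h_reg_dereg_bcmc() - (de)register a broadcast/multicast MAC address
 * for a port; the actual hcall number is supplied by the caller via
 * hcall_id. The MAC is shifted right by 16 bits before being packed into
 * H_REGBCMC_MACADDR, which assumes a 48-bit address stored in the upper
 * bytes of the u64 (an assumption about the caller's representation, not
 * something documented here).
 */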
u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
			  const u8 reg_type, const u64 mc_mac_addr,
			  const u16 vlan_id, const u32 hcall_id)
{
	u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id;
	u64 mac_addr = mc_mac_addr >> 16;

	r5_port_num = EHEA_BMASK_SET(H_REGBCMC_PN, port_num);
	r6_reg_type = EHEA_BMASK_SET(H_REGBCMC_REGTYPE, reg_type);
	r7_mc_mac_addr = EHEA_BMASK_SET(H_REGBCMC_MACADDR, mac_addr);
	r8_vlan_id = EHEA_BMASK_SET(H_REGBCMC_VLANID, vlan_id);

	return ehea_plpar_hcall_norets(hcall_id,
				       adapter_handle,		/* R4 */
				       r5_port_num,		/* R5 */
				       r6_reg_type,		/* R6 */
				       r7_mc_mac_addr,		/* R7 */
				       r8_vlan_id,		/* R8 */
				       0, 0);			/* R9-R10 */
}

u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
			const u64 event_mask)
{
	return ehea_plpar_hcall_norets(H_RESET_EVENTS,
				       adapter_handle,		/* R4 */
				       neq_handle,		/* R5 */
				       event_mask,		/* R6 */
				       0, 0, 0, 0);		/* R7-R10 */
}

u64 ehea_h_error_data(const u64 adapter_handle, const u64 resource_handle,
		      void *rblock)
{
	return ehea_plpar_hcall_norets(H_ERROR_DATA,
				       adapter_handle,		/* R4 */
				       resource_handle,		/* R5 */
				       __pa(rblock),		/* R6 */
				       0, 0, 0, 0);		/* R7-R10 */
}
