/*
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>

 #include <linux/mlx4/cmd.h>
 */
#include <linux/mm.h>
#include <linux/log2.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/io.h>

#include <linux/mlx4/cmd.h>

#include <asm/byteorder.h>

#include "icm.h"
#include <debug.h>
/*
 #include "fw.h"
 */
enum {
	MLX4_IRQNAME_SIZE = 32
};

enum {
	MLX4_NUM_ASYNC_EQE = 0x100,
	MLX4_NUM_SPARE_EQE = 0x80,
	MLX4_EQ_ENTRY_SIZE = 0x20
};
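
/*
 * MLX4_NUM_ASYNC_EQE sizes the asynchronous event queue, and every EQ is
 * created with MLX4_NUM_SPARE_EQE extra entries so that the consumer index
 * only has to be written back to hardware every MLX4_NUM_SPARE_EQE processed
 * entries without risking an EQ overrun.  MLX4_EQ_ENTRY_SIZE is the legacy
 * 32-byte EQE size; ConnectX-3 can extend this to 64 bytes (see the
 * eqe_factor handling in mlx4_create_eq() below).
 */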

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD)		    | \
			       (1ull << MLX4_EVENT_TYPE_OP_REQUIRED)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)       | \
			       (1ull << MLX4_EVENT_TYPE_FLR_EVENT)	    | \
			       (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))

static u64 get_async_ev_mask(struct mlx4_priv *priv) {
	u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
	if (priv->dev.caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
	if (priv->dev.caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT);

	return async_ev_mask;
}
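
/*
 * The EQ consumer-index doorbell takes the 24-bit consumer index in
 * bits 0-23; setting bit 31 additionally re-arms the EQ so that the
 * next EQE posted by hardware triggers an interrupt.
 */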
static void eq_set_ci(struct mlx4_eq *eq, int req_not) {
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31), eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
118/*
119 static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor)
120 {
121 (entry & (eq->nent - 1)) gives us a cyclic array
122 unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor);
123 CX3 is capable of extending the EQE from 32 to 64 bytes.
124 * When this feature is enabled, the first (in the lower addresses)
125 * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes
126 * contain the legacy EQE information.
127
128 return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
129 }
130
131 static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor)
132 {
133 struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor);
134 return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
135 }
136
137 static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
138 {
139 struct mlx4_eqe *eqe =
140 &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
141 return (!!(eqe->owner & 0x80) ^
142 !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
143 eqe : NULL;
144 }
145
146 void mlx4_gen_slave_eqe(struct work_struct *work)
147 {
148 struct mlx4_mfunc_master_ctx *master =
149 container_of(work, struct mlx4_mfunc_master_ctx,
150 slave_event_work);
151 struct mlx4_mfunc *mfunc =
152 container_of(master, struct mlx4_mfunc, master);
153 struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
154 struct mlx4_dev *dev = &priv->dev;
155 struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
156 struct mlx4_eqe *eqe;
157 u8 slave;
158 int i;
159
160 for (eqe = next_slave_event_eqe(slave_eq); eqe;
161 eqe = next_slave_event_eqe(slave_eq)) {
162 slave = eqe->slave_id;
163
164 All active slaves need to receive the event
165 if (slave == ALL_SLAVES) {
166 for (i = 0; i < priv->dev.num_slaves; i++) {
167 if (mlx4_GEN_EQE(&priv->dev, i, eqe))
168 MLX4_DEBUG( "Failed to generate "
169 "event for slave %d\n", i);
170 }
171 } else {
172 if (mlx4_GEN_EQE(&priv->dev, slave, eqe))
173 MLX4_DEBUG( "Failed to generate event "
174 "for slave %d\n", slave);
175 }
176 ++slave_eq->cons;
177 }
178 }
179
180
181 static void slave_event(struct mlx4_priv *priv, u8 slave, struct mlx4_eqe *eqe)
182 {
183 struct mlx4_priv *priv = mlx4_priv(&priv->dev);
184 struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
185 struct mlx4_eqe *s_eqe;
186 unsigned long flags;
187
188 spin_lock_irqsave(&slave_eq->event_lock, flags);
189 s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
190 if ((!!(s_eqe->owner & 0x80)) ^
191 (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
192 MLX4_DEBUG( "Master failed to generate an EQE for slave: %d. "
193 "No free EQE on slave events queue\n", slave);
194 spin_unlock_irqrestore(&slave_eq->event_lock, flags);
195 return;
196 }
197
198 memcpy(s_eqe, eqe, priv->dev.caps.eqe_size - 1);
199 s_eqe->slave_id = slave;
200 ensure all information is written before setting the ownersip bit
201 wmb();
202 s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
203 ++slave_eq->prod;
204
205 queue_work(priv->mfunc.master.comm_wq,
206 &priv->mfunc.master.slave_event_work);
207 spin_unlock_irqrestore(&slave_eq->event_lock, flags);
208 }
209
210 static void mlx4_slave_event(struct mlx4_priv *priv, int slave,
211 struct mlx4_eqe *eqe)
212 {
213 struct mlx4_priv *priv = mlx4_priv(&priv->dev);
214
215 if (slave < 0 || slave >= priv->dev.num_slaves ||
216 slave == priv->dev.caps.function)
217 return;
218
219 if (!priv->mfunc.master.slave_state[slave].active)
220 return;
221
222 slave_event(&priv->dev, slave, eqe);
223 }
224
225 int mlx4_gen_pkey_eqe(struct mlx4_priv *priv, int slave, u8 port)
226 {
227 struct mlx4_eqe eqe;
228
229 struct mlx4_priv *priv = mlx4_priv(&priv->dev);
230 struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave];
231
232 if (!s_slave->active)
233 return 0;
234
235 memset(&eqe, 0, sizeof eqe);
236
237 eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
238 eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
239 eqe.event.port_mgmt_change.port = port;
240
241 return mlx4_GEN_EQE(&priv->dev, slave, &eqe);
242 }
243 EXPORT_SYMBOL(mlx4_gen_pkey_eqe);
244
245 int mlx4_gen_guid_change_eqe(struct mlx4_priv *priv, int slave, u8 port)
246 {
247 struct mlx4_eqe eqe;
248
249 don't send if we don't have the that slave
250 if (priv->dev.num_vfs < slave)
251 return 0;
252 memset(&eqe, 0, sizeof eqe);
253
254 eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
255 eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
256 eqe.event.port_mgmt_change.port = port;
257
258 return mlx4_GEN_EQE(&priv->dev, slave, &eqe);
259 }
260 EXPORT_SYMBOL(mlx4_gen_guid_change_eqe);
261
262 int mlx4_gen_port_state_change_eqe(struct mlx4_priv *priv, int slave, u8 port,
263 u8 port_subtype_change)
264 {
265 struct mlx4_eqe eqe;
266
267 don't send if we don't have the that slave
268 if (priv->dev.num_vfs < slave)
269 return 0;
270 memset(&eqe, 0, sizeof eqe);
271
272 eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
273 eqe.subtype = port_subtype_change;
274 eqe.event.port_change.port = cpu_to_be32(port << 28);
275
276 MLX4_DEBUG( "%s: sending: %d to slave: %d on port: %d\n", __func__,
277 port_subtype_change, slave, port);
278 return mlx4_GEN_EQE(&priv->dev, slave, &eqe);
279 }
280 EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe);
281
282 enum slave_port_state mlx4_get_slave_port_state(struct mlx4_priv *priv, int slave, u8 port)
283 {
284 struct mlx4_priv *priv = mlx4_priv(&priv->dev);
285 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
286 if (slave >= priv->dev.num_slaves || port > MLX4_MAX_PORTS) {
287 pr_err("%s: Error: asking for slave:%d, port:%d\n",
288 __func__, slave, port);
289 return SLAVE_PORT_DOWN;
290 }
291 return s_state[slave].port_state[port];
292 }
293 EXPORT_SYMBOL(mlx4_get_slave_port_state);
294
295 static int mlx4_set_slave_port_state(struct mlx4_priv *priv, int slave, u8 port,
296 enum slave_port_state state)
297 {
298 struct mlx4_priv *priv = mlx4_priv(&priv->dev);
299 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
300
301 if (slave >= priv->dev.num_slaves || port > MLX4_MAX_PORTS || port == 0) {
302 pr_err("%s: Error: asking for slave:%d, port:%d\n",
303 __func__, slave, port);
304 return -1;
305 }
306 s_state[slave].port_state[port] = state;
307
308 return 0;
309 }
310
311 static void set_all_slave_state(struct mlx4_priv *priv, u8 port, int event)
312 {
313 int i;
314 enum slave_port_gen_event gen_event;
315
316 for (i = 0; i < priv->dev.num_slaves; i++)
317 set_and_calc_slave_port_state(&priv->dev, i, port, event, &gen_event);
318 }
319 *************************************************************************
320 The function get as input the new event to that port,
321 and according to the prev state change the slave's port state.
322 The events are:
323 MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
324 MLX4_PORT_STATE_DEV_EVENT_PORT_UP
325 MLX4_PORT_STATE_IB_EVENT_GID_VALID
326 MLX4_PORT_STATE_IB_EVENT_GID_INVALID
327 **************************************************************************
328 int set_and_calc_slave_port_state(struct mlx4_priv *priv, int slave,
329 u8 port, int event,
330 enum slave_port_gen_event *gen_event)
331 {
332 struct mlx4_priv *priv = mlx4_priv(&priv->dev);
333 struct mlx4_slave_state *ctx = NULL;
334 unsigned long flags;
335 int ret = -1;
336 enum slave_port_state cur_state =
337 mlx4_get_slave_port_state(&priv->dev, slave, port);
338
339 *gen_event = SLAVE_PORT_GEN_EVENT_NONE;
340
341 if (slave >= priv->dev.num_slaves || port > MLX4_MAX_PORTS || port == 0) {
342 pr_err("%s: Error: asking for slave:%d, port:%d\n",
343 __func__, slave, port);
344 return ret;
345 }
346
347 ctx = &priv->mfunc.master.slave_state[slave];
348 spin_lock_irqsave(&ctx->lock, flags);
349
350 switch (cur_state) {
351 case SLAVE_PORT_DOWN:
352 if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
353 mlx4_set_slave_port_state(&priv->dev, slave, port,
354 SLAVE_PENDING_UP);
355 break;
356 case SLAVE_PENDING_UP:
357 if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event)
358 mlx4_set_slave_port_state(&priv->dev, slave, port,
359 SLAVE_PORT_DOWN);
360 else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) {
361 mlx4_set_slave_port_state(&priv->dev, slave, port,
362 SLAVE_PORT_UP);
363 *gen_event = SLAVE_PORT_GEN_EVENT_UP;
364 }
365 break;
366 case SLAVE_PORT_UP:
367 if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) {
368 mlx4_set_slave_port_state(&priv->dev, slave, port,
369 SLAVE_PORT_DOWN);
370 *gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
371 } else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID ==
372 event) {
373 mlx4_set_slave_port_state(&priv->dev, slave, port,
374 SLAVE_PENDING_UP);
375 *gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
376 }
377 break;
378 default:
379 pr_err("%s: BUG!!! UNKNOWN state: "
380 "slave:%d, port:%d\n", __func__, slave, port);
381 goto out;
382 }
383 ret = mlx4_get_slave_port_state(&priv->dev, slave, port);
384
385 out:
386 spin_unlock_irqrestore(&ctx->lock, flags);
387 return ret;
388 }
389
390 EXPORT_SYMBOL(set_and_calc_slave_port_state);
391
392 int mlx4_gen_slaves_port_mgt_ev(struct mlx4_priv *priv, u8 port, int attr, u16 sm_lid, u8 sm_sl)
393 {
394 struct mlx4_eqe eqe;
395
396 memset(&eqe, 0, sizeof eqe);
397
398 eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
399 eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO;
400 eqe.event.port_mgmt_change.port = port;
401 eqe.event.port_mgmt_change.params.port_info.changed_attr =
402 cpu_to_be32((u32) attr);
403 if (attr & MSTR_SM_CHANGE_MASK) {
404 eqe.event.port_mgmt_change.params.port_info.mstr_sm_lid =
405 cpu_to_be16(sm_lid);
406 eqe.event.port_mgmt_change.params.port_info.mstr_sm_sl =
407 sm_sl;
408 }
409
410 slave_event(&priv->dev, ALL_SLAVES, &eqe);
411 return 0;
412 }
413 EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev);
414
415 void mlx4_master_handle_slave_flr(struct work_struct *work)
416 {
417 struct mlx4_mfunc_master_ctx *master =
418 container_of(work, struct mlx4_mfunc_master_ctx,
419 slave_flr_event_work);
420 struct mlx4_mfunc *mfunc =
421 container_of(master, struct mlx4_mfunc, master);
422 struct mlx4_priv *priv =
423 container_of(mfunc, struct mlx4_priv, mfunc);
424 struct mlx4_dev *dev = &priv->dev;
425 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
426 int i;
427 int err;
428 unsigned long flags;
429
430 MLX4_DEBUG( "mlx4_handle_slave_flr\n");
431
432 for (i = 0 ; i < priv->dev.num_slaves; i++) {
433
434 if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
435 MLX4_DEBUG( "mlx4_handle_slave_flr: "
436 "clean slave: %d\n", i);
437
438 mlx4_delete_all_resources_for_slave(&priv->dev, i);
439 return the slave to running mode
440 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
441 slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
442 slave_state[i].is_slave_going_down = 0;
443 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
444 notify the FW:
445 err = mlx4_cmd(&priv->dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
446 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
447 if (err)
448 MLX4_DEBUG( "Failed to notify FW on "
449 "FLR done (slave:%d)\n", i);
450 }
451 }
452 }
453
454 static int mlx4_eq_int(struct mlx4_priv *priv, struct mlx4_eq *eq)
455 {
456 struct mlx4_priv *priv = mlx4_priv(&priv->dev);
457 struct mlx4_eqe *eqe;
458 int cqn;
459 int eqes_found = 0;
460 int set_ci = 0;
461 int port;
462 int slave = 0;
463 int ret;
464 u32 flr_slave;
465 u8 update_slave_state;
466 int i;
467 enum slave_port_gen_event gen_event;
468 unsigned long flags;
469 struct mlx4_vport_state *s_info;
470
471 while ((eqe = next_eqe_sw(eq, priv->dev.caps.eqe_factor))) {
472
473 * Make sure we read EQ entry contents after we've
474 * checked the ownership bit.
475
476 rmb();
477
478 switch (eqe->type) {
479 case MLX4_EVENT_TYPE_COMP:
480 cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
481 mlx4_cq_completion(&priv->dev, cqn);
482 break;
483
484 case MLX4_EVENT_TYPE_PATH_MIG:
485 case MLX4_EVENT_TYPE_COMM_EST:
486 case MLX4_EVENT_TYPE_SQ_DRAINED:
487 case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
488 case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
489 case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
490 case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
491 case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
492 MLX4_DEBUG( "event %d arrived\n", eqe->type);
493 if (mlx4_is_master(&priv->dev)) {
494 forward only to slave owning the QP
495 ret = mlx4_get_slave_from_resource_id(&priv->dev,
496 RES_QP,
497 be32_to_cpu(eqe->event.qp.qpn)
498 & 0xffffff, &slave);
499 if (ret && ret != -ENOENT) {
500 MLX4_DEBUG( "QP event %02x(%02x) on "
501 "EQ %d at index %u: could "
502 "not get slave id (%d)\n",
503 eqe->type, eqe->subtype,
504 eq->eqn, eq->cons_index, ret);
505 break;
506 }
507
508 if (!ret && slave != priv->dev.caps.function) {
509 mlx4_slave_event(&priv->dev, slave, eqe);
510 break;
511 }
512
513 }
514 mlx4_qp_event(&priv->dev, be32_to_cpu(eqe->event.qp.qpn) &
515 0xffffff, eqe->type);
516 break;
517
518 case MLX4_EVENT_TYPE_SRQ_LIMIT:
519 MLX4_DEBUG( "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
520 __func__);
521 fall through
522 case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
523 if (mlx4_is_master(&priv->dev)) {
524 forward only to slave owning the SRQ
525 ret = mlx4_get_slave_from_resource_id(&priv->dev,
526 RES_SRQ,
527 be32_to_cpu(eqe->event.srq.srqn)
528 & 0xffffff,
529 &slave);
530 if (ret && ret != -ENOENT) {
531 MLX4_DEBUG( "SRQ event %02x(%02x) "
532 "on EQ %d at index %u: could"
533 " not get slave id (%d)\n",
534 eqe->type, eqe->subtype,
535 eq->eqn, eq->cons_index, ret);
536 break;
537 }
538 MLX4_DEBUG( "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
539 __func__, slave,
540 be32_to_cpu(eqe->event.srq.srqn),
541 eqe->type, eqe->subtype);
542
543 if (!ret && slave != priv->dev.caps.function) {
544 MLX4_DEBUG( "%s: sending event %02x(%02x) to slave:%d\n",
545 __func__, eqe->type,
546 eqe->subtype, slave);
547 mlx4_slave_event(&priv->dev, slave, eqe);
548 break;
549 }
550 }
551 mlx4_srq_event(&priv->dev, be32_to_cpu(eqe->event.srq.srqn) &
552 0xffffff, eqe->type);
553 break;
554
555 case MLX4_EVENT_TYPE_CMD:
556 mlx4_cmd_event(&priv->dev,
557 be16_to_cpu(eqe->event.cmd.token),
558 eqe->event.cmd.status,
559 be64_to_cpu(eqe->event.cmd.out_param));
560 break;
561
562 case MLX4_EVENT_TYPE_PORT_CHANGE:
563 port = be32_to_cpu(eqe->event.port_change.port) >> 28;
564 if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
565 mlx4_dispatch_event(&priv->dev, MLX4_DEV_EVENT_PORT_DOWN,
566 port);
567 mlx4_priv(&priv->dev)->sense.do_sense_port[port] = 1;
568 if (!mlx4_is_master(&priv->dev))
569 break;
570 for (i = 0; i < priv->dev.num_slaves; i++) {
571 if (priv->dev.caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
572 if (i == mlx4_master_func_num(&priv->dev))
573 continue;
574 MLX4_DEBUG( "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
575 " to slave: %d, port:%d\n",
576 __func__, i, port);
577 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
578 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
579 mlx4_slave_event(&priv->dev, i, eqe);
580 } else {   IB port
581 set_and_calc_slave_port_state(&priv->dev, i, port,
582 MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
583 &gen_event);
584 we can be in pending state, then do not send port_down event
585 if (SLAVE_PORT_GEN_EVENT_DOWN ==  gen_event) {
586 if (i == mlx4_master_func_num(&priv->dev))
587 continue;
588 mlx4_slave_event(&priv->dev, i, eqe);
589 }
590 }
591 }
592 } else {
593 mlx4_dispatch_event(&priv->dev, MLX4_DEV_EVENT_PORT_UP, port);
594
595 mlx4_priv(&priv->dev)->sense.do_sense_port[port] = 0;
596
597 if (!mlx4_is_master(&priv->dev))
598 break;
599 if (priv->dev.caps.port_type[port] == MLX4_PORT_TYPE_ETH)
600 for (i = 0; i < priv->dev.num_slaves; i++) {
601 if (i == mlx4_master_func_num(&priv->dev))
602 continue;
603 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
604 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
605 mlx4_slave_event(&priv->dev, i, eqe);
606 }
607 else  IB port
608 port-up event will be sent to a slave when the
609 * slave's alias-guid is set. This is done in alias_GUID.c
610
611 set_all_slave_state(&priv->dev, port, MLX4_DEV_EVENT_PORT_UP);
612 }
613 break;
614
615 case MLX4_EVENT_TYPE_CQ_ERROR:
616 MLX4_DEBUG( "CQ %s on CQN %06x\n",
617 eqe->event.cq_err.syndrome == 1 ?
618 "overrun" : "access violation",
619 be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
620 if (mlx4_is_master(&priv->dev)) {
621 ret = mlx4_get_slave_from_resource_id(&priv->dev,
622 RES_CQ,
623 be32_to_cpu(eqe->event.cq_err.cqn)
624 & 0xffffff, &slave);
625 if (ret && ret != -ENOENT) {
626 MLX4_DEBUG( "CQ event %02x(%02x) on "
627 "EQ %d at index %u: could "
628 "not get slave id (%d)\n",
629 eqe->type, eqe->subtype,
630 eq->eqn, eq->cons_index, ret);
631 break;
632 }
633
634 if (!ret && slave != priv->dev.caps.function) {
635 mlx4_slave_event(&priv->dev, slave, eqe);
636 break;
637 }
638 }
639 mlx4_cq_event(&priv->dev,
640 be32_to_cpu(eqe->event.cq_err.cqn)
641 & 0xffffff,
642 eqe->type);
643 break;
644
645 case MLX4_EVENT_TYPE_EQ_OVERFLOW:
646 MLX4_DEBUG( "EQ overrun on EQN %d\n", eq->eqn);
647 break;
648
649 case MLX4_EVENT_TYPE_OP_REQUIRED:
650 atomic_inc(&priv->opreq_count);
651 FW commands can't be executed from interrupt context
652 working in deferred task
653 queue_work(mlx4_wq, &priv->opreq_task);
654 break;
655
656 case MLX4_EVENT_TYPE_COMM_CHANNEL:
657 if (!mlx4_is_master(&priv->dev)) {
658 MLX4_DEBUG( "Received comm channel event "
659 "for non master device\n");
660 break;
661 }
662
663 memcpy(&priv->mfunc.master.comm_arm_bit_vector,
664 eqe->event.comm_channel_arm.bit_vec,
665 sizeof eqe->event.comm_channel_arm.bit_vec);
666
667 if (!queue_work(priv->mfunc.master.comm_wq,
668 &priv->mfunc.master.comm_work))
669 MLX4_DEBUG( "Failed to queue comm channel work\n");
670
671 if (!queue_work(priv->mfunc.master.comm_wq,
672 &priv->mfunc.master.arm_comm_work))
673 MLX4_DEBUG( "Failed to queue arm comm channel work\n");
674 break;
675
676 case MLX4_EVENT_TYPE_FLR_EVENT:
677 flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
678 if (!mlx4_is_master(&priv->dev)) {
679 MLX4_DEBUG( "Non-master function received"
680 "FLR event\n");
681 break;
682 }
683
684 MLX4_DEBUG( "FLR event for slave: %d\n", flr_slave);
685
686 if (flr_slave >= priv->dev.num_slaves) {
687 MLX4_DEBUG(
688 "Got FLR for unknown function: %d\n",
689 flr_slave);
690 update_slave_state = 0;
691 } else
692 update_slave_state = 1;
693
694 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
695 if (update_slave_state) {
696 priv->mfunc.master.slave_state[flr_slave].active = false;
697 priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
698 priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
699 }
700 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
701 queue_work(priv->mfunc.master.comm_wq,
702 &priv->mfunc.master.slave_flr_event_work);
703 break;
704
705 case MLX4_EVENT_TYPE_FATAL_WARNING:
706 if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
707 if (mlx4_is_master(&priv->dev))
708 for (i = 0; i < priv->dev.num_slaves; i++) {
709 MLX4_DEBUG( "%s: Sending "
710 "MLX4_FATAL_WARNING_SUBTYPE_WARMING"
711 " to slave: %d\n", __func__, i);
712 if (i == priv->dev.caps.function)
713 continue;
714 mlx4_slave_event(&priv->dev, i, eqe);
715 }
716 MLX4_DEBUG( "Temperature Threshold was reached! "
717 "Threshold: %d celsius degrees; "
718 "Current Temperature: %d\n",
719 be16_to_cpu(eqe->event.warming.warning_threshold),
720 be16_to_cpu(eqe->event.warming.current_temperature));
721 } else
722 MLX4_DEBUG( "Unhandled event FATAL WARNING (%02x), "
723 "subtype %02x on EQ %d at index %u. owner=%x, "
724 "nent=0x%x, slave=%x, ownership=%s\n",
725 eqe->type, eqe->subtype, eq->eqn,
726 eq->cons_index, eqe->owner, eq->nent,
727 eqe->slave_id,
728 !!(eqe->owner & 0x80) ^
729 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
730
731 break;
732
733 case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
734 mlx4_dispatch_event(&priv->dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
735 (unsigned long) eqe);
736 break;
737
738 case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
739 switch (eqe->subtype) {
740 case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
741 MLX4_DEBUG( "Bad cable detected on port %u\n",
742 eqe->event.bad_cable.port);
743 break;
744 case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
745 MLX4_DEBUG( "Unsupported cable detected\n");
746 break;
747 default:
748 MLX4_DEBUG( "Unhandled recoverable error event "
749 "detected: %02x(%02x) on EQ %d at index %u. "
750 "owner=%x, nent=0x%x, ownership=%s\n",
751 eqe->type, eqe->subtype, eq->eqn,
752 eq->cons_index, eqe->owner, eq->nent,
753 !!(eqe->owner & 0x80) ^
754 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
755 break;
756 }
757 break;
758
759 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
760 case MLX4_EVENT_TYPE_ECC_DETECT:
761 default:
762 MLX4_DEBUG( "Unhandled event %02x(%02x) on EQ %d at "
763 "index %u. owner=%x, nent=0x%x, slave=%x, "
764 "ownership=%s\n",
765 eqe->type, eqe->subtype, eq->eqn,
766 eq->cons_index, eqe->owner, eq->nent,
767 eqe->slave_id,
768 !!(eqe->owner & 0x80) ^
769 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
770 break;
771 };
772
773 ++eq->cons_index;
774 eqes_found = 1;
775 ++set_ci;
776
777
778 * The HCA will think the queue has overflowed if we
779 * don't tell it we've been processing events.  We
780 * create our EQs with MLX4_NUM_SPARE_EQE extra
781 * entries, so we must update our consumer index at
782 * least that often.
783
784 if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
785 eq_set_ci(eq, 0);
786 set_ci = 0;
787 }
788 }
789
790 eq_set_ci(eq, 1);
791
792 return eqes_found;
793 }
794
795 static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
796 {
797 struct mlx4_dev *dev = dev_ptr;
798 struct mlx4_priv *priv = mlx4_priv(&priv->dev);
799 int work = 0;
800 int i;
801
802 writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
803
804 for (i = 0; i < priv->dev.caps.num_comp_vectors + 1; ++i)
805 work |= mlx4_eq_int(&priv->dev, &priv->eq_table.eq[i]);
806
807 return IRQ_RETVAL(work);
808 }
809
810 static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
811 {
812 struct mlx4_eq  *eq  = eq_ptr;
813 struct mlx4_dev *dev = eq->dev;
814
815 mlx4_eq_int(&priv->dev, eq);
816
817 MSI-X vectors always belong to us
818 return IRQ_HANDLED;
819 }
820
821 int mlx4_MAP_EQ_wrapper(struct mlx4_priv *priv, int slave,
822 struct mlx4_vhcr *vhcr,
823 struct mlx4_cmd_mailbox *inbox,
824 struct mlx4_cmd_mailbox *outbox,
825 struct mlx4_cmd_info *cmd)
826 {
827 struct mlx4_priv *priv = mlx4_priv(&priv->dev);
828 struct mlx4_slave_event_eq_info *event_eq =
829 priv->mfunc.master.slave_state[slave].event_eq;
830 u32 in_modifier = vhcr->in_modifier;
831 u32 eqn = in_modifier & 0x3FF;
832 u64 in_param =  vhcr->in_param;
833 int err = 0;
834 int i;
835
836 if (slave == priv->dev.caps.function)
837 err = mlx4_cmd(&priv->dev, in_param, (in_modifier & 0x80000000) | eqn,
838 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
839 MLX4_CMD_NATIVE);
840 if (!err)
841 for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
842 if (in_param & (1LL << i))
843 event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;
844
845 return err;
846 }
847 */
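
/*
 * MAP_EQ tells the firmware which asynchronous event types (event_mask)
 * should be reported on EQ eq_num; bit 31 of the input modifier selects
 * unmap instead of map.
 */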
static int mlx4_MAP_EQ(struct mlx4_priv *priv, u64 event_mask, int unmap,
		int eq_num) {
	return mlx4_cmd(&priv->dev, event_mask, (unmap << 31) | eq_num, 0,
			MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

static int mlx4_SW2HW_EQ(struct mlx4_priv *priv,
		struct mlx4_cmd_mailbox *mailbox, int eq_num) {
	return mlx4_cmd(&priv->dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
/*
 static int mlx4_HW2SW_EQ(struct mlx4_priv *priv, struct mlx4_cmd_mailbox *mailbox,
 int eq_num)
 {
 return mlx4_cmd_box(&priv->dev, 0, mailbox->dma, eq_num,
 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
 MLX4_CMD_WRAPPED);
 }
 */
static int mlx4_num_eq_uar(struct mlx4_dev *dev) {

	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs
			+ dev->caps.comp_pool) / 4 - dev->caps.reserved_eqs / 4 + 1;
}
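
/*
 * Worked example (hypothetical numbers): with num_comp_vectors = 4,
 * reserved_eqs = 8 and comp_pool = 0, the highest EQ index used is
 * 8 + 4 + 1 - 1 = 12, so (4 + 1 + 8 + 0) / 4 - 8 / 4 + 1 = 3 - 2 + 1 = 2
 * UAR pages need to be mapped.
 */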

/*
 * Return the doorbell address for this EQ: doorbells live at offset 0x800
 * of the UAR page covering the EQ number, 8 bytes apart, and the virtual
 * address of that UAR page is computed and cached on first use.
 */
static void *mlx4_get_eq_uar(struct mlx4_priv *priv, struct mlx4_eq *eq) {
	int index;

	index = eq->eqn / 4 - priv->dev.caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] = priv->dev.bar_info[1].vaddr
				+ ((eq->eqn / 4) << PAGE_SHIFT);
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}
/*
 static void mlx4_unmap_uar(struct mlx4_priv *priv)
 {
 struct mlx4_priv *priv = mlx4_priv(&priv->dev);
 int i;

 for (i = 0; i < mlx4_num_eq_uar(&priv->dev); ++i)
 if (priv->eq_table.uar_map[i]) {
 iounmap(priv->eq_table.uar_map[i]);
 priv->eq_table.uar_map[i] = NULL;
 }
 }
 */
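
/*
 * mlx4_create_eq() builds one event queue: it allocates and zeroes the EQE
 * pages, reserves an EQ number from the bitmap, looks up the EQ's doorbell,
 * writes the page list into an MTT and finally hands the EQ context to the
 * firmware with SW2HW_EQ.
 */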
static int mlx4_create_eq(struct mlx4_priv *priv, int nent, u8 intr,
		struct mlx4_eq *eq) {
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	genpaddr_t t = 0;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->priv = priv;
	eq->nent = roundup_pow_of_two(max(nent, 2));
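	/*
	 * The number of entries is rounded up to a power of two so that
	 * (index & (nent - 1)) cycles through the queue and the
	 * (cons_index & nent) bit flips on every pass, which is what the
	 * EQE ownership test in the (currently commented-out) next_eqe_sw()
	 * relies on.
	 */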
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
	npages = PAGE_ALIGN(
			eq->nent * (MLX4_EQ_ENTRY_SIZE << priv->dev.caps.eqe_factor))
			/ BASE_PAGE_SIZE;

	eq->page_list = malloc(npages * sizeof *eq->page_list);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = malloc(npages * sizeof *dma_list);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox();
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc(BASE_PAGE_SIZE, &t);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, BASE_PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(priv, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(&priv->dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(&priv->dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
					MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size = ilog2(eq->nent);
	eq_context->intr = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	/*printf("mtt_addr: %lx\n", mlx4_mtt_addr(&priv->dev, &eq->mtt));
	 printf("off: %d\n", eq->mtt.offset);
	 printf("size: %d\n", priv->dev.caps.mtt_entry_sz);*/

	mtt_addr = mlx4_mtt_addr(&priv->dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(priv, mailbox, eq->eqn);
	if (err) {
		MLX4_DEBUG("SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	free(dma_list);
	mlx4_free_cmd_mailbox(mailbox);

	eq->cons_index = 0;

	return err;

	/*TODO*/
	err_out_free_mtt: /*mlx4_mtt_cleanup(&priv->dev, &eq->mtt);*/

	err_out_free_eq: /*mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn,
	 MLX4_USE_RR);*/

	err_out_free_pages: /*for (i = 0; i < npages; ++i)
	 if (eq->page_list[i].buf)
	 dma_free(&priv->dev.pdev->dev, PAGE_SIZE,
	 eq->page_list[i].buf, eq->page_list[i].map);*/

	mlx4_free_cmd_mailbox(mailbox);

	err_out_free: free(eq->page_list);
	free(dma_list);

	err_out: return err;
}
1013/*
1014 static void mlx4_free_eq(struct mlx4_priv *priv,
1015 struct mlx4_eq *eq)
1016 {
1017 struct mlx4_priv *priv = mlx4_priv(&priv->dev);
1018 struct mlx4_cmd_mailbox *mailbox;
1019 int err;
1020 int i;
1021 CX3 is capable of extending the CQE\EQE from 32 to 64 bytes
1022 int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << priv->dev.caps.eqe_factor) * eq->nent) / PAGE_SIZE;
1023
1024 mailbox = mlx4_alloc_cmd_mailbox(&priv->dev);
1025 if (IS_ERR(mailbox))
1026 return;
1027
1028 err = mlx4_HW2SW_EQ(&priv->dev, mailbox, eq->eqn);
1029 if (err)
1030 MLX4_DEBUG( "HW2SW_EQ failed (%d)\n", err);
1031
1032 if (0) {
1033 MLX4_DEBUG( "Dumping EQ context %02x:\n", eq->eqn);
1034 for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
1035 if (i % 4 == 0)
1036 pr_cont("[%02x] ", i * 4);
1037 pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
1038 if ((i + 1) % 4 == 0)
1039 pr_cont("\n");
1040 }
1041 }
1042
1043 mlx4_mtt_cleanup(&priv->dev, &eq->mtt);
1044 for (i = 0; i < npages; ++i)
1045 dma_free_coherent(&priv->dev.pdev->dev, PAGE_SIZE,
1046 eq->page_list[i].buf,
1047 eq->page_list[i].map);
1048
1049 kfree(eq->page_list);
1050 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
1051 mlx4_free_cmd_mailbox(&priv->dev, mailbox);
1052 }
1053
1054 static void mlx4_free_irqs(struct mlx4_priv *priv)
1055 {
1056 struct mlx4_eq_table *eq_table = &mlx4_priv(&priv->dev)->eq_table;
1057 struct mlx4_priv *priv = mlx4_priv(&priv->dev);
1058 int	i, vec;
1059
1060 if (eq_table->have_irq)
1061 free_irq(priv->dev.pdev->irq, dev);
1062
1063 for (i = 0; i < priv->dev.caps.num_comp_vectors + 1; ++i)
1064 if (eq_table->eq[i].have_irq) {
1065 free_irq(eq_table->eq[i].irq, eq_table->eq + i);
1066 eq_table->eq[i].have_irq = 0;
1067 }
1068
1069 for (i = 0; i < priv->dev.caps.comp_pool; i++) {
1070
1071 * Freeing the assigned irq's
1072 * all bits should be 0, but we need to validate
1073
1074 if (priv->msix_ctl.pool_bm & 1ULL << i) {
1075 NO need protecting
1076 vec = priv->dev.caps.num_comp_vectors + 1 + i;
1077 free_irq(priv->eq_table.eq[vec].irq,
1078 &priv->eq_table.eq[vec]);
1079 }
1080 }
1081
1082
1083 kfree(eq_table->irq_names);
1084 }
1085 */
static int mlx4_map_clr_int(struct mlx4_priv *priv) {

	/* equivalent of: pci_resource_start(priv->dev.pdev, priv->fw.clr_int_bar) + priv->fw.clr_int_base */
	priv->clr_base = priv->dev.bar_info[priv->fw.clr_int_bar].vaddr
			+ priv->fw.clr_int_base;

	return 0;
}
/*
 static void mlx4_unmap_clr_int(struct mlx4_priv *priv)
 {
 struct mlx4_priv *priv = mlx4_priv(&priv->dev);

 iounmap(priv->clr_base);
 }
 */
int mlx4_alloc_eq_table(struct mlx4_priv *priv) {

	priv->eq_table.eq = calloc(
			priv->dev.caps.num_eqs - priv->dev.caps.reserved_eqs,
			sizeof *priv->eq_table.eq);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}
/*
 void mlx4_free_eq_table(struct mlx4_priv *priv)
 {
 kfree(mlx4_priv(&priv->dev)->eq_table.eq);
 }
 */
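
/*
 * EQ layout set up below: eq[0 .. num_comp_vectors - 1] are the completion
 * EQs, eq[num_comp_vectors] is the asynchronous event EQ, and
 * eq[num_comp_vectors + 1 ..] hold the optional extra completion vectors
 * from comp_pool.
 */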
int mlx4_init_eq_table(struct mlx4_priv *priv) {
	int err;
	int i;

	priv->eq_table.uar_map = calloc(mlx4_num_eq_uar(&priv->dev),
			sizeof *priv->eq_table.uar_map);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, priv->dev.caps.num_eqs,
			priv->dev.caps.num_eqs - 1, priv->dev.caps.reserved_eqs, 0);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(&priv->dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	if (!mlx4_is_slave(&priv->dev)) {
		err = mlx4_map_clr_int(priv);
		if (err)
			goto err_out_bitmap;

		priv->eq_table.clr_mask = swab32(1 << (priv->eq_table.inta_pin & 31));
		priv->eq_table.clr_int = priv->clr_base
				+ (priv->eq_table.inta_pin < 32 ? 4 : 0);
	}

	priv->eq_table.irq_names = malloc(MLX4_IRQNAME_SIZE
			* (priv->dev.caps.num_comp_vectors + 1
					+ priv->dev.caps.comp_pool));
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_clr_int;
	}

	for (i = 0; i < priv->dev.caps.num_comp_vectors; ++i) {
		err = mlx4_create_eq(priv,
				priv->dev.caps.num_cqs - priv->dev.caps.reserved_cqs
						+ MLX4_NUM_SPARE_EQE,
				(priv->dev.flags & MLX4_FLAG_MSI_X) ? i : 0,
				&priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	err = mlx4_create_eq(priv, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			(priv->dev.flags & MLX4_FLAG_MSI_X) ?
					priv->dev.caps.num_comp_vectors : 0,
			&priv->eq_table.eq[priv->dev.caps.num_comp_vectors]);
	if (err)
		goto err_out_comp;

	/* if the additional completion vector pool size is 0, this loop will not run */
	for (i = priv->dev.caps.num_comp_vectors + 1;
			i < priv->dev.caps.num_comp_vectors + priv->dev.caps.comp_pool + 1;
			++i) {

		err = mlx4_create_eq(priv,
				priv->dev.caps.num_cqs - priv->dev.caps.reserved_cqs
						+ MLX4_NUM_SPARE_EQE,
				(priv->dev.flags & MLX4_FLAG_MSI_X) ? i : 0,
				&priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	if (priv->dev.flags & MLX4_FLAG_MSI_X) {
		assert(!"not implemented!");
		/*const char *eq_name;

		 for (i = 0; i < priv->dev.caps.num_comp_vectors + 1; ++i) {
		 if (i < priv->dev.caps.num_comp_vectors) {
		 snprintf(priv->eq_table.irq_names + i * MLX4_IRQNAME_SIZE,
		 MLX4_IRQNAME_SIZE, "mlx4-comp-%d@pci:", i
		 pci_name(priv->dev.pdev));
		 } else {
		 snprintf(priv->eq_table.irq_names + i * MLX4_IRQNAME_SIZE,
		 MLX4_IRQNAME_SIZE, "mlx4-async@pci:"
		 pci_name(priv->dev.pdev));
		 }

		 eq_name = priv->eq_table.irq_names + i * MLX4_IRQNAME_SIZE;
		 err = request_irq(priv->eq_table.eq[i].irq, mlx4_msi_x_interrupt, 0,
		 eq_name, priv->eq_table.eq + i);
		 if (err)
		 goto err_out_async;

		 priv->eq_table.eq[i].have_irq = 1;
		 }*/
	} else {
		snprintf(priv->eq_table.irq_names, MLX4_IRQNAME_SIZE,
		DRV_NAME "@pci:"/*, pci_name(priv->dev.pdev)*/);
		/*err = request_irq(priv->dev.pdev->irq, mlx4_interrupt, IRQF_SHARED,
		 priv->eq_table.irq_names, dev);
		 if (err)
		 goto err_out_async;*/

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(priv, get_async_ev_mask(priv), 0,
			priv->eq_table.eq[priv->dev.caps.num_comp_vectors].eqn);
	if (err)
		MLX4_DEBUG("MAP_EQ for async EQ %d failed (%d)\n",
				priv->eq_table.eq[priv->dev.caps.num_comp_vectors].eqn, err);

	for (i = 0; i < priv->dev.caps.num_comp_vectors + 1; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	return 0;

	/*TODO*/
	/*err_out_async:*//*mlx4_free_eq(&priv->dev,
	 &priv->eq_table.eq[priv->dev.caps.num_comp_vectors]);*/

	err_out_comp: i = priv->dev.caps.num_comp_vectors - 1;

	err_out_unmap: /*while (i >= 0) {
	 mlx4_free_eq(&priv->dev, &priv->eq_table.eq[i]);
	 --i;
	 }
	 mlx4_free_irqs(&priv->dev);*/

	err_out_clr_int: /*if (!mlx4_is_slave(&priv->dev))
	 mlx4_unmap_clr_int(&priv->dev);*/

	err_out_bitmap: /*mlx4_unmap_uar(&priv->dev);*/
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	err_out_free: free(priv->eq_table.uar_map);

	return err;
}
1258/*		void mlx4_cleanup_eq_table(struct mlx4_priv *priv)
1259 {
1260 struct mlx4_priv *priv = mlx4_priv(&priv->dev);
1261 int i;
1262
1263 mlx4_MAP_EQ(&priv->dev, get_async_ev_mask(&priv->dev), 1,
1264 priv->eq_table.eq[priv->dev.caps.num_comp_vectors].eqn);
1265
1266 mlx4_free_irqs(&priv->dev);
1267
1268 for (i = 0; i < priv->dev.caps.num_comp_vectors + priv->dev.caps.comp_pool + 1; ++i)
1269 mlx4_free_eq(&priv->dev, &priv->eq_table.eq[i]);
1270
1271 if (!mlx4_is_slave(&priv->dev))
1272 mlx4_unmap_clr_int(&priv->dev);
1273
1274 mlx4_unmap_uar(&priv->dev);
1275 mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
1276
1277 kfree(priv->eq_table.uar_map);
1278 }
1279
1280 A test that verifies that we can accept interrupts on all
1281 * the irq vectors of the device.
1282 * Interrupts are checked using the NOP command.
1283
1284 int mlx4_test_interrupts(struct mlx4_priv *priv)
1285 {
1286 struct mlx4_priv *priv = mlx4_priv(&priv->dev);
1287 int i;
1288 int err;
1289
1290 err = mlx4_NOP(&priv->dev);
1291 When not in MSI_X, there is only one irq to check
1292 if (!(priv->dev.flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(&priv->dev))
1293 return err;
1294
1295 A loop over all completion vectors, for each vector we will check
1296 * whether it works by mapping command completions to that vector
1297 * and performing a NOP command
1298
1299 for(i = 0; !err && (i < priv->dev.caps.num_comp_vectors); ++i) {
1300 Temporary use polling for command completions
1301 mlx4_cmd_use_polling(&priv->dev);
1302
1303 Map the new eq to handle all asyncronous events
1304 err = mlx4_MAP_EQ(&priv->dev, get_async_ev_mask(&priv->dev), 0,
1305 priv->eq_table.eq[i].eqn);
1306 if (err) {
1307 MLX4_DEBUG( "Failed mapping eq for interrupt test\n");
1308 mlx4_cmd_use_events(&priv->dev);
1309 break;
1310 }
1311
1312 Go back to using events
1313 mlx4_cmd_use_events(&priv->dev);
1314 err = mlx4_NOP(&priv->dev);
1315 }
1316
1317 Return to default
1318 mlx4_MAP_EQ(&priv->dev, get_async_ev_mask(&priv->dev), 0,
1319 priv->eq_table.eq[priv->dev.caps.num_comp_vectors].eqn);
1320 return err;
1321 }
1322 EXPORT_SYMBOL( mlx4_test_interrupts);
1323
1324 int mlx4_assign_eq(struct mlx4_priv *priv, char* name, int * vector) {
1325
1326 struct mlx4_priv *priv = mlx4_priv(&priv->dev);
1327 int vec = 0, err = 0, i;
1328
1329 mutex_lock(&priv->msix_ctl.pool_lock);
1330 for (i = 0; !vec && i < priv->dev.caps.comp_pool; i++) {
1331 if (~priv->msix_ctl.pool_bm & 1ULL << i) {
1332 priv->msix_ctl.pool_bm |= 1ULL << i;
1333 vec = priv->dev.caps.num_comp_vectors + 1 + i;
1334 snprintf(priv->eq_table.irq_names + vec * MLX4_IRQNAME_SIZE,
1335 MLX4_IRQNAME_SIZE, "%s", name);
1336 err = request_irq(priv->eq_table.eq[vec].irq,
1337 mlx4_msi_x_interrupt, 0,
1338 &priv->eq_table.irq_names[vec << 5],
1339 priv->eq_table.eq + vec);
1340 if (err) {
 zero out bit by flipping it
 priv->msix_ctl.pool_bm ^= 1 << i;
 vec = 0;
 continue;
 we don't want to break here
1350 }
1351 eq_set_ci(&priv->eq_table.eq[vec], 1);
1352 }
1353 }
1354 mutex_unlock(&priv->msix_ctl.pool_lock);
1355
1356 if (vec) {
1357 *vector = vec;
1358 } else {
1359 *vector = 0;
1360 err = (i == priv->dev.caps.comp_pool) ? -ENOSPC : err;
1361 }
1362 return err;
1363 }
1364 EXPORT_SYMBOL( mlx4_assign_eq);
1365
1366 void mlx4_release_eq(struct mlx4_priv *priv, int vec) {
1367 struct mlx4_priv *priv = mlx4_priv(&priv->dev);
1368 bm index
1369 int i = vec - priv->dev.caps.num_comp_vectors - 1;
1370
1371 if (likely(i >= 0)) {
 sanity check, making sure we're not trying to free irqs belonging to a legacy EQ
1381 mutex_lock(&priv->msix_ctl.pool_lock);
1382 if (priv->msix_ctl.pool_bm & 1ULL << i) {
1383 free_irq(priv->eq_table.eq[vec].irq, &priv->eq_table.eq[vec]);
1384 priv->msix_ctl.pool_bm &= ~(1ULL << i);
1385 }
1386 mutex_unlock(&priv->msix_ctl.pool_lock);
1387 }
1388
1389 }
1390 EXPORT_SYMBOL( mlx4_release_eq);
1391 */
1392