Lines Matching refs: port

53 u8 port;
63 u8 port;
83 pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, (long long)guid_indexes);
105 static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index)
111 return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
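
The lookup at lines 105-111 fetches a cached alias GUID for a 1-based IB port number by indexing the per-port demux array with port - 1. A minimal userspace C sketch of that indexing convention (struct and field names here are hypothetical stand-ins, not the driver's actual types):

    #include <stdint.h>
    #include <string.h>

    #define NUM_PORTS       2
    #define GUID_CACHE_SIZE 128

    /* Hypothetical mirror of a per-port GUID cache: one entry per IB port,
     * stored 0-based even though IB port numbers start at 1. */
    struct port_guid_cache {
        uint8_t guid_cache[GUID_CACHE_SIZE][8];   /* raw big-endian GUIDs */
    };

    static struct port_guid_cache demux[NUM_PORTS];

    /* port is the 1-based IB port number, hence port - 1 when indexing. */
    static uint64_t get_cached_alias_guid(int port, int index)
    {
        uint64_t guid_be;

        memcpy(&guid_be, demux[port - 1].guid_cache[index], sizeof(guid_be));
        return guid_be;    /* still big-endian, like the __be64 return above */
    }
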
124 * updated; in this case it waits for the smp_snoop or the port management
147 pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, (long long)guid_indexes);
163 * If it is different, wait for the snoop_smp or the port mgmt
164 * change event to update the slave on its port state change
177 pr_debug("slave: %d, port: %d prev_port_state: %d,"
181 pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
190 pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
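 
The matches at lines 163-190 compare a slave's previous and current port state and report either a PORT_UP or a PORT DOWN event to that slave. A rough, self-contained sketch of that dispatch decision (enum and function names are illustrative only, not the driver's API):

    #include <stdio.h>

    enum port_state { PORT_STATE_DOWN, PORT_STATE_UP };

    /* Illustrative only: decide which event a slave should receive after a
     * GUID update, based on the observed port state transition. */
    static void notify_slave_port_state(int slave, int port,
                                        enum port_state prev,
                                        enum port_state curr)
    {
        if (prev == curr)
            return;             /* nothing changed, nothing to report */

        if (curr == PORT_STATE_UP)
            printf("sending PORT_UP event to slave: %d, port: %d\n",
                   slave, port);
        else
            printf("sending PORT DOWN event to slave: %d, port: %d\n",
                   slave, port);
    }
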
213 port_index = cb_ctx->port - 1;
219 pr_debug("(port: %d) failed: status = %d\n",
220 cb_ctx->port, status);
230 pr_debug("lid/port: %d/%d, block_num: %d\n",
231 be16_to_cpu(guid_rec->lid), cb_ctx->port,
294 cb_ctx->port,
312 static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
318 dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
320 dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
326 *(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
337 ports_guid[port - 1].all_rec_per_port[index].ownership)
341 dev->sriov.alias_guid.ports_guid[port - 1].
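 
invalidate_guid_record() (lines 312-341) flags one cached record of a 1-based port as stale so a later pass re-issues it. A simplified sketch of that bookkeeping, with hypothetical field and constant names that only approximate the driver's:

    #include <stdint.h>

    #define NUM_PORTS          2
    #define NUM_ALIAS_RECORDS  16

    enum rec_status { REC_STATUS_IDLE, REC_STATUS_PENDING };
    enum rec_method { REC_METHOD_SET, REC_METHOD_DELETE };

    struct alias_guid_rec {
        enum rec_status status;
        enum rec_method method;
        uint64_t        guid_indexes;   /* bitmask of GUID slots to refresh */
    };

    struct port_alias_guids {
        struct alias_guid_rec all_rec_per_port[NUM_ALIAS_RECORDS];
    };

    static struct port_alias_guids ports_guid[NUM_PORTS];

    /* port is 1-based here, matching the callers above, hence port - 1. */
    static void invalidate_guid_record(int port, int index, uint64_t owned_mask)
    {
        struct alias_guid_rec *rec =
            &ports_guid[port - 1].all_rec_per_port[index];

        rec->status       = REC_STATUS_IDLE;   /* needs to be sent again */
        rec->method       = REC_METHOD_SET;    /* re-issue a SET, not a delete */
        rec->guid_indexes = owned_mask;        /* only slots this port owns */
    }
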
346 u8 port, int index,
357 &dev->sriov.alias_guid.ports_guid[port - 1].cb_list;
359 err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
361 pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n",
362 err, port);
365 /*check the port was configured by the sm, otherwise no need to send */
367 pr_debug("port %d not active...rescheduling\n", port);
379 callback_context->port = port;
400 ibdev, port, &guid_info_rec,
423 invalidate_guid_record(dev, port, index);
425 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
426 &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
436 void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
441 pr_debug("port %d\n", port);
446 invalidate_guid_record(dev, port, i);
455 ports_guid[port - 1].alias_guid_work);
456 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
457 &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
466 static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
474 if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
477 &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
479 rec->port = port;
481 dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
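 
get_next_record_to_update() (lines 466-481) takes a 0-based port (note ports_guid[port], not port - 1), scans the per-port records for one still waiting to be pushed, copies it out, and marks it as in flight. A compact, self-contained sketch of that scan with invented names:

    #include <string.h>

    #define NUM_ALIAS_RECORDS 16

    enum rec_status { REC_STATUS_IDLE, REC_STATUS_PENDING };

    struct alias_guid_rec {
        enum rec_status status;
        int             block_num;
        int             port;       /* 0-based port the record belongs to */
    };

    struct port_alias_guids {
        struct alias_guid_rec all_rec_per_port[NUM_ALIAS_RECORDS];
    };

    /* port is 0-based in this helper, matching ports_guid[port] above.
     * Returns 0 and fills *rec when a record still needs to be pushed,
     * -1 when everything is already up to date or in flight. */
    static int get_next_record_to_update(struct port_alias_guids *ports_guid,
                                         int port, struct alias_guid_rec *rec)
    {
        int j;

        for (j = 0; j < NUM_ALIAS_RECORDS; j++) {
            struct alias_guid_rec *cur = &ports_guid[port].all_rec_per_port[j];

            if (cur->status != REC_STATUS_IDLE)
                continue;

            memcpy(rec, cur, sizeof(*rec));
            rec->port   = port;
            cur->status = REC_STATUS_PENDING;   /* now owned by the work item */
            return 0;
        }
        return -1;
    }
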
491 static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
495 dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
497 memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
499 dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
503 static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
516 set_administratively_guid_record(dev, port, j, &rec_det);
540 pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
541 ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
547 set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
555 void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
564 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
565 &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
659 dev->sriov.alias_guid.ports_guid[i].port = i;
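 
The last few matches show the two numbering conventions side by side: ports_guid[i].port = i stores a 0-based array index (line 659), while the work handler converts back to a 1-based IB port with sriov_alias_port->port + 1 and rec->port + 1 before calling out (lines 540-547). A tiny runnable sketch of that convention, with invented names standing in for the driver's:

    #include <stdio.h>

    #define NUM_PORTS 2

    struct port_ctx {
        int port;    /* 0-based index into the ports_guid[] array */
    };

    static struct port_ctx ports_guid[NUM_PORTS];

    /* Stand-in for the real set_guid_rec(); takes a 1-based IB port number. */
    static void set_guid_rec(int ib_port)
    {
        printf("sending GUID record for IB port %d\n", ib_port);
    }

    int main(void)
    {
        int i;

        /* Initialization mirrors line 659: store the 0-based index. */
        for (i = 0; i < NUM_PORTS; i++)
            ports_guid[i].port = i;

        /* The work handler converts back to 1-based, as in rec->port + 1. */
        for (i = 0; i < NUM_PORTS; i++)
            set_guid_rec(ports_guid[i].port + 1);

        return 0;
    }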