/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*************************************************************/
/* This file supports the handling of the Alias GUID feature. */
/*************************************************************/
35255932Salfred#include <rdma/ib_mad.h>
36255932Salfred#include <rdma/ib_smi.h>
37255932Salfred#include <rdma/ib_cache.h>
38255932Salfred#include <rdma/ib_sa.h>
39255932Salfred#include <rdma/ib_pack.h>
40306486Shselasky#include <dev/mlx4/cmd.h>
41255932Salfred#include <linux/module.h>
42255932Salfred#include <linux/errno.h>
43255932Salfred#include <rdma/ib_user_verbs.h>
44255932Salfred#include <linux/delay.h>
45331769Shselasky#include <linux/math64.h>
46331769Shselasky#include <linux/ktime.h>
47255932Salfred#include "mlx4_ib.h"
48255932Salfred
/*
 * The driver keeps the current state of all GUIDs as they are in the HW.
 * Whenever an SMP MAD GUIDInfo record is received, the data is cached.
 */
53255932Salfred
/*
 * Per-query context for one outstanding alias-GUID SA query covering a
 * single GUIDInfo block.  Linked on the port's cb_list while the query
 * is in flight; released (or completed) by aliasguid_query_handler.
 */
struct mlx4_alias_guid_work_context {
	u8 port;			/* 1-based physical port the query was sent on */
	struct mlx4_ib_dev     *dev ;
	struct ib_sa_query     *sa_query;	/* filled by ib_sa_guid_info_rec_query; when NULL
						 * the handler signals 'done' instead of freeing
						 * (presumably a cancellation path — see handler) */
	struct completion	done;	/* signalled by the handler when sa_query == NULL */
	int			query_id;	/* id returned by ib_sa_guid_info_rec_query */
	struct list_head	list;	/* link in ports_guid[port - 1].cb_list */
	int			block_num;	/* GUIDInfo block index this query covers */
	ib_sa_comp_mask		guid_indexes;	/* comp mask of GUID entries requested */
	u8			method;	/* SA method: set or MLX4_GUID_INFO_RECORD_DELETE */
};
65255932Salfred
/*
 * Snapshot of the next GUIDInfo record to push to the SM; consumed by
 * set_guid_rec() (which converts 'port' back to 1-based via port + 1).
 */
struct mlx4_next_alias_guid_work {
	u8 port;	/* 0-based port index */
	u8 block_num;	/* GUIDInfo record (block) index within the port */
	u8 method;	/* SA method to use for this record */
	struct mlx4_sriov_alias_guid_info_rec_det rec_det;	/* copy of the record payload */
};
72255932Salfred
73331769Shselaskystatic int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
74331769Shselasky				     int *resched_delay_sec);
75255932Salfred
/*
 * Cache the alias GUIDs carried in a received GUIDInfo block into the
 * per-port demux guid_cache, one slot per slave.  Only entries flagged
 * in the stored guid_indexes comp mask are copied.  No-op unless this
 * function runs on the SR-IOV master.
 *
 * block_num - index of the GUIDInfo block within the port's table
 * port_num  - 1-based physical port number
 * p_data    - raw GUID payload of the block (NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE bytes)
 */
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
					 u8 port_num, u8 *p_data)
{
	int i;
	u64 guid_indexes;
	int slave_id;
	int port_index = port_num - 1;

	if (!mlx4_is_master(dev->dev))
		return;

	/* comp mask is stored big-endian; convert to host order for test_bit */
	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_num - 1].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num,
	    (unsigned long long)guid_indexes);

	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* The location of the specific index starts from bit number 4
		 * until bit num 11 */
		if (test_bit(i + 4, (unsigned long *)&guid_indexes)) {
			slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
			if (slave_id >= dev->dev->num_slaves) {
				/* remaining entries map past the last slave; stop */
				pr_debug("The last slave: %d\n", slave_id);
				return;
			}

			/* cache the guid: */
			memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
			       &p_data[i * GUID_REC_SIZE],
			       GUID_REC_SIZE);
		} else
			pr_debug("Guid number: %d in block: %d"
				 " was not updated\n", i, block_num);
	}
}
112255932Salfred
113255932Salfredstatic __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index)
114255932Salfred{
115255932Salfred	if (index >= NUM_ALIAS_GUID_PER_PORT) {
116255932Salfred		pr_err("%s: ERROR: asked for index:%d\n", __func__, index);
117255932Salfred		return (__force __be64) -1;
118255932Salfred	}
119255932Salfred	return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
120255932Salfred}
121255932Salfred
122255932Salfred
123255932Salfredib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
124255932Salfred{
125255932Salfred	return IB_SA_COMP_MASK(4 + index);
126255932Salfred}
127255932Salfred
/*
 * React to a slave going active/inactive on a port.
 * On deactivation (slave_init == 0) the slave's currently recorded alias
 * GUID is replaced by the delete marker so the SM releases it; on
 * activation the admin GUID is written back so it gets (re)registered.
 * The touched record is marked IDLE with time_to_run = 0 and a cleared
 * retry schedule, then the port's alias-GUID work is kicked to run
 * immediately.  No-op while the port is still flagged
 * GUID_STATE_NEED_PORT_INIT (initial population happens elsewhere).
 */
void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
				    int port,  int slave_init)
{
	__be64 curr_guid, required_guid;
	int record_num = slave / 8;	/* 8 GUID entries per record */
	int index = slave % 8;		/* entry within the record */
	int port_index = port - 1;
	unsigned long flags;
	int do_work = 0;

	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
	if (dev->sriov.alias_guid.ports_guid[port_index].state_flags &
	    GUID_STATE_NEED_PORT_INIT)
		goto unlock;
	if (!slave_init) {
		/* slave went down: only act if a real GUID is recorded */
		curr_guid = *(__be64 *)&dev->sriov.
			alias_guid.ports_guid[port_index].
			all_rec_per_port[record_num].
			all_recs[GUID_REC_SIZE * index];
		if (curr_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL) ||
		    !curr_guid)
			goto unlock;
		required_guid = cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
	} else {
		/* slave came up: restore the admin-configured GUID */
		required_guid = mlx4_get_admin_guid(dev->dev, slave, port);
		if (required_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
			goto unlock;
	}
	*(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].
		all_recs[GUID_REC_SIZE * index] = required_guid;
	dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].guid_indexes
		|= mlx4_ib_get_aguid_comp_mask_from_ix(index);
	dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].status
		= MLX4_GUID_INFO_STATUS_IDLE;
	/* set to run immediately */
	dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].time_to_run = 0;
	dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].
		guids_retry_schedule[index] = 0;
	do_work = 1;
unlock:
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);

	/* kick the work outside the spinlock */
	if (do_work)
		mlx4_ib_init_alias_guid_work(dev, port_index);
}
178331769Shselasky
/*
 * Whenever a new GUID is set/unset (guid table change), create an event and
 * notify the relevant slave (the master should also be notified).
 * If the GUID value is not the same as in the cache, the slave will not be
 * updated; in this case it waits for smp_snoop or the port management
 * event to call the function and update the slave.
 * block_num - the index of the block (16 blocks available)
 * port_num - 1 or 2
 */
void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
					  int block_num, u8 port_num,
					  u8 *p_data)
{
	int i;
	u64 guid_indexes;
	int slave_id, slave_port;
	enum slave_port_state new_state;
	enum slave_port_state prev_state;
	__be64 tmp_cur_ag, form_cache_ag;
	enum slave_port_gen_event gen_event;
	struct mlx4_sriov_alias_guid_info_rec_det *rec;
	unsigned long flags;
	__be64 required_value;

	/* only the SR-IOV master owns slave notification */
	if (!mlx4_is_master(dev->dev))
		return;

	rec = &dev->sriov.alias_guid.ports_guid[port_num - 1].
			all_rec_per_port[block_num];
	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_num - 1].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num,
	    (unsigned long long)guid_indexes);

	/*calculate the slaves and notify them*/
	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* the location of the specific index runs from bits 4..11 */
		if (!(test_bit(i + 4, (unsigned long *)&guid_indexes)))
			continue;

		slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
		/* entries past PF + num_vfs don't map to any function */
		if (slave_id >= dev->dev->persist->num_vfs + 1)
			return;

		slave_port = mlx4_phys_to_slave_port(dev->dev, slave_id, port_num);
		if (slave_port < 0) /* this port isn't available for the VF */
			continue;

		tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
		form_cache_ag = get_cached_alias_guid(dev, port_num,
					(NUM_ALIAS_GUID_IN_REC * block_num) + i);
		/*
		 * Check if guid is not the same as in the cache,
		 * If it is different, wait for the snoop_smp or the port mgmt
		 * change event to update the slave on its port state change
		 */
		if (tmp_cur_ag != form_cache_ag)
			continue;

		/* compare against the admin-required value under the lock;
		 * the delete marker is treated as "no GUID required" */
		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
		required_value = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];

		if (required_value == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
			required_value = 0;

		if (tmp_cur_ag == required_value) {
			/* SM granted what we asked for; this entry is done */
			rec->guid_indexes = rec->guid_indexes &
			       ~mlx4_ib_get_aguid_comp_mask_from_ix(i);
		} else {
			/* may notify port down if value is 0 */
			if (tmp_cur_ag != MLX4_NOT_SET_GUID) {
				spin_unlock_irqrestore(&dev->sriov.
					alias_guid.ag_work_lock, flags);
				continue;
			}
		}
		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock,
				       flags);
		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
		/*2 cases: Valid GUID, and Invalid Guid*/

		if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /*valid GUID*/
			prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num);
			new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
								  MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
								  &gen_event);
			pr_debug("slave: %d, port: %d prev_port_state: %d,"
				 " new_port_state: %d, gen_event: %d\n",
				 slave_id, port_num, prev_state, new_state, gen_event);
			if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
				pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
					 slave_id, port_num);
				mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
							       port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
			}
		} else { /* request to invalidate GUID */
			set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
						      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
						      &gen_event);
			if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) {
				pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
					 slave_id, port_num);
				mlx4_gen_port_state_change_eqe(dev->dev,
							       slave_id,
							       port_num,
							       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
			}
		}
	}
}
290255932Salfred
/*
 * SA completion handler for a GUIDInfo SET/DELETE query.
 *
 * For each entry the query covered, compares the SM's answer against the
 * locally required value:
 *  - match (or SM assignment accepted): entry resolved, retry counter reset;
 *  - declined / stale: entry is re-flagged and its per-entry retry delay is
 *    doubled (capped at 60 s); the record's time_to_run is pushed out by the
 *    minimum of the declined entries' delays.
 * Afterwards slaves are notified of any now-valid/invalid GUIDs and, unless
 * the device is going down, the port's alias-GUID work is rescheduled.
 * Finally the per-query context is released (or its completion signalled
 * when sa_query was cleared).
 */
static void aliasguid_query_handler(int status,
				    struct ib_sa_guidinfo_rec *guid_rec,
				    void *context)
{
	struct mlx4_ib_dev *dev;
	struct mlx4_alias_guid_work_context *cb_ctx = context;
	u8 port_index;
	int i;
	struct mlx4_sriov_alias_guid_info_rec_det *rec;
	unsigned long flags, flags1;
	ib_sa_comp_mask declined_guid_indexes = 0;
	ib_sa_comp_mask applied_guid_indexes = 0;
	unsigned int resched_delay_sec = 0;

	if (!context)
		return;

	dev = cb_ctx->dev;
	port_index = cb_ctx->port - 1;
	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[cb_ctx->block_num];

	if (status) {
		/* transport-level failure: retry the record in one second */
		pr_debug("(port: %d) failed: status = %d\n",
			 cb_ctx->port, status);
		rec->time_to_run = ktime_get_ns() + 1 * NSEC_PER_SEC;
		goto out;
	}

	if (guid_rec->block_num != cb_ctx->block_num) {
		pr_err("block num mismatch: %d != %d\n",
		       cb_ctx->block_num, guid_rec->block_num);
		goto out;
	}

	pr_debug("lid/port: %d/%d, block_num: %d\n",
		 be16_to_cpu(guid_rec->lid), cb_ctx->port,
		 guid_rec->block_num);

	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[guid_rec->block_num];

	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
	for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
		__be64 sm_response, required_val;

		/* skip entries this query didn't ask about */
		if (!(cb_ctx->guid_indexes &
			mlx4_ib_get_aguid_comp_mask_from_ix(i)))
			continue;
		sm_response = *(__be64 *)&guid_rec->guid_info_list
				[i * GUID_REC_SIZE];
		required_val = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
		if (cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE) {
			if (required_val ==
			    cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
				goto next_entry;

			/* A new value was set till we got the response */
			pr_debug("need to set new value %llx, record num %d, block_num:%d\n",
				 (long long)be64_to_cpu(required_val),
				 i, guid_rec->block_num);
			goto entry_declined;
		}

		/* check if the SM didn't assign one of the records.
		 * if it didn't, re-ask for.
		 */
		if (sm_response == MLX4_NOT_SET_GUID) {
			/* warn only on the first decline of this entry */
			if (rec->guids_retry_schedule[i] == 0)
				mlx4_ib_warn(&dev->ib_dev,
					     "%s:Record num %d in  block_num: %d was declined by SM\n",
					     __func__, i,
					     guid_rec->block_num);
			goto entry_declined;
		} else {
		       /* properly assigned record. */
		       /* We save the GUID we just got from the SM in the
			* admin_guid in order to be persistent, and in the
			* request from the sm the process will ask for the same GUID */
			if (required_val &&
			    sm_response != required_val) {
				/* Warn only on first retry */
				if (rec->guids_retry_schedule[i] == 0)
					mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
						     " admin guid after SysAdmin "
						     "configuration. "
						     "Record num %d in block_num:%d "
						     "was declined by SM, "
						     "new val(0x%llx) was kept, SM returned (0x%llx)\n",
						      __func__, i,
						     guid_rec->block_num,
						     (long long)be64_to_cpu(required_val),
						     (long long)be64_to_cpu(sm_response));
				goto entry_declined;
			} else {
				/* accept the SM's value; persist it as admin
				 * GUID when we had no requirement (0) */
				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
					sm_response;
				if (required_val == 0)
					mlx4_set_admin_guid(dev->dev,
							    sm_response,
							    (guid_rec->block_num
							    * NUM_ALIAS_GUID_IN_REC) + i,
							    cb_ctx->port);
				goto next_entry;
			}
		}
entry_declined:
		declined_guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
		/* exponential backoff per entry: 1s, 2s, 4s ... capped at 60s */
		rec->guids_retry_schedule[i] =
			(rec->guids_retry_schedule[i] == 0) ?  1 :
			min((unsigned int)60,
			    rec->guids_retry_schedule[i] * 2);
		/* using the minimum value among all entries in that record */
		resched_delay_sec = (resched_delay_sec == 0) ?
				rec->guids_retry_schedule[i] :
				min(resched_delay_sec,
				    rec->guids_retry_schedule[i]);
		continue;

next_entry:
		rec->guids_retry_schedule[i] = 0;
	}

	applied_guid_indexes =  cb_ctx->guid_indexes & ~declined_guid_indexes;
	if (declined_guid_indexes ||
	    rec->guid_indexes & ~(applied_guid_indexes)) {
		/* some entries still pending: schedule another attempt */
		pr_debug("record=%d wasn't fully set, guid_indexes=0x%llx applied_indexes=0x%llx, declined_indexes=0x%llx\n",
			 guid_rec->block_num,
			 (long long)be64_to_cpu((__force __be64)rec->guid_indexes),
			 (long long)be64_to_cpu((__force __be64)applied_guid_indexes),
			 (long long)be64_to_cpu((__force __be64)declined_guid_indexes));
		rec->time_to_run = ktime_get_ns() +
			resched_delay_sec * NSEC_PER_SEC;
	} else {
		rec->status = MLX4_GUID_INFO_STATUS_SET;
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
	/*
	The func is call here to close the cases when the
	sm doesn't send smp, so in the sa response the driver
	notifies the slave.
	*/
	mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num,
					     cb_ctx->port,
					     guid_rec->guid_info_list);
out:
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down) {
		/* NOTE(review): resched_delay_sec is unsigned int but the
		 * prototype takes int * — pointer-sign mismatch; confirm the
		 * build silences -Wpointer-sign or align the types. */
		get_low_record_time_index(dev, port_index, &resched_delay_sec);
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
				   &dev->sriov.alias_guid.ports_guid[port_index].
				   alias_guid_work,
				   msecs_to_jiffies(resched_delay_sec * 1000));
	}
	if (cb_ctx->sa_query) {
		list_del(&cb_ctx->list);
		kfree(cb_ctx);
	} else
		complete(&cb_ctx->done);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
454255932Salfred
/*
 * Rebuild the comp mask of one GUIDInfo record from its admin values and,
 * if any entry needs (re)negotiation, flip the record's status to IDLE so
 * the alias-GUID work will send it to the SM again.
 * Callers in this file invoke this with ag_work_lock held.
 */
static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
{
	int i;
	u64 cur_admin_val;
	ib_sa_comp_mask comp_mask = 0;

	/* start from SET; downgraded to IDLE below iff any entry is flagged */
	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
		= MLX4_GUID_INFO_STATUS_SET;

	/* calculate the comp_mask for that record.*/
	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		cur_admin_val =
			*(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
			all_rec_per_port[index].all_recs[GUID_REC_SIZE * i];
		/*
		check the admin value: if it's for delete (~00LL) or
		it is the first guid of the first record (hw guid) or
		the records is not in ownership of the sysadmin and the sm doesn't
		need to assign GUIDs, then don't put it up for assignment.
		*/
		if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
		    (!index && !i))
			continue;
		comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
	}
	dev->sriov.alias_guid.ports_guid[port - 1].
		all_rec_per_port[index].guid_indexes |= comp_mask;
	if (dev->sriov.alias_guid.ports_guid[port - 1].
	    all_rec_per_port[index].guid_indexes)
		dev->sriov.alias_guid.ports_guid[port - 1].
		all_rec_per_port[index].status = MLX4_GUID_INFO_STATUS_IDLE;

}
488255932Salfred
/*
 * Submit one GUIDInfo record to the SM as an SA SET/DELETE query.
 * The completion is handled asynchronously by aliasguid_query_handler.
 * On any failure (port not active, allocation failure, query submission
 * failure) the record is invalidated and the port's alias-GUID work is
 * rescheduled after 'resched_delay'.
 *
 * Returns 0 on successful submission, a negative errno otherwise
 * (-EAGAIN for the rescheduled cases).
 */
static int set_guid_rec(struct ib_device *ibdev,
			struct mlx4_next_alias_guid_work *rec)
{
	int err;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_guidinfo_rec guid_info_rec;
	ib_sa_comp_mask comp_mask;
	struct ib_port_attr attr;
	struct mlx4_alias_guid_work_context *callback_context;
	unsigned long resched_delay, flags, flags1;
	u8 port = rec->port + 1;	/* rec->port is 0-based */
	int index = rec->block_num;
	struct mlx4_sriov_alias_guid_info_rec_det *rec_det = &rec->rec_det;
	struct list_head *head =
		&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;

	err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
	if (err) {
		pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n",
			 err, port);
		return err;
	}
	/*check the port was configured by the sm, otherwise no need to send */
	if (attr.state != IB_PORT_ACTIVE) {
		pr_debug("port %d not active...rescheduling\n", port);
		resched_delay = 5 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}

	callback_context = kmalloc(sizeof *callback_context, GFP_KERNEL);
	if (!callback_context) {
		err = -ENOMEM;
		resched_delay = HZ * 5;
		goto new_schedule;
	}
	callback_context->port = port;
	callback_context->dev = dev;
	callback_context->block_num = index;
	callback_context->guid_indexes = rec_det->guid_indexes;
	callback_context->method = rec->method;

	memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));

	guid_info_rec.lid = cpu_to_be16(attr.lid);
	guid_info_rec.block_num = index;

	memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
	       GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC);
	comp_mask = IB_SA_GUIDINFO_REC_LID | IB_SA_GUIDINFO_REC_BLOCK_NUM |
		rec_det->guid_indexes;

	/* track the in-flight query so teardown can find it via cb_list */
	init_completion(&callback_context->done);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	list_add_tail(&callback_context->list, head);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);

	callback_context->query_id =
		ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
					  ibdev, port, &guid_info_rec,
					  comp_mask, rec->method, 1000,
					  GFP_KERNEL, aliasguid_query_handler,
					  callback_context,
					  &callback_context->sa_query);
	if (callback_context->query_id < 0) {
		pr_debug("ib_sa_guid_info_rec_query failed, query_id: "
			 "%d. will reschedule to the next 1 sec.\n",
			 callback_context->query_id);
		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
		list_del(&callback_context->list);
		kfree(callback_context);
		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
		resched_delay = 1 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}
	err = 0;
	goto out;

new_schedule:
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	invalidate_guid_record(dev, port, index);
	if (!dev->sriov.is_going_down) {
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   resched_delay);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);

out:
	return err;
}
583255932Salfred
/*
 * Pre-load the port's alias-GUID records with the admin GUIDs of all
 * currently active slaves, so the first SA request asks the SM for the
 * values the sysadmin configured.  Entry 0 (the HW GUID) and inactive
 * or out-of-range entries are skipped.
 * Called from mlx4_ib_invalidate_all_guid_record with ag_work_lock held.
 */
static void mlx4_ib_guid_port_init(struct mlx4_ib_dev *dev, int port)
{
	int j, k, entry;
	__be64 guid;

	/*Check if the SM doesn't need to assign the GUIDs*/
	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
		for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
			entry = j * NUM_ALIAS_GUID_IN_REC + k;
			/* no request for the 0 entry (hw guid) */
			if (!entry || entry > dev->dev->persist->num_vfs ||
			    !mlx4_is_slave_active(dev->dev, entry))
				continue;
			guid = mlx4_get_admin_guid(dev->dev, entry, port);
			*(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
				all_rec_per_port[j].all_recs
				[GUID_REC_SIZE * k] = guid;
			pr_debug("guid was set, entry=%d, val=0x%llx, port=%d\n",
				 entry,
				 (long long)be64_to_cpu(guid),
				 port);
		}
	}
}
/*
 * Invalidate every GUIDInfo record of a port so the whole table is
 * renegotiated with the SM.  Performs one-time port initialization
 * (mlx4_ib_guid_port_init) if still pending, then kicks the port's
 * alias-GUID work to run immediately (master only, and only while the
 * device is not going down).
 */
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
{
	int i;
	unsigned long flags, flags1;

	pr_debug("port %d\n", port);

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);

	/* first invalidation after load: seed records from admin GUIDs */
	if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags &
		GUID_STATE_NEED_PORT_INIT) {
		mlx4_ib_guid_port_init(dev, port);
		dev->sriov.alias_guid.ports_guid[port - 1].state_flags &=
			(~GUID_STATE_NEED_PORT_INIT);
	}
	for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
		invalidate_guid_record(dev, port, i);

	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
		/*
		make sure no work waits in the queue, if the work is already
		queued(not on the timer) the cancel will fail. That is not a problem
		because we just want the work started.
		*/
		cancel_delayed_work(&dev->sriov.alias_guid.
				      ports_guid[port - 1].alias_guid_work);
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   0);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
642255932Salfred
643331769Shselaskystatic void set_required_record(struct mlx4_ib_dev *dev, u8 port,
644331769Shselasky				struct mlx4_next_alias_guid_work *next_rec,
645331769Shselasky				int record_index)
646255932Salfred{
647331769Shselasky	int i;
648331769Shselasky	int lowset_time_entry = -1;
649331769Shselasky	int lowest_time = 0;
650331769Shselasky	ib_sa_comp_mask delete_guid_indexes = 0;
651331769Shselasky	ib_sa_comp_mask set_guid_indexes = 0;
652331769Shselasky	struct mlx4_sriov_alias_guid_info_rec_det *rec =
653331769Shselasky			&dev->sriov.alias_guid.ports_guid[port].
654331769Shselasky			all_rec_per_port[record_index];
655331769Shselasky
656331769Shselasky	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
657331769Shselasky		if (!(rec->guid_indexes &
658331769Shselasky			mlx4_ib_get_aguid_comp_mask_from_ix(i)))
659331769Shselasky			continue;
660331769Shselasky
661331769Shselasky		if (*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] ==
662331769Shselasky				cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
663331769Shselasky			delete_guid_indexes |=
664331769Shselasky				mlx4_ib_get_aguid_comp_mask_from_ix(i);
665331769Shselasky		else
666331769Shselasky			set_guid_indexes |=
667331769Shselasky				mlx4_ib_get_aguid_comp_mask_from_ix(i);
668331769Shselasky
669331769Shselasky		if (lowset_time_entry == -1 || rec->guids_retry_schedule[i] <=
670331769Shselasky			lowest_time) {
671331769Shselasky			lowset_time_entry = i;
672331769Shselasky			lowest_time = rec->guids_retry_schedule[i];
673331769Shselasky		}
674331769Shselasky	}
675331769Shselasky
676331769Shselasky	memcpy(&next_rec->rec_det, rec, sizeof(*rec));
677331769Shselasky	next_rec->port = port;
678331769Shselasky	next_rec->block_num = record_index;
679331769Shselasky
680331769Shselasky	if (*(__be64 *)&rec->all_recs[lowset_time_entry * GUID_REC_SIZE] ==
681331769Shselasky				cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) {
682331769Shselasky		next_rec->rec_det.guid_indexes = delete_guid_indexes;
683331769Shselasky		next_rec->method = MLX4_GUID_INFO_RECORD_DELETE;
684331769Shselasky	} else {
685331769Shselasky		next_rec->rec_det.guid_indexes = set_guid_indexes;
686331769Shselasky		next_rec->method = MLX4_GUID_INFO_RECORD_SET;
687331769Shselasky	}
688331769Shselasky}
689331769Shselasky
690331769Shselasky/* return index of record that should be updated based on lowest
691331769Shselasky * rescheduled time
692331769Shselasky */
693331769Shselaskystatic int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
694331769Shselasky				     int *resched_delay_sec)
695331769Shselasky{
696331769Shselasky	int record_index = -1;
697331769Shselasky	u64 low_record_time = 0;
698331769Shselasky	struct mlx4_sriov_alias_guid_info_rec_det rec;
699255932Salfred	int j;
700255932Salfred
701255932Salfred	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
702331769Shselasky		rec = dev->sriov.alias_guid.ports_guid[port].
703331769Shselasky			all_rec_per_port[j];
704331769Shselasky		if (rec.status == MLX4_GUID_INFO_STATUS_IDLE &&
705331769Shselasky		    rec.guid_indexes) {
706331769Shselasky			if (record_index == -1 ||
707331769Shselasky			    rec.time_to_run < low_record_time) {
708331769Shselasky				record_index = j;
709331769Shselasky				low_record_time = rec.time_to_run;
710331769Shselasky			}
711255932Salfred		}
712255932Salfred	}
713331769Shselasky	if (resched_delay_sec) {
714331769Shselasky		u64 curr_time = ktime_get_ns();
715255932Salfred
716331769Shselasky		*resched_delay_sec = (low_record_time < curr_time) ? 0 :
717331769Shselasky			div_u64((low_record_time - curr_time), NSEC_PER_SEC);
718331769Shselasky	}
719331769Shselasky
720331769Shselasky	return record_index;
721255932Salfred}
722255932Salfred
723331769Shselasky/* The function returns the next record that was
724331769Shselasky * not configured (or failed to be configured) */
725331769Shselaskystatic int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
726331769Shselasky				     struct mlx4_next_alias_guid_work *rec)
727255932Salfred{
728331769Shselasky	unsigned long flags;
729331769Shselasky	int record_index;
730331769Shselasky	int ret = 0;
731255932Salfred
732331769Shselasky	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
733331769Shselasky	record_index = get_low_record_time_index(dev, port, NULL);
734331769Shselasky
735331769Shselasky	if (record_index < 0) {
736331769Shselasky		ret = -ENOENT;
737331769Shselasky		goto out;
738255932Salfred	}
739331769Shselasky
740331769Shselasky	set_required_record(dev, port, rec, record_index);
741331769Shselaskyout:
742331769Shselasky	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
743331769Shselasky	return ret;
744255932Salfred}
745255932Salfred
746255932Salfredstatic void alias_guid_work(struct work_struct *work)
747255932Salfred{
748255932Salfred	struct delayed_work *delay = to_delayed_work(work);
749255932Salfred	int ret = 0;
750255932Salfred	struct mlx4_next_alias_guid_work *rec;
751255932Salfred	struct mlx4_sriov_alias_guid_port_rec_det *sriov_alias_port =
752255932Salfred		container_of(delay, struct mlx4_sriov_alias_guid_port_rec_det,
753255932Salfred			     alias_guid_work);
754255932Salfred	struct mlx4_sriov_alias_guid *sriov_alias_guid = sriov_alias_port->parent;
755255932Salfred	struct mlx4_ib_sriov *ib_sriov = container_of(sriov_alias_guid,
756255932Salfred						struct mlx4_ib_sriov,
757255932Salfred						alias_guid);
758255932Salfred	struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);
759255932Salfred
760255932Salfred	rec = kzalloc(sizeof *rec, GFP_KERNEL);
761255932Salfred	if (!rec) {
762255932Salfred		pr_err("alias_guid_work: No Memory\n");
763255932Salfred		return;
764255932Salfred	}
765255932Salfred
766255932Salfred	pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
767255932Salfred	ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
768255932Salfred	if (ret) {
769255932Salfred		pr_debug("No more records to update.\n");
770255932Salfred		goto out;
771255932Salfred	}
772255932Salfred
773331769Shselasky	set_guid_rec(&dev->ib_dev, rec);
774255932Salfredout:
775255932Salfred	kfree(rec);
776255932Salfred}
777255932Salfred
778255932Salfred
/*
 * Kick the alias-GUID worker for @port (0-based index) on the master,
 * unless the device is being torn down.  Lock order matches the rest of
 * this file: going_down_lock, then ag_work_lock.
 */
void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
{
	unsigned long flags, flags1;

	if (!mlx4_is_master(dev->dev))
		return;
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down) {
		/* If the work is pending, cancel it first and then
		 * re-queue it; otherwise the new submission would not
		 * run until the previous one finishes, since the same
		 * work struct is reused.
		 */
		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port].
				    alias_guid_work);
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
			   &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
800255932Salfred
/*
 * Tear down the alias-GUID service: cancel pending delayed work, cancel
 * and wait out every in-flight SA query, then drain and destroy the
 * per-port workqueues and release the SA client.
 */
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
{
	int i;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct mlx4_alias_guid_work_context *cb_ctx;
	struct mlx4_sriov_alias_guid_port_rec_det *det;
	struct ib_sa_query *sa_query;
	unsigned long flags;

	for (i = 0 ; i < dev->num_ports; i++) {
		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
		det = &sriov->alias_guid.ports_guid[i];
		spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		/* Cancel every outstanding query on this port.  The entry
		 * is unlinked (and sa_query NULLed) under the lock, but the
		 * lock is dropped around the cancel/wait — NOTE(review):
		 * presumably because the query completion path takes this
		 * lock and signals cb_ctx->done; confirm against the SA
		 * callback defined earlier in this file. */
		while (!list_empty(&det->cb_list)) {
			cb_ctx = list_entry(det->cb_list.next,
					    struct mlx4_alias_guid_work_context,
					    list);
			sa_query = cb_ctx->sa_query;
			cb_ctx->sa_query = NULL;
			list_del(&cb_ctx->list);
			spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
			ib_sa_cancel_query(cb_ctx->query_id, sa_query);
			wait_for_completion(&cb_ctx->done);
			kfree(cb_ctx);
			spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		}
		spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
	}
	/* All queries are gone; flush any remaining work, then free. */
	for (i = 0 ; i < dev->num_ports; i++) {
		flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
	}
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
}
836255932Salfred
/*
 * Initialize the alias-GUID service on the master: register an SA
 * client, reset each port's record state, and create one ordered
 * workqueue per port for alias_guid_work.
 *
 * Returns 0 on success (or on a slave, where this is a no-op), a
 * negative errno on failure; all partially-created resources are
 * unwound via the err_thread/err_unregister labels.
 */
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
{
	char alias_wq_name[15];
	int ret = 0;
	int i, j;
	union ib_gid gid;

	if (!mlx4_is_master(dev->dev))
		return 0;
	dev->sriov.alias_guid.sa_client =
		kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
	if (!dev->sriov.alias_guid.sa_client)
		return -ENOMEM;

	ib_sa_register_client(dev->sriov.alias_guid.sa_client);

	spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);

	/* Sanity-probe each port (1-based) before touching any state. */
	for (i = 1; i <= dev->num_ports; ++i) {
		if (dev->ib_dev.query_gid(&dev->ib_dev , i, 0, &gid)) {
			ret = -EFAULT;
			goto err_unregister;
		}
	}

	for (i = 0 ; i < dev->num_ports; i++) {
		memset(&dev->sriov.alias_guid.ports_guid[i], 0,
		       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
		/* Defer per-port GUID init to the first invalidate call. */
		dev->sriov.alias_guid.ports_guid[i].state_flags |=
				GUID_STATE_NEED_PORT_INIT;
		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
			/* mark each val as it was deleted */
			memset(dev->sriov.alias_guid.ports_guid[i].
				all_rec_per_port[j].all_recs, 0xFF,
				sizeof(dev->sriov.alias_guid.ports_guid[i].
				all_rec_per_port[j].all_recs));
		}
		INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
		/*prepare the records, set them to be allocated by sm*/
		/* j starts at 1: entry 0 (the PF's GUID) is never
		 * SM-assigned. */
		if (mlx4_ib_sm_guid_assign)
			for (j = 1; j < NUM_ALIAS_GUID_PER_PORT; j++)
				mlx4_set_admin_guid(dev->dev, 0, j, i + 1);
		for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
			invalidate_guid_record(dev, i + 1, j);

		dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
		dev->sriov.alias_guid.ports_guid[i].port  = i;

		/* One ordered (single-threaded) workqueue per port keeps
		 * alias_guid_work executions serialized. */
		snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
		dev->sriov.alias_guid.ports_guid[i].wq =
			alloc_ordered_workqueue(alias_wq_name, WQ_MEM_RECLAIM);
		if (!dev->sriov.alias_guid.ports_guid[i].wq) {
			ret = -ENOMEM;
			goto err_thread;
		}
		INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
			  alias_guid_work);
	}
	return 0;

err_thread:
	/* Destroy only the workqueues created so far (ports < i). */
	for (--i; i >= 0; i--) {
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		dev->sriov.alias_guid.ports_guid[i].wq = NULL;
	}

err_unregister:
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
	dev->sriov.alias_guid.sa_client = NULL;
	pr_err("init_alias_guid_service: Failed. (ret:%d)\n", ret);
	return ret;
}
910