1/*
2
3 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
4 * Copyright (c) 2005 Intel Corporation. All rights reserved.
5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
6 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses.  You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 *     Redistribution and use in source and binary forms, with or
15 *     without modification, are permitted provided that the following
16 *     conditions are met:
17 *
18 *      - Redistributions of source code must retain the above
19 *        copyright notice, this list of conditions and the following
20 *        disclaimer.
21 *
22 *      - Redistributions in binary form must reproduce the above
23 *        copyright notice, this list of conditions and the following
24 *        disclaimer in the documentation and/or other materials
25 *        provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35
36
37 #include <linux/module.h>
38 #include <linux/errno.h>
39 #include <linux/slab.h>
40 #include <linux/workqueue.h>
41
42 #include <rdma/ib_cache.h>
43 */
#include <stdio.h>	/* printf */
#include <stdlib.h>	/* malloc, calloc, free */

#include "core_priv.h"

46struct ib_pkey_cache {
47	int table_len;
48	u16 table[0];
49};
50
51struct ib_gid_cache {
52	int table_len;
53	union ib_gid table[0];
54};
55/*
56 struct ib_update_work {
57 struct work_struct work;
58 struct ib_device  *device;
59 u8                 port_num;
60 };
61 */
62static inline int start_port(struct ib_device *device) {
63	return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
64}
65
66static inline int end_port(struct ib_device *device) {
67	return (device->node_type == RDMA_NODE_IB_SWITCH) ?
68			0 : device->phys_port_cnt;
69}
70/*
71 int ib_get_cached_gid(struct ib_device *device,
72 u8                port_num,
73 int               index,
74 union ib_gid     *gid)
75 {
76 struct ib_gid_cache *cache;
77 unsigned long flags;
78 int ret = 0;
79
80 if (port_num < start_port(device) || port_num > end_port(device))
81 return -EINVAL;
82
83 read_lock_irqsave(&device->cache.lock, flags);
84
85 cache = device->cache.gid_cache[port_num - start_port(device)];
86
87 if (index < 0 || index >= cache->table_len)
88 ret = -EINVAL;
89 else
90 *gid = cache->table[index];
91
92 read_unlock_irqrestore(&device->cache.lock, flags);
93
94 return ret;
95 }
96 EXPORT_SYMBOL(ib_get_cached_gid);
97
98 int ib_find_cached_gid(struct ib_device *device,
99 union ib_gid	*gid,
100 u8               *port_num,
101 u16              *index)
102 {
103 struct ib_gid_cache *cache;
104 unsigned long flags;
105 int p, i;
106 int ret = -ENOENT;
107
108 *port_num = -1;
109 if (index)
110 *index = -1;
111
112 read_lock_irqsave(&device->cache.lock, flags);
113
114 for (p = 0; p <= end_port(device) - start_port(device); ++p) {
115 cache = device->cache.gid_cache[p];
116 for (i = 0; i < cache->table_len; ++i) {
117 if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
118 *port_num = p + start_port(device);
119 if (index)
120 *index = i;
121 ret = 0;
122 goto found;
123 }
124 }
125 }
126 found:
127 read_unlock_irqrestore(&device->cache.lock, flags);
128
129 return ret;
130 }
131 EXPORT_SYMBOL(ib_find_cached_gid);
132
133 int ib_get_cached_pkey(struct ib_device *device,
134 u8                port_num,
135 int               index,
136 u16              *pkey)
137 {
138 struct ib_pkey_cache *cache;
139 unsigned long flags;
140 int ret = 0;
141
142 if (port_num < start_port(device) || port_num > end_port(device))
143 return -EINVAL;
144
145 read_lock_irqsave(&device->cache.lock, flags);
146
147 cache = device->cache.pkey_cache[port_num - start_port(device)];
148
149 if (index < 0 || index >= cache->table_len)
150 ret = -EINVAL;
151 else
152 *pkey = cache->table[index];
153
154 read_unlock_irqrestore(&device->cache.lock, flags);
155
156 return ret;
157 }
158 EXPORT_SYMBOL(ib_get_cached_pkey);
159
160 int ib_find_cached_pkey(struct ib_device *device,
161 u8                port_num,
162 u16               pkey,
163 u16              *index)
164 {
165 struct ib_pkey_cache *cache;
166 unsigned long flags;
167 int i;
168 int ret = -ENOENT;
169
170 if (port_num < start_port(device) || port_num > end_port(device))
171 return -EINVAL;
172
173 read_lock_irqsave(&device->cache.lock, flags);
174
175 cache = device->cache.pkey_cache[port_num - start_port(device)];
176
177 *index = -1;
178
179 for (i = 0; i < cache->table_len; ++i)
180 if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
181 *index = i;
182 ret = 0;
183 break;
184 }
185
186 read_unlock_irqrestore(&device->cache.lock, flags);
187
188 return ret;
189 }
190 EXPORT_SYMBOL(ib_find_cached_pkey);
191
192 int ib_get_cached_lmc(struct ib_device *device,
193 u8                port_num,
194 u8                *lmc)
195 {
196 unsigned long flags;
197 int ret = 0;
198
199 if (port_num < start_port(device) || port_num > end_port(device))
200 return -EINVAL;
201
202 read_lock_irqsave(&device->cache.lock, flags);
203 *lmc = device->cache.lmc_cache[port_num - start_port(device)];
204 read_unlock_irqrestore(&device->cache.lock, flags);
205
206 return ret;
207 }
208 EXPORT_SYMBOL(ib_get_cached_lmc);
209 */
210static void ib_cache_update(struct ib_device *device, u8 port) {
211	struct ib_port_attr *tprops = NULL;
212	struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
213	struct ib_gid_cache *gid_cache = NULL, *old_gid_cache;
214	int i;
215	int ret;
216
217	tprops = malloc(sizeof *tprops);
218	if (!tprops)
219		return;
220
221	ret = ib_query_port(device, port, tprops);
222	if (ret) {
223		printf("ib_query_port failed (%d) for %s\n", ret, device->name);
224		goto err;
225	}
226
227	pkey_cache = malloc(
228			sizeof *pkey_cache
229					+ tprops->pkey_tbl_len * sizeof *pkey_cache->table);
230	if (!pkey_cache)
231		goto err;
232
233	pkey_cache->table_len = tprops->pkey_tbl_len;
234
235	gid_cache = malloc(
236			sizeof *gid_cache + tprops->gid_tbl_len * sizeof *gid_cache->table);
237	if (!gid_cache)
238		goto err;
239
240	gid_cache->table_len = tprops->gid_tbl_len;
241
242	for (i = 0; i < pkey_cache->table_len; ++i) {
243		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
244		if (ret) {
245			printf("ib_query_pkey failed (%d) for %s (index %d)\n", ret,
246					device->name, i);
247			goto err;
248		}
249	}
250
251	for (i = 0; i < gid_cache->table_len; ++i) {
252		ret = ib_query_gid(device, port, i, gid_cache->table + i);
253		if (ret) {
254			printf("ib_query_gid failed (%d) for %s (index %d)\n", ret,
255					device->name, i);
256			goto err;
257		}
258	}
259
260	/*write_lock_irq(&device->cache.lock);*/
261
262	old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
263	old_gid_cache = device->cache.gid_cache[port - start_port(device)];
264
265	device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
266	device->cache.gid_cache[port - start_port(device)] = gid_cache;
267
268	device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;
269
270	/*write_unlock_irq(&device->cache.lock);*/
271
272	free(old_pkey_cache);
273	free(old_gid_cache);
274	free(tprops);
275	return;
276
277	err: free(pkey_cache);
278	free(gid_cache);
279	free(tprops);
280}
281/*
282 static void ib_cache_task(struct work_struct *_work)
283 {
284 struct ib_update_work *work =
285 container_of(_work, struct ib_update_work, work);
286
287 ib_cache_update(work->device, work->port_num);
288 kfree(work);
289 }
290
291 static void ib_cache_event(struct ib_event_handler *handler,
292 struct ib_event *event)
293 {
294 struct ib_update_work *work;
295
296 if (event->event == IB_EVENT_PORT_ERR    ||
297 event->event == IB_EVENT_PORT_ACTIVE ||
298 event->event == IB_EVENT_LID_CHANGE  ||
299 event->event == IB_EVENT_PKEY_CHANGE ||
300 event->event == IB_EVENT_SM_CHANGE   ||
301 event->event == IB_EVENT_CLIENT_REREGISTER ||
302 event->event == IB_EVENT_GID_CHANGE) {
303 work = kmalloc(sizeof *work, GFP_ATOMIC);
304 if (work) {
305 INIT_WORK(&work->work, ib_cache_task);
306 work->device   = event->device;
307 work->port_num = event->element.port_num;
308 schedule_work(&work->work);
309 }
310 }
311 }
312 */
313void ib_cache_setup_one(struct ib_device *device) {
314	int p;
315
316	/*rwlock_init(&device->cache.lock);*/
317
318	device->cache.pkey_cache = malloc(
319			sizeof *device->cache.pkey_cache
320					* (end_port(device) - start_port(device) + 1));
321	device->cache.gid_cache = malloc(
322			sizeof *device->cache.gid_cache
323					* (end_port(device) - start_port(device) + 1));
324
325	device->cache.lmc_cache = malloc(
326			sizeof *device->cache.lmc_cache
327					* (end_port(device) - start_port(device) + 1));
328
329	if (!device->cache.pkey_cache || !device->cache.gid_cache
330			|| !device->cache.lmc_cache) {
331		printf("Couldn't allocate cache "
332				"for %s\n", device->name);
333		goto err;
334	}
335
336	printf("end_port(device): %d\n", end_port(device));
337	printf("start_port(device): %d\n", start_port(device));
338
339	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
340		device->cache.pkey_cache[p] = NULL;
341		device->cache.gid_cache[p] = NULL;
342		ib_cache_update(device, p + start_port(device));
343	}
344
345	/*INIT_IB_EVENT_HANDLER(&device->cache.event_handler, device, ib_cache_event);
346	 if (ib_register_event_handler(&device->cache.event_handler))
347	 goto err_cache;*/
348
349	return;
350
351	/*err_cache: for (p = 0; p <= end_port(device) - start_port(device); ++p) {
352	 free(device->cache.pkey_cache[p]);
353	 free(device->cache.gid_cache[p]);
354	 }*/
355
356	err: free(device->cache.pkey_cache);
357	free(device->cache.gid_cache);
358	free(device->cache.lmc_cache);
359}
360/*
361 static void ib_cache_cleanup_one(struct ib_device *device)
362 {
363 int p;
364
365 ib_unregister_event_handler(&device->cache.event_handler);
366 flush_scheduled_work();
367
368 for (p = 0; p <= end_port(device) - start_port(device); ++p) {
369 kfree(device->cache.pkey_cache[p]);
370 kfree(device->cache.gid_cache[p]);
371 }
372
373 kfree(device->cache.pkey_cache);
374 kfree(device->cache.gid_cache);
375 kfree(device->cache.lmc_cache);
376 }
377
378 static struct ib_client cache_client = {
379 .name   = "cache",
380 .add    = ib_cache_setup_one,
381 .remove = ib_cache_cleanup_one
382 };
383
384 int __init ib_cache_setup(void)
385 {
386 return ib_register_client(&cache_client);
387 }
388
389 void __exit ib_cache_cleanup(void)
390 {
391 ib_unregister_client(&cache_client);
392 }
393 */
394