1219820Sjeff/*
2230135Suqs * Copyright (c) 2006 Intel Corporation.  All rights reserved.
3219820Sjeff *
4219820Sjeff * This software is available to you under a choice of one of two
5219820Sjeff * licenses.  You may choose to be licensed under the terms of the GNU
6219820Sjeff * General Public License (GPL) Version 2, available from the file
7219820Sjeff * COPYING in the main directory of this source tree, or the
8219820Sjeff * OpenIB.org BSD license below:
9219820Sjeff *
10219820Sjeff *     Redistribution and use in source and binary forms, with or
11219820Sjeff *     without modification, are permitted provided that the following
12219820Sjeff *     conditions are met:
13219820Sjeff *
14219820Sjeff *      - Redistributions of source code must retain the above
15219820Sjeff *        copyright notice, this list of conditions and the following
16219820Sjeff *        disclaimer.
17219820Sjeff *
18219820Sjeff *      - Redistributions in binary form must reproduce the above
19219820Sjeff *        copyright notice, this list of conditions and the following
20219820Sjeff *        disclaimer in the documentation and/or other materials
21219820Sjeff *        provided with the distribution.
22219820Sjeff *
23219820Sjeff * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24219820Sjeff * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25219820Sjeff * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26219820Sjeff * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27219820Sjeff * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28219820Sjeff * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29219820Sjeff * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30219820Sjeff * SOFTWARE.
31219820Sjeff */
32219820Sjeff
33219820Sjeff#include <linux/completion.h>
34219820Sjeff#include <linux/dma-mapping.h>
35219820Sjeff#include <linux/err.h>
36219820Sjeff#include <linux/interrupt.h>
37219820Sjeff#include <linux/pci.h>
38219820Sjeff#include <linux/bitops.h>
39219820Sjeff#include <linux/random.h>
40219820Sjeff
41219820Sjeff#include "sa.h"
42219820Sjeff
43219820SjeffMODULE_AUTHOR("Sean Hefty");
44219820SjeffMODULE_DESCRIPTION("InfiniBand InformInfo & Notice event handling");
45219820SjeffMODULE_LICENSE("Dual BSD/GPL");
46219820Sjeff
/* IB core client callbacks: one inform_device is tracked per IB device. */
static void inform_add_one(struct ib_device *device);
static void inform_remove_one(struct ib_device *device);

static struct ib_client inform_client = {
	.name   = "ib_notice",
	.add    = inform_add_one,
	.remove = inform_remove_one
};

/* SA client used for all InformInfo subscribe/unsubscribe queries. */
static struct ib_sa_client	sa_client;
/* Single-threaded workqueue serializing all group state processing. */
static struct workqueue_struct	*inform_wq;
58219820Sjeff
59219820Sjeffstruct inform_device;
60219820Sjeff
/*
 * Per-port state: a red-black tree of subscription groups keyed by trap
 * number.  refcount/comp let inform_remove_one() block until every group
 * on the port has been released.
 */
struct inform_port {
	struct inform_device	*dev;
	spinlock_t		lock;		/* protects table */
	struct rb_root		table;		/* inform_group tree keyed by trap_number */
	atomic_t		refcount;	/* 1 + number of groups in table */
	struct completion	comp;		/* completed when refcount reaches zero */
	u8			port_num;
};
69219820Sjeff
70219820Sjeffstruct inform_device {
71219820Sjeff	struct ib_device	*device;
72219820Sjeff	struct ib_event_handler	event_handler;
73219820Sjeff	int			start_port;
74219820Sjeff	int			end_port;
75219820Sjeff	struct inform_port	port[0];
76219820Sjeff};
77219820Sjeff
/*
 * State values shared by groups and members.  A group uses them both for
 * its processing state (group->state) and its registration state with
 * the SA (group->join_state); a member tracks its own progress.
 */
enum inform_state {
	INFORM_IDLE,
	INFORM_REGISTERING,
	INFORM_MEMBER,
	INFORM_BUSY,
	INFORM_ERROR
};
85219820Sjeff
86219820Sjeffstruct inform_member;
87219820Sjeff
/*
 * A group collects all subscribers for one trap number on one port.
 * Registration with the SA is performed once per group and shared by
 * all of its members.
 */
struct inform_group {
	u16			trap_number;
	struct rb_node		node;		/* linkage in port->table */
	struct inform_port	*port;
	spinlock_t		lock;
	struct work_struct	work;		/* runs inform_work_handler() */
	struct list_head	pending_list;	/* members awaiting SA registration */
	struct list_head	active_list;	/* registered members receiving notices */
	struct list_head	notice_list;	/* notices queued for delivery */
	struct inform_member	*last_join;	/* member whose registration is in flight */
	int			members;	/* number of members on active_list */
	enum inform_state	join_state; /* State relative to SA */
	atomic_t		refcount;
	enum inform_state	state;		/* processing state: IDLE/BUSY/ERROR */
	struct ib_sa_query	*query;
	int			query_id;
};
105219820Sjeff
/*
 * One subscriber registration.  info is handed back to the user; the
 * member is freed in ib_sa_unregister_inform_info() once refcount drops
 * to zero (comp is completed by deref_member()).
 */
struct inform_member {
	struct ib_inform_info	info;
	struct ib_sa_client	*client;
	struct inform_group	*group;
	struct list_head	list;		/* on the group's pending or active list */
	enum inform_state	state;
	atomic_t		refcount;
	struct completion	comp;
};
115219820Sjeff
/* A received notice queued on a group's notice_list for delivery. */
struct inform_notice {
	struct list_head	list;
	struct ib_sa_notice	notice;
};
120219820Sjeff
121219820Sjeffstatic void reg_handler(int status, struct ib_sa_inform *inform,
122219820Sjeff			 void *context);
123219820Sjeffstatic void unreg_handler(int status, struct ib_sa_inform *inform,
124219820Sjeff			  void *context);
125219820Sjeff
126219820Sjeffstatic struct inform_group *inform_find(struct inform_port *port,
127219820Sjeff					u16 trap_number)
128219820Sjeff{
129219820Sjeff	struct rb_node *node = port->table.rb_node;
130219820Sjeff	struct inform_group *group;
131219820Sjeff
132219820Sjeff	while (node) {
133219820Sjeff		group = rb_entry(node, struct inform_group, node);
134219820Sjeff		if (trap_number < group->trap_number)
135219820Sjeff			node = node->rb_left;
136219820Sjeff		else if (trap_number > group->trap_number)
137219820Sjeff			node = node->rb_right;
138219820Sjeff		else
139219820Sjeff			return group;
140219820Sjeff	}
141219820Sjeff	return NULL;
142219820Sjeff}
143219820Sjeff
144219820Sjeffstatic struct inform_group *inform_insert(struct inform_port *port,
145219820Sjeff					  struct inform_group *group)
146219820Sjeff{
147219820Sjeff	struct rb_node **link = &port->table.rb_node;
148219820Sjeff	struct rb_node *parent = NULL;
149219820Sjeff	struct inform_group *cur_group;
150219820Sjeff
151219820Sjeff	while (*link) {
152219820Sjeff		parent = *link;
153219820Sjeff		cur_group = rb_entry(parent, struct inform_group, node);
154219820Sjeff		if (group->trap_number < cur_group->trap_number)
155219820Sjeff			link = &(*link)->rb_left;
156219820Sjeff		else if (group->trap_number > cur_group->trap_number)
157219820Sjeff			link = &(*link)->rb_right;
158219820Sjeff		else
159219820Sjeff			return cur_group;
160219820Sjeff	}
161219820Sjeff	rb_link_node(&group->node, parent, link);
162219820Sjeff	rb_insert_color(&group->node, &port->table);
163219820Sjeff	return NULL;
164219820Sjeff}
165219820Sjeff
166219820Sjeffstatic void deref_port(struct inform_port *port)
167219820Sjeff{
168219820Sjeff	if (atomic_dec_and_test(&port->refcount))
169219820Sjeff		complete(&port->comp);
170219820Sjeff}
171219820Sjeff
/*
 * Drop a reference on a group.  The final reference unlinks the group
 * from the port's tree, frees it, and releases its port reference.  The
 * atomic_dec_and_test runs under port->lock so a concurrent
 * acquire_group() cannot find a group that is being torn down.
 */
static void release_group(struct inform_group *group)
{
	struct inform_port *port = group->port;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	if (atomic_dec_and_test(&group->refcount)) {
		rb_erase(&group->node, &port->table);
		spin_unlock_irqrestore(&port->lock, flags);
		kfree(group);
		deref_port(port);
	} else
		spin_unlock_irqrestore(&port->lock, flags);
}
186219820Sjeff
187219820Sjeffstatic void deref_member(struct inform_member *member)
188219820Sjeff{
189219820Sjeff	if (atomic_dec_and_test(&member->refcount))
190219820Sjeff		complete(&member->comp);
191219820Sjeff}
192219820Sjeff
/*
 * Queue a new member for SA registration.  If the group is idle, take a
 * reference on it and kick the work handler; otherwise the already-
 * scheduled handler will pick the member up from pending_list.
 */
static void queue_reg(struct inform_member *member)
{
	struct inform_group *group = member->group;
	unsigned long flags;

	spin_lock_irqsave(&group->lock, flags);
	list_add(&member->list, &group->pending_list);
	if (group->state == INFORM_IDLE) {
		group->state = INFORM_BUSY;
		atomic_inc(&group->refcount);	/* dropped when the handler goes idle */
		queue_work(inform_wq, &group->work);
	}
	spin_unlock_irqrestore(&group->lock, flags);
}
207219820Sjeff
/*
 * Issue an InformInfo subscribe request to the SA for this group's trap
 * number.  Completion is reported asynchronously via reg_handler().
 * Returns 0 on success (query id stored in group->query_id), negative
 * error code otherwise.
 */
static int send_reg(struct inform_group *group, struct inform_member *member)
{
	struct inform_port *port = group->port;
	struct ib_sa_inform inform;
	int ret;

	memset(&inform, 0, sizeof inform);
	inform.lid_range_begin = cpu_to_be16(0xFFFF);	/* 0xFFFF: all LIDs */
	inform.is_generic = 1;
	inform.subscribe = 1;
	inform.type = cpu_to_be16(IB_SA_EVENT_TYPE_ALL);
	inform.trap.generic.trap_num = cpu_to_be16(member->info.trap_number);
	inform.trap.generic.resp_time = 19;
	inform.trap.generic.producer_type =
				cpu_to_be32(IB_SA_EVENT_PRODUCER_TYPE_ALL);
	/* NOTE(review): unlike send_unreg(), qpn is left zeroed here --
	 * confirm whether IB_QP1 should also be set for subscribe. */

	group->last_join = member;
	ret = ib_sa_informinfo_query(&sa_client, port->dev->device,
				     port->port_num, &inform, 3000, GFP_KERNEL,
				     reg_handler, group,&group->query);
	if (ret >= 0) {
		group->query_id = ret;
		ret = 0;
	}
	return ret;
}
234219820Sjeff
/*
 * Issue an InformInfo unsubscribe request (subscribe field left 0) to
 * the SA for this group's trap number.  Completion is reported via
 * unreg_handler().  Returns 0 on success, negative error code otherwise.
 */
static int send_unreg(struct inform_group *group)
{
	struct inform_port *port = group->port;
	struct ib_sa_inform inform;
	int ret;

	memset(&inform, 0, sizeof inform);
	inform.lid_range_begin = cpu_to_be16(0xFFFF);	/* 0xFFFF: all LIDs */
	inform.is_generic = 1;
	inform.type = cpu_to_be16(IB_SA_EVENT_TYPE_ALL);
	inform.trap.generic.trap_num = cpu_to_be16(group->trap_number);
	inform.trap.generic.qpn = IB_QP1;
	inform.trap.generic.resp_time = 19;
	inform.trap.generic.producer_type =
				cpu_to_be32(IB_SA_EVENT_PRODUCER_TYPE_ALL);

	ret = ib_sa_informinfo_query(&sa_client, port->dev->device,
				     port->port_num, &inform, 3000, GFP_KERNEL,
				     unreg_handler, group, &group->query);
	if (ret >= 0) {
		group->query_id = ret;
		ret = 0;
	}
	return ret;
}
260219820Sjeff
261219820Sjeffstatic void join_group(struct inform_group *group, struct inform_member *member)
262219820Sjeff{
263219820Sjeff	member->state = INFORM_MEMBER;
264219820Sjeff	group->members++;
265219820Sjeff	list_move(&member->list, &group->active_list);
266219820Sjeff}
267219820Sjeff
268219820Sjeffstatic int fail_join(struct inform_group *group, struct inform_member *member,
269219820Sjeff		     int status)
270219820Sjeff{
271219820Sjeff	spin_lock_irq(&group->lock);
272219820Sjeff	list_del_init(&member->list);
273219820Sjeff	spin_unlock_irq(&group->lock);
274219820Sjeff	return member->info.callback(status, &member->info, NULL);
275219820Sjeff}
276219820Sjeff
/*
 * Tear down all active members after a port/SM event invalidated the
 * group's SA registration.  Each member is notified with -ENETRESET;
 * the lock is dropped around the user callback, with a temporary
 * reference keeping the member alive.  On return the group is reset to
 * re-register (join_state IDLE, state BUSY).
 */
static void process_group_error(struct inform_group *group)
{
	struct inform_member *member;
	int ret;

	spin_lock_irq(&group->lock);
	while (!list_empty(&group->active_list)) {
		member = list_entry(group->active_list.next,
				    struct inform_member, list);
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		group->members--;
		member->state = INFORM_ERROR;
		spin_unlock_irq(&group->lock);

		ret = member->info.callback(-ENETRESET, &member->info, NULL);
		deref_member(member);
		if (ret)
			ib_sa_unregister_inform_info(&member->info);
		spin_lock_irq(&group->lock);
	}

	group->join_state = INFORM_IDLE;
	group->state = INFORM_BUSY;
	spin_unlock_irq(&group->lock);
}
303219820Sjeff
304219820Sjeff/*
305219820Sjeff * Report a notice to all active subscribers.  We use a temporary list to
306219820Sjeff * handle unsubscription requests while the notice is being reported, which
307219820Sjeff * avoids holding the group lock while in the user's callback.
308219820Sjeff */
/*
 * Deliver one notice to every member on the active list.  Members are
 * spliced onto a private list and moved back one at a time; the group
 * lock is dropped around each user callback, with a temporary member
 * reference held.  A non-zero callback return unsubscribes the member.
 */
static void process_notice(struct inform_group *group,
			   struct inform_notice *info_notice)
{
	struct inform_member *member;
	struct list_head list;
	int ret;

	INIT_LIST_HEAD(&list);

	spin_lock_irq(&group->lock);
	list_splice_init(&group->active_list, &list);
	while (!list_empty(&list)) {

		member = list_entry(list.next, struct inform_member, list);
		atomic_inc(&member->refcount);
		list_move(&member->list, &group->active_list);
		spin_unlock_irq(&group->lock);

		ret = member->info.callback(0, &member->info,
					    &info_notice->notice);
		deref_member(member);
		if (ret)
			ib_sa_unregister_inform_info(&member->info);
		spin_lock_irq(&group->lock);
	}
	spin_unlock_irq(&group->lock);
}
336219820Sjeff
/*
 * Group state machine.  Runs from the workqueue and also directly from
 * SA query completion handlers (reg_handler/unreg_handler) and
 * notice_dispatch.  Drains, in priority order: group errors, queued
 * notices, then pending registrations.  When nothing is left it either
 * unregisters from the SA (last member gone) or goes idle and drops the
 * group reference taken when the work was scheduled.
 */
static void inform_work_handler(struct work_struct *work)
{
	struct inform_group *group;
	struct inform_member *member;
	struct ib_inform_info *info;
	struct inform_notice *info_notice;
	int status, ret;

	group = container_of(work, typeof(*group), work);
retest:
	spin_lock_irq(&group->lock);
	while (!list_empty(&group->pending_list) ||
	       !list_empty(&group->notice_list) ||
	       (group->state == INFORM_ERROR)) {

		if (group->state == INFORM_ERROR) {
			spin_unlock_irq(&group->lock);
			process_group_error(group);
			goto retest;
		}

		/* Deliver queued notices before processing registrations. */
		if (!list_empty(&group->notice_list)) {
			info_notice = list_entry(group->notice_list.next,
						 struct inform_notice, list);
			list_del(&info_notice->list);
			spin_unlock_irq(&group->lock);
			process_notice(group, info_notice);
			kfree(info_notice);
			goto retest;
		}

		member = list_entry(group->pending_list.next,
				    struct inform_member, list);
		info = &member->info;
		atomic_inc(&member->refcount);

		if (group->join_state == INFORM_MEMBER) {
			/* Group already registered with the SA: join directly. */
			join_group(group, member);
			spin_unlock_irq(&group->lock);
			ret = info->callback(0, info, NULL);
		} else {
			spin_unlock_irq(&group->lock);
			status = send_reg(group, member);
			if (!status) {
				/* Query sent; reg_handler() resumes processing. */
				deref_member(member);
				return;
			}
			ret = fail_join(group, member, status);
		}

		deref_member(member);
		if (ret)
			ib_sa_unregister_inform_info(&member->info);
		spin_lock_irq(&group->lock);
	}

	if (!group->members && (group->join_state == INFORM_MEMBER)) {
		/* Last member left: unsubscribe from the SA. */
		group->join_state = INFORM_IDLE;
		spin_unlock_irq(&group->lock);
		if (send_unreg(group))
			goto retest;
	} else {
		group->state = INFORM_IDLE;
		spin_unlock_irq(&group->lock);
		release_group(group);
	}
}
404219820Sjeff
405219820Sjeff/*
406219820Sjeff * Fail a join request if it is still active - at the head of the pending queue.
407219820Sjeff */
/*
 * Handle a failed SA registration.  Only acts if the member at the head
 * of the pending queue is still the one whose query failed (last_join);
 * otherwise the member was already removed (e.g. unregistered) and
 * nothing is done.  The lock is dropped around the user callback.
 */
static void process_join_error(struct inform_group *group, int status)
{
	struct inform_member *member;
	int ret;

	spin_lock_irq(&group->lock);
	member = list_entry(group->pending_list.next,
			    struct inform_member, list);
	if (group->last_join == member) {
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		spin_unlock_irq(&group->lock);
		ret = member->info.callback(status, &member->info, NULL);
		deref_member(member);
		if (ret)
			ib_sa_unregister_inform_info(&member->info);
	} else
		spin_unlock_irq(&group->lock);
}
427219820Sjeff
428219820Sjeffstatic void reg_handler(int status, struct ib_sa_inform *inform, void *context)
429219820Sjeff{
430219820Sjeff	struct inform_group *group = context;
431219820Sjeff
432219820Sjeff	if (status)
433219820Sjeff		process_join_error(group, status);
434219820Sjeff	else
435219820Sjeff		group->join_state = INFORM_MEMBER;
436219820Sjeff
437219820Sjeff	inform_work_handler(&group->work);
438219820Sjeff}
439219820Sjeff
440219820Sjeffstatic void unreg_handler(int status, struct ib_sa_inform *rec, void *context)
441219820Sjeff{
442219820Sjeff	struct inform_group *group = context;
443219820Sjeff
444219820Sjeff	inform_work_handler(&group->work);
445219820Sjeff}
446219820Sjeff
/*
 * Entry point for a notice received on a port.  Finds the group
 * subscribed to the notice's trap number, queues a copy of the notice
 * on it, and runs the group's work handler inline if the group was
 * idle (the group reference taken here is released when processing
 * completes).  Returns 0 if there is no subscriber, -ENOMEM if the
 * notice could not be copied.
 */
int notice_dispatch(struct ib_device *device, u8 port_num,
		    struct ib_sa_notice *notice)
{
	struct inform_device *dev;
	struct inform_port *port;
	struct inform_group *group;
	struct inform_notice *info_notice;

	dev = ib_get_client_data(device, &inform_client);
	if (!dev)
		return 0; /* No one to give notice to. */

	port = &dev->port[port_num - dev->start_port];
	spin_lock_irq(&port->lock);
	group = inform_find(port, __be16_to_cpu(notice->trap.
						generic.trap_num));
	if (!group) {
		spin_unlock_irq(&port->lock);
		return 0;
	}

	atomic_inc(&group->refcount);
	spin_unlock_irq(&port->lock);

	info_notice = kmalloc(sizeof *info_notice, GFP_KERNEL);
	if (!info_notice) {
		release_group(group);
		return -ENOMEM;
	}

	info_notice->notice = *notice;

	spin_lock_irq(&group->lock);
	list_add(&info_notice->list, &group->notice_list);
	if (group->state == INFORM_IDLE) {
		group->state = INFORM_BUSY;
		spin_unlock_irq(&group->lock);
		/* Run inline; the handler drops our group reference. */
		inform_work_handler(&group->work);
	} else {
		spin_unlock_irq(&group->lock);
		release_group(group);
	}

	return 0;
}
492219820Sjeff
/*
 * Find or create the group for a trap number, returning it with an
 * extra reference held.  Allocation happens with the port lock dropped,
 * so a concurrent creator may win the race: inform_insert() then
 * returns the existing group and ours is freed.  Returns NULL on
 * allocation failure.
 */
static struct inform_group *acquire_group(struct inform_port *port,
					  u16 trap_number, gfp_t gfp_mask)
{
	struct inform_group *group, *cur_group;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	group = inform_find(port, trap_number);
	if (group)
		goto found;
	spin_unlock_irqrestore(&port->lock, flags);

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return NULL;

	group->port = port;
	group->trap_number = trap_number;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->active_list);
	INIT_LIST_HEAD(&group->notice_list);
	INIT_WORK(&group->work, inform_work_handler);
	spin_lock_init(&group->lock);

	spin_lock_irqsave(&port->lock, flags);
	cur_group = inform_insert(port, group);
	if (cur_group) {
		/* Lost the race; use the group inserted concurrently. */
		kfree(group);
		group = cur_group;
	} else
		atomic_inc(&port->refcount);
found:
	atomic_inc(&group->refcount);
	spin_unlock_irqrestore(&port->lock, flags);
	return group;
}
529219820Sjeff
530219820Sjeff/*
531219820Sjeff * We serialize all join requests to a single group to make our lives much
532219820Sjeff * easier.  Otherwise, two users could try to join the same group
533219820Sjeff * simultaneously, with different configurations, one could leave while the
534219820Sjeff * join is in progress, etc., which makes locking around error recovery
535219820Sjeff * difficult.
536219820Sjeff */
/*
 * Subscribe for notices matching trap_number on a port.  The callback
 * is invoked with status 0 and the notice for each event, with a
 * registration status when registration completes or fails, and with
 * -ENETRESET if the registration is lost; a non-zero callback return
 * unregisters the subscription.  Returns the ib_inform_info to pass to
 * ib_sa_unregister_inform_info(), or an ERR_PTR on failure.
 */
struct ib_inform_info *
ib_sa_register_inform_info(struct ib_sa_client *client,
			   struct ib_device *device, u8 port_num,
			   u16 trap_number, gfp_t gfp_mask,
			   int (*callback)(int status,
					   struct ib_inform_info *info,
					   struct ib_sa_notice *notice),
			   void *context)
{
	struct inform_device *dev;
	struct inform_member *member;
	struct ib_inform_info *info;
	int ret;

	dev = ib_get_client_data(device, &inform_client);
	if (!dev)
		return ERR_PTR(-ENODEV);

	member = kzalloc(sizeof *member, gfp_mask);
	if (!member)
		return ERR_PTR(-ENOMEM);

	ib_sa_client_get(client);
	member->client = client;
	member->info.trap_number = trap_number;
	member->info.callback = callback;
	member->info.context = context;
	init_completion(&member->comp);
	atomic_set(&member->refcount, 1);
	member->state = INFORM_REGISTERING;

	member->group = acquire_group(&dev->port[port_num - dev->start_port],
				      trap_number, gfp_mask);
	if (!member->group) {
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * The user will get the info structure in their callback.  They
	 * could then free the info structure before we can return from
	 * this routine.  So we save the pointer to return before queuing
	 * any callback.
	 */
	info = &member->info;
	queue_reg(member);
	return info;

err:
	ib_sa_client_put(member->client);
	kfree(member);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_sa_register_inform_info);
591219820Sjeff
/*
 * Remove a subscription created by ib_sa_register_inform_info().  Blocks
 * until all in-flight callbacks holding the member complete, then frees
 * the member and drops the group reference (scheduling the work handler
 * to unregister from the SA if this was the last member).
 */
void ib_sa_unregister_inform_info(struct ib_inform_info *info)
{
	struct inform_member *member;
	struct inform_group *group;

	member = container_of(info, struct inform_member, info);
	group = member->group;

	spin_lock_irq(&group->lock);
	if (member->state == INFORM_MEMBER)
		group->members--;

	list_del_init(&member->list);

	if (group->state == INFORM_IDLE) {
		group->state = INFORM_BUSY;
		spin_unlock_irq(&group->lock);
		/* Continue to hold reference on group until callback */
		queue_work(inform_wq, &group->work);
	} else {
		spin_unlock_irq(&group->lock);
		release_group(group);
	}

	deref_member(member);
	wait_for_completion(&member->comp);
	ib_sa_client_put(member->client);
	kfree(member);
}
EXPORT_SYMBOL(ib_sa_unregister_inform_info);
622219820Sjeff
/*
 * Mark every group on a port as having lost its SA registration (after
 * a port error, LID/SM change, or client reregister event).  Idle
 * groups are scheduled so the work handler runs their error processing;
 * busy groups will notice the INFORM_ERROR state on their next pass.
 */
static void inform_groups_lost(struct inform_port *port)
{
	struct inform_group *group;
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	for (node = rb_first(&port->table); node; node = rb_next(node)) {
		group = rb_entry(node, struct inform_group, node);
		spin_lock(&group->lock);
		if (group->state == INFORM_IDLE) {
			atomic_inc(&group->refcount);
			queue_work(inform_wq, &group->work);
		}
		group->state = INFORM_ERROR;
		spin_unlock(&group->lock);
	}
	spin_unlock_irqrestore(&port->lock, flags);
}
642219820Sjeff
643219820Sjeffstatic void inform_event_handler(struct ib_event_handler *handler,
644219820Sjeff				struct ib_event *event)
645219820Sjeff{
646219820Sjeff	struct inform_device *dev;
647219820Sjeff
648219820Sjeff	dev = container_of(handler, struct inform_device, event_handler);
649219820Sjeff
650219820Sjeff	switch (event->event) {
651219820Sjeff	case IB_EVENT_PORT_ERR:
652219820Sjeff	case IB_EVENT_LID_CHANGE:
653219820Sjeff	case IB_EVENT_SM_CHANGE:
654219820Sjeff	case IB_EVENT_CLIENT_REREGISTER:
655219820Sjeff		inform_groups_lost(&dev->port[event->element.port_num -
656219820Sjeff					      dev->start_port]);
657219820Sjeff		break;
658219820Sjeff	default:
659219820Sjeff		break;
660219820Sjeff	}
661219820Sjeff}
662219820Sjeff
/*
 * ib_client add callback: allocate per-device state with one
 * inform_port per physical port and register for device events.
 * Non-IB transports are ignored.  Switches expose a single port 0;
 * CAs/routers number ports from 1.
 */
static void inform_add_one(struct ib_device *device)
{
	struct inform_device *dev;
	struct inform_port *port;
	int i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
		      GFP_KERNEL);
	if (!dev)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH)
		dev->start_port = dev->end_port = 0;
	else {
		dev->start_port = 1;
		dev->end_port = device->phys_port_cnt;
	}

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		port = &dev->port[i];
		port->dev = dev;
		port->port_num = dev->start_port + i;
		spin_lock_init(&port->lock);
		port->table = RB_ROOT;
		init_completion(&port->comp);
		atomic_set(&port->refcount, 1);
	}

	dev->device = device;
	ib_set_client_data(device, &inform_client, dev);

	INIT_IB_EVENT_HANDLER(&dev->event_handler, device, inform_event_handler);
	ib_register_event_handler(&dev->event_handler);
}
700219820Sjeff
/*
 * ib_client remove callback: stop event delivery, flush outstanding
 * group work, then wait for each port's reference count to drain
 * before freeing the device state.
 */
static void inform_remove_one(struct ib_device *device)
{
	struct inform_device *dev;
	struct inform_port *port;
	int i;

	dev = ib_get_client_data(device, &inform_client);
	if (!dev)
		return;

	ib_unregister_event_handler(&dev->event_handler);
	flush_workqueue(inform_wq);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		port = &dev->port[i];
		deref_port(port);	/* drop the initial reference */
		wait_for_completion(&port->comp);
	}

	kfree(dev);
}
722219820Sjeff
723219820Sjeffint notice_init(void)
724219820Sjeff{
725219820Sjeff	int ret;
726219820Sjeff
727219820Sjeff	inform_wq = create_singlethread_workqueue("ib_inform");
728219820Sjeff	if (!inform_wq)
729219820Sjeff		return -ENOMEM;
730219820Sjeff
731219820Sjeff	ib_sa_register_client(&sa_client);
732219820Sjeff
733219820Sjeff	ret = ib_register_client(&inform_client);
734219820Sjeff	if (ret)
735219820Sjeff		goto err;
736219820Sjeff	return 0;
737219820Sjeff
738219820Sjefferr:
739219820Sjeff	ib_sa_unregister_client(&sa_client);
740219820Sjeff	destroy_workqueue(inform_wq);
741219820Sjeff	return ret;
742219820Sjeff}
743219820Sjeff
/* Module-level teardown: reverse order of notice_init(). */
void notice_cleanup(void)
{
	ib_unregister_client(&inform_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(inform_wq);
}
750