Lines Matching refs:group

116 	struct mcast_group	*group;
132 struct mcast_group *group;
136 group = rb_entry(node, struct mcast_group, node);
137 ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
139 return group;
150 struct mcast_group *group,
162 ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
163 sizeof group->rec.mgid);
173 rb_link_node(&group->node, parent, link);
174 rb_insert_color(&group->node, &port->table);
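
The matches at 132-174 are the port-wide red-black tree that keys multicast groups by MGID. Below is a minimal sketch of the lookup walk those lines imply; the struct definitions, includes, and caller-held locking are omitted, so treat it as a reconstruction from the matched lines rather than the file's verbatim code.

static struct mcast_group *mcast_find(struct mcast_port *port,
				      union ib_gid *mgid)
{
	struct rb_node *node = port->table.rb_node;	/* port->table is an rb_root */
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;			/* exact MGID match */
		node = (ret < 0) ? node->rb_left : node->rb_right;
	}
	return NULL;	/* caller allocates a group and runs the insert at 150-174 */
}

The insert path at 150-174 walks the same memcmp comparisons to find the link position, then rb_link_node()/rb_insert_color() splice the new node in and rebalance the tree.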
184 static void release_group(struct mcast_group *group)
186 struct mcast_port *port = group->port;
190 if (atomic_dec_and_test(&group->refcount)) {
191 rb_erase(&group->node, &port->table);
193 kfree(group);
207 struct mcast_group *group = member->group;
210 spin_lock_irqsave(&group->lock, flags);
211 list_add_tail(&member->list, &group->pending_list);
212 if (group->state == MCAST_IDLE) {
213 group->state = MCAST_BUSY;
214 atomic_inc(&group->refcount);
215 queue_work(mcast_wq, &group->work);
217 spin_unlock_irqrestore(&group->lock, flags);
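
Lines 207-217 are the serialization point for group work: every request lands on pending_list, but the work item is queued only on the MCAST_IDLE -> MCAST_BUSY transition, and that transition also takes the group reference that release_group() (184-193) later drops. A self-contained userspace model of that hand-off, with invented names, to show why at most one worker runs per group:

#include <pthread.h>
#include <stdbool.h>

enum grp_state { GRP_IDLE, GRP_BUSY };

struct grp {
	pthread_mutex_t lock;
	enum grp_state  state;
	int             pending;	/* stands in for pending_list */
	int             refcount;	/* stands in for group->refcount */
};

/* Append a request; return true only when the caller must schedule the
 * worker, i.e. on the single IDLE -> BUSY transition. */
static bool queue_req(struct grp *g)
{
	bool kick;

	pthread_mutex_lock(&g->lock);
	g->pending++;
	kick = (g->state == GRP_IDLE);
	if (kick) {
		g->state = GRP_BUSY;
		g->refcount++;	/* worker holds a reference until it goes idle */
	}
	pthread_mutex_unlock(&g->lock);
	return kick;
}

Requests arriving while the group is already BUSY just extend the pending list; the running handler drains them before flipping the group back to IDLE and dropping its reference.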
221 * A multicast group has three types of members: full member, non member, and
226 static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
232 group->members[i] += inc;
236 * If a multicast group has zero members left for a particular join state, but
237 * the group is still a member with the SA, we need to leave that join state.
241 static u8 get_leave_state(struct mcast_group *group)
247 if (!group->members[i])
250 return leave_state & group->rec.join_state;
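
The comments at 221 and 236-237 describe the per-join-state accounting: a group keeps one counter per join-state bit (full, non, and send-only member), and once a bit has no local users while the group is still joined with the SA in that state, that bit must be left. A runnable userspace model of the two helpers at 226-232 and 241-250; the bit meanings follow the IB MCMemberRecord JoinState convention, stated here as an assumption.

#include <stdio.h>

struct grp {
	int           members[3];	/* one counter per join-state bit */
	unsigned char join_state;	/* bits currently joined with the SA */
};

static void adjust_membership(struct grp *g, unsigned char join_state, int inc)
{
	int i;

	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			g->members[i] += inc;
}

static unsigned char get_leave_state(struct grp *g)
{
	unsigned char leave_state = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (!g->members[i])
			leave_state |= (0x1 << i);

	return leave_state & g->join_state;	/* only bits we actually hold with the SA */
}

int main(void)
{
	struct grp g = { .join_state = 0x3 };	/* joined as full (0x1) and non member (0x2) */

	adjust_membership(&g, 0x1, 1);	/* one local member still needs full membership */
	printf("leave_state = 0x%x\n", get_leave_state(&g));	/* prints 0x2 */
	return 0;
}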
300 static int send_join(struct mcast_group *group, struct mcast_member *member)
302 struct mcast_port *port = group->port;
305 group->last_join = member;
310 3000, GFP_KERNEL, join_handler, group,
311 &group->query);
313 group->query_id = ret;
319 static int send_leave(struct mcast_group *group, u8 leave_state)
321 struct mcast_port *port = group->port;
325 rec = group->rec;
327 group->leave_state = leave_state;
335 group, &group->query);
337 group->query_id = ret;
343 static void join_group(struct mcast_group *group, struct mcast_member *member,
347 adjust_membership(group, join_state, 1);
348 group->rec.join_state |= join_state;
349 member->multicast.rec = group->rec;
351 list_move(&member->list, &group->active_list);
354 static int fail_join(struct mcast_group *group, struct mcast_member *member,
357 spin_lock_irq(&group->lock);
359 spin_unlock_irq(&group->lock);
363 static void process_group_error(struct mcast_group *group)
369 if (group->state == MCAST_PKEY_EVENT)
370 ret = ib_find_pkey(group->port->dev->device,
371 group->port->port_num,
372 be16_to_cpu(group->rec.pkey), &pkey_index);
374 spin_lock_irq(&group->lock);
375 if (group->state == MCAST_PKEY_EVENT && !ret &&
376 group->pkey_index == pkey_index)
379 while (!list_empty(&group->active_list)) {
380 member = list_entry(group->active_list.next,
384 adjust_membership(group, member->multicast.rec.join_state, -1);
386 spin_unlock_irq(&group->lock);
393 spin_lock_irq(&group->lock);
396 group->rec.join_state = 0;
398 group->state = MCAST_BUSY;
399 spin_unlock_irq(&group->lock);
404 struct mcast_group *group;
410 group = container_of(work, typeof(*group), work);
412 spin_lock_irq(&group->lock);
413 while (!list_empty(&group->pending_list) ||
414 (group->state != MCAST_BUSY)) {
416 if (group->state != MCAST_BUSY) {
417 spin_unlock_irq(&group->lock);
418 process_group_error(group);
422 member = list_entry(group->pending_list.next,
428 if (join_state == (group->rec.join_state & join_state)) {
429 status = cmp_rec(&group->rec, &multicast->rec,
432 join_group(group, member, join_state);
435 spin_unlock_irq(&group->lock);
438 spin_unlock_irq(&group->lock);
439 status = send_join(group, member);
444 ret = fail_join(group, member, status);
450 spin_lock_irq(&group->lock);
453 join_state = get_leave_state(group);
455 group->rec.join_state &= ~join_state;
456 spin_unlock_irq(&group->lock);
457 if (send_leave(group, join_state))
460 group->state = MCAST_IDLE;
461 spin_unlock_irq(&group->lock);
462 release_group(group);
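
The fragments from 404-462 are the single worker that queue_req() schedules (the real handler receives a struct work_struct and recovers the group with container_of(), lines 410-412). A condensed, illustrative version of its control flow using the helpers that appear in the matches above; per-member status callbacks, reference counting, record comparison, and the P_Key re-check are left out, so this is a reading aid rather than the file's code.

static void mcast_work_handler_sketch(struct mcast_group *group)
{
	struct mcast_member *member;
	int status;
	u8 join_state;

retest:
	spin_lock_irq(&group->lock);
	while (!list_empty(&group->pending_list) ||
	       (group->state != MCAST_BUSY)) {
		if (group->state != MCAST_BUSY) {
			/* Device or P_Key event: flush active members, rejoin. */
			spin_unlock_irq(&group->lock);
			process_group_error(group);
			goto retest;
		}

		member = list_entry(group->pending_list.next,
				    struct mcast_member, list);
		join_state = member->multicast.rec.join_state;

		if (join_state == (group->rec.join_state & join_state)) {
			/* SA already has us joined in a superset state:
			 * complete the request locally. */
			join_group(group, member, join_state);
		} else {
			/* Need the SA: drop the lock and fire the async join.
			 * join_handler() re-queues this work item on completion. */
			spin_unlock_irq(&group->lock);
			status = send_join(group, member);
			if (!status)
				return;
			fail_join(group, member, status);	/* drops the request */
			spin_lock_irq(&group->lock);
		}
	}

	/* Pending list drained: leave any unused join-state bits, or go idle. */
	join_state = get_leave_state(group);
	if (join_state) {
		group->rec.join_state &= ~join_state;
		spin_unlock_irq(&group->lock);
		if (send_leave(group, join_state))
			goto retest;
	} else {
		group->state = MCAST_IDLE;
		spin_unlock_irq(&group->lock);
		release_group(group);	/* pairs with the reference taken in queue_req() */
	}
}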
469 static void process_join_error(struct mcast_group *group, int status)
474 spin_lock_irq(&group->lock);
475 member = list_entry(group->pending_list.next,
477 if (group->last_join == member) {
480 spin_unlock_irq(&group->lock);
486 spin_unlock_irq(&group->lock);
492 struct mcast_group *group = context;
496 process_join_error(group, status);
498 ib_find_pkey(group->port->dev->device, group->port->port_num,
501 spin_lock_irq(&group->port->lock);
502 group->rec = *rec;
503 if (group->state == MCAST_BUSY &&
504 group->pkey_index == MCAST_INVALID_PKEY_INDEX)
505 group->pkey_index = pkey_index;
506 if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
507 rb_erase(&group->node, &group->port->table);
508 mcast_insert(group->port, group, 1);
510 spin_unlock_irq(&group->port->lock);
512 mcast_work_handler(&group->work);
518 struct mcast_group *group = context;
520 if (status && (group->retries > 0) &&
521 !send_leave(group, group->leave_state))
522 group->retries--;
524 mcast_work_handler(&group->work);
530 struct mcast_group *group, *cur_group;
537 group = mcast_find(port, mgid);
538 if (group)
543 group = kzalloc(sizeof *group, gfp_mask);
544 if (!group)
547 group->retries = 3;
548 group->port = port;
549 group->rec.mgid = *mgid;
550 group->pkey_index = MCAST_INVALID_PKEY_INDEX;
551 INIT_LIST_HEAD(&group->pending_list);
552 INIT_LIST_HEAD(&group->active_list);
553 INIT_WORK(&group->work, mcast_work_handler);
554 spin_lock_init(&group->lock);
557 cur_group = mcast_insert(port, group, is_mgid0);
559 kfree(group);
560 group = cur_group;
564 atomic_inc(&group->refcount);
566 return group;
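
Lines 530-566 are the find-or-create path: look up the group under the port lock, and if it is missing, allocate outside the lock and re-run the insert; if another thread won the race, mcast_insert() hands back the existing group and the fresh allocation from 543 is freed at 559. Every successful return takes a reference that release_group() (184-193) later drops. A self-contained userspace model of that race-tolerant pattern, with invented names:

#include <pthread.h>
#include <stdlib.h>

struct grp {
	struct grp *next;
	int         key;	/* stands in for the MGID */
	int         refcount;
};

static struct grp *table;	/* stands in for the rb-tree */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct grp *find_locked(int key)
{
	struct grp *g;

	for (g = table; g; g = g->next)
		if (g->key == key)
			return g;
	return NULL;
}

/* Find-or-create with the allocation done outside the lock, as in
 * acquire_group(): a racing creator may win, in which case the loser's
 * allocation is discarded and the winner's group is used instead. */
static struct grp *acquire_grp(int key)
{
	struct grp *g, *cur;

	pthread_mutex_lock(&table_lock);
	g = find_locked(key);
	if (g)
		goto found;
	pthread_mutex_unlock(&table_lock);

	g = calloc(1, sizeof(*g));
	if (!g)
		return NULL;
	g->key = key;

	pthread_mutex_lock(&table_lock);
	cur = find_locked(key);		/* re-check: someone may have inserted it */
	if (cur) {
		free(g);		/* mirrors the kfree(group) at line 559 */
		g = cur;
	} else {
		g->next = table;
		table = g;
	}
found:
	g->refcount++;			/* mirrors atomic_inc(&group->refcount) at 564 */
	pthread_mutex_unlock(&table_lock);
	return g;
}

The real insert also has to tolerate duplicate zero-MGID groups (the is_mgid0 flag passed at 557), which this model ignores.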
570 * We serialize all join requests to a single group to make our lives much
571 * easier. Otherwise, two users could try to join the same group
608 member->group = acquire_group(&dev->port[port_num - dev->start_port],
610 if (!member->group) {
635 struct mcast_group *group;
638 group = member->group;
640 spin_lock_irq(&group->lock);
642 adjust_membership(group, multicast->rec.join_state, -1);
646 if (group->state == MCAST_IDLE) {
647 group->state = MCAST_BUSY;
648 spin_unlock_irq(&group->lock);
649 /* Continue to hold reference on group until callback */
650 queue_work(mcast_wq, &group->work);
652 spin_unlock_irq(&group->lock);
653 release_group(group);
668 struct mcast_group *group;
678 group = mcast_find(port, mgid);
679 if (group)
680 *rec = group->rec;
722 struct mcast_group *group;
728 group = rb_entry(node, struct mcast_group, node);
729 spin_lock(&group->lock);
730 if (group->state == MCAST_IDLE) {
731 atomic_inc(&group->refcount);
732 queue_work(mcast_wq, &group->work);
734 if (group->state != MCAST_GROUP_ERROR)
735 group->state = state;
736 spin_unlock(&group->lock);
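
Lines 722-736 are the per-port event fan-out: every group in the port's tree is flagged with the event state (unless it is already in error), and idle groups get their work item kicked with a reference taken exactly as in queue_req(). A sketch of the walk those matches imply; the rb_first()/rb_next() iteration, the nesting of group->lock inside the port lock, and the enum type name are reconstructed rather than shown in the matches.

static void mcast_groups_event_sketch(struct mcast_port *port,
				      enum mcast_group_state state)
{
	struct mcast_group *group;
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	for (node = rb_first(&port->table); node; node = rb_next(node)) {
		group = rb_entry(node, struct mcast_group, node);
		spin_lock(&group->lock);		/* nests inside port->lock */
		if (group->state == MCAST_IDLE) {
			atomic_inc(&group->refcount);	/* dropped by the handler */
			queue_work(mcast_wq, &group->work);
		}
		if (group->state != MCAST_GROUP_ERROR)
			group->state = state;		/* handler will see the event */
		spin_unlock(&group->lock);
	}
	spin_unlock_irqrestore(&port->lock, flags);
}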