Lines Matching defs:group

118 struct mcast_group *group;
134 struct mcast_group *group;
138 group = rb_entry(node, struct mcast_group, node);
139 ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
141 return group;
152 struct mcast_group *group,
164 ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
165 sizeof group->rec.mgid);
175 rb_link_node(&group->node, parent, link);
176 rb_insert_color(&group->node, &port->table);
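
The mcast_find/mcast_insert fragments above (lines 134-176) walk a red-black tree keyed by the group's MGID, with memcmp() over the raw 16-byte GID deciding whether to descend left or right. Below is a minimal userspace sketch of the same keyed lookup and insert, using a plain unbalanced binary tree in place of the kernel rb-tree; the gid_node/gid_find/gid_insert names are illustrative, not part of the driver.

    /* Sketch of the memcmp-keyed lookup/insert behind mcast_find()/mcast_insert(),
     * with an unbalanced binary tree standing in for the kernel rb-tree.
     * Types and names here are hypothetical. */
    #include <string.h>
    #include <stddef.h>

    struct gid_node {
    	unsigned char	raw[16];	/* MGID, compared with memcmp() */
    	struct gid_node	*left, *right;
    };

    static struct gid_node *gid_find(struct gid_node *root, const unsigned char *mgid)
    {
    	while (root) {
    		int ret = memcmp(mgid, root->raw, 16);

    		if (ret < 0)
    			root = root->left;
    		else if (ret > 0)
    			root = root->right;
    		else
    			return root;	/* exact MGID match */
    	}
    	return NULL;
    }

    /* Returns NULL on success, or the existing node if the MGID is already present,
     * mirroring how mcast_insert() reports a duplicate back to its caller. */
    static struct gid_node *gid_insert(struct gid_node **root, struct gid_node *node)
    {
    	struct gid_node **link = root;

    	while (*link) {
    		int ret = memcmp(node->raw, (*link)->raw, 16);

    		if (ret < 0)
    			link = &(*link)->left;
    		else if (ret > 0)
    			link = &(*link)->right;
    		else
    			return *link;	/* duplicate key */
    	}
    	node->left = node->right = NULL;
    	*link = node;
    	return NULL;
    }
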
186 static void release_group(struct mcast_group *group)
188 struct mcast_port *port = group->port;
192 if (atomic_dec_and_test(&group->refcount)) {
193 rb_erase(&group->node, &port->table);
195 kfree(group);
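
release_group (lines 186-195) drops a reference, and only the caller that takes the count to zero unlinks the group from port->table and frees it. A minimal sketch of that dec-and-test pattern with C11 atomics follows; obj_get/obj_put are hypothetical names, and the kernel version additionally holds port->lock around the erase.

    /* Sketch of the dec-and-test release pattern from release_group(): only the
     * caller that drops the last reference tears the object down. */
    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj {
    	atomic_int refcount;
    	/* ... payload, tree linkage ... */
    };

    static void obj_get(struct obj *o)
    {
    	atomic_fetch_add(&o->refcount, 1);
    }

    static void obj_put(struct obj *o)
    {
    	/* atomic_dec_and_test() equivalent: previous value 1 means we held
    	 * the final reference */
    	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
    		/* in the driver this is where the group is rb_erase()d from
    		 * port->table under port->lock before kfree() */
    		free(o);
    	}
    }
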
209 struct mcast_group *group = member->group;
212 spin_lock_irqsave(&group->lock, flags);
213 list_add_tail(&member->list, &group->pending_list);
214 if (group->state == MCAST_IDLE) {
215 group->state = MCAST_BUSY;
216 atomic_inc(&group->refcount);
217 queue_work(mcast_wq, &group->work);
219 spin_unlock_irqrestore(&group->lock, flags);
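
queue_join (lines 209-219) appends the member to the group's pending_list under group->lock; only the request that finds the group MCAST_IDLE flips it to MCAST_BUSY, takes a reference, and queues the work item, so the group stays pinned until the work handler sets it back to idle and calls release_group. A rough sketch of that handoff, assuming a pthread mutex in place of the spinlock and a stubbed scheduler; all names here are hypothetical.

    /* Sketch of the queue_join() handoff: only the first pending request
     * schedules the worker, and it takes a reference that the worker drops
     * when it returns the group to the idle state. */
    #include <pthread.h>

    enum grp_state { GRP_IDLE, GRP_BUSY };

    struct grp {
    	pthread_mutex_t	lock;
    	enum grp_state	state;
    	int		refcount;	/* manipulated under lock in this sketch */
    	/* struct list_head pending_list; ... */
    };

    static void schedule_work_for(struct grp *g)
    {
    	/* stand-in for queue_work(mcast_wq, &group->work) */
    	(void)g;
    }

    static void queue_request(struct grp *g /* , request to append */)
    {
    	pthread_mutex_lock(&g->lock);
    	/* list_add_tail(&req->list, &g->pending_list); */
    	if (g->state == GRP_IDLE) {
    		g->state = GRP_BUSY;
    		g->refcount++;		/* keep the group alive while work is queued */
    		schedule_work_for(g);
    	}
    	pthread_mutex_unlock(&g->lock);
    }
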
223 * A multicast group has three types of members: full member, non member, and
228 static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
234 group->members[i] += inc;
238 * If a multicast group has zero members left for a particular join state, but
239 * the group is still a member with the SA, we need to leave that join state.
243 static u8 get_leave_state(struct mcast_group *group)
249 if (!group->members[i])
252 return leave_state & group->rec.join_state;
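
adjust_membership and get_leave_state (lines 228-252) keep one counter per JoinState bit and compute the bits worth leaving with the SA: those whose counter has dropped to zero, masked by the bits the group actually joined with. A self-contained sketch of that accounting, assuming the usual bit 0/1/2 layout of the MCMemberRecord JoinState (full member, non member, send-only non member); struct and field names are illustrative.

    /* Sketch of the per-join-state accounting: one counter per JoinState bit. */
    #include <stdint.h>

    #define NUM_JOIN_STATES 3

    struct grp_counts {
    	int	members[NUM_JOIN_STATES];	/* users joined with each bit */
    	uint8_t	joined_state;			/* bits currently held with the SA */
    };

    static void adjust_membership(struct grp_counts *g, uint8_t join_state, int inc)
    {
    	int i;

    	for (i = 0; i < NUM_JOIN_STATES; i++, join_state >>= 1)
    		if (join_state & 0x1)
    			g->members[i] += inc;
    }

    /* Bits we should now leave: those with no remaining users, restricted to
     * the bits the group actually holds with the SA. */
    static uint8_t get_leave_state(struct grp_counts *g)
    {
    	uint8_t leave_state = 0;
    	int i;

    	for (i = 0; i < NUM_JOIN_STATES; i++)
    		if (!g->members[i])
    			leave_state |= (1 << i);

    	return leave_state & g->joined_state;
    }
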
302 static int send_join(struct mcast_group *group, struct mcast_member *member)
304 struct mcast_port *port = group->port;
307 group->last_join = member;
312 3000, GFP_KERNEL, join_handler, group,
313 &group->query);
315 group->query_id = ret;
321 static int send_leave(struct mcast_group *group, u8 leave_state)
323 struct mcast_port *port = group->port;
327 rec = group->rec;
329 group->leave_state = leave_state;
337 group, &group->query);
339 group->query_id = ret;
345 static void join_group(struct mcast_group *group, struct mcast_member *member,
349 adjust_membership(group, join_state, 1);
350 group->rec.join_state |= join_state;
351 member->multicast.rec = group->rec;
353 list_move(&member->list, &group->active_list);
356 static int fail_join(struct mcast_group *group, struct mcast_member *member,
359 spin_lock_irq(&group->lock);
361 spin_unlock_irq(&group->lock);
365 static void process_group_error(struct mcast_group *group)
371 if (group->state == MCAST_PKEY_EVENT)
372 ret = ib_find_pkey(group->port->dev->device,
373 group->port->port_num,
374 be16_to_cpu(group->rec.pkey), &pkey_index);
376 spin_lock_irq(&group->lock);
377 if (group->state == MCAST_PKEY_EVENT && !ret &&
378 group->pkey_index == pkey_index)
381 while (!list_empty(&group->active_list)) {
382 member = list_entry(group->active_list.next,
386 adjust_membership(group, member->multicast.rec.join_state, -1);
388 spin_unlock_irq(&group->lock);
395 spin_lock_irq(&group->lock);
398 group->rec.join_state = 0;
400 group->state = MCAST_BUSY;
401 spin_unlock_irq(&group->lock);
406 struct mcast_group *group;
412 group = container_of(work, typeof(*group), work);
414 spin_lock_irq(&group->lock);
415 while (!list_empty(&group->pending_list) ||
416 (group->state != MCAST_BUSY)) {
418 if (group->state != MCAST_BUSY) {
419 spin_unlock_irq(&group->lock);
420 process_group_error(group);
424 member = list_entry(group->pending_list.next,
430 if (join_state == (group->rec.join_state & join_state)) {
431 status = cmp_rec(&group->rec, &multicast->rec,
434 join_group(group, member, join_state);
437 spin_unlock_irq(&group->lock);
440 spin_unlock_irq(&group->lock);
441 status = send_join(group, member);
446 ret = fail_join(group, member, status);
452 spin_lock_irq(&group->lock);
455 join_state = get_leave_state(group);
457 group->rec.join_state &= ~join_state;
458 spin_unlock_irq(&group->lock);
459 if (send_leave(group, join_state))
462 group->state = MCAST_IDLE;
463 spin_unlock_irq(&group->lock);
464 release_group(group);
471 static void process_join_error(struct mcast_group *group, int status)
476 spin_lock_irq(&group->lock);
477 member = list_entry(group->pending_list.next,
479 if (group->last_join == member) {
482 spin_unlock_irq(&group->lock);
488 spin_unlock_irq(&group->lock);
494 struct mcast_group *group = context;
498 process_join_error(group, status);
500 ib_find_pkey(group->port->dev->device, group->port->port_num,
503 spin_lock_irq(&group->port->lock);
504 group->rec = *rec;
505 if (group->state == MCAST_BUSY &&
506 group->pkey_index == MCAST_INVALID_PKEY_INDEX)
507 group->pkey_index = pkey_index;
508 if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
509 rb_erase(&group->node, &group->port->table);
510 mcast_insert(group->port, group, 1);
512 spin_unlock_irq(&group->port->lock);
514 mcast_work_handler(&group->work);
520 struct mcast_group *group = context;
522 if (status && (group->retries > 0) &&
523 !send_leave(group, group->leave_state))
524 group->retries--;
526 mcast_work_handler(&group->work);
532 struct mcast_group *group, *cur_group;
539 group = mcast_find(port, mgid);
540 if (group)
545 group = kzalloc(sizeof *group, gfp_mask);
546 if (!group)
549 group->retries = 3;
550 group->port = port;
551 group->rec.mgid = *mgid;
552 group->pkey_index = MCAST_INVALID_PKEY_INDEX;
553 INIT_LIST_HEAD(&group->pending_list);
554 INIT_LIST_HEAD(&group->active_list);
555 INIT_WORK(&group->work, mcast_work_handler);
556 spin_lock_init(&group->lock);
559 cur_group = mcast_insert(port, group, is_mgid0);
561 kfree(group);
562 group = cur_group;
566 atomic_inc(&group->refcount);
568 return group;
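
acquire_group (lines 532-568) is a find-or-create: look the MGID up under the port lock, allocate a new group outside the lock on a miss, then re-take the lock and try to insert; if another thread inserted the same MGID in the meantime, the fresh allocation is freed and the existing group is used, and a reference is taken before the lock is dropped. A hedged sketch of that flow, reusing the hypothetical gid_find/gid_insert helpers from the earlier sketch; the mutex stands in for port->lock and error paths are trimmed.

    /* Sketch of the acquire_group() find-or-create flow: lookup, allocate
     * outside the lock on a miss, insert, and fall back to the node another
     * thread raced in.  Builds on the gid_node helpers sketched above. */
    #include <stdlib.h>
    #include <string.h>
    #include <pthread.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct gid_node *table_root;

    static struct gid_node *acquire_node(const unsigned char *mgid)
    {
    	struct gid_node *node, *cur;

    	pthread_mutex_lock(&table_lock);
    	node = gid_find(table_root, mgid);
    	pthread_mutex_unlock(&table_lock);
    	if (node)
    		return node;

    	node = calloc(1, sizeof(*node));	/* allocation may sleep, so no lock held */
    	if (!node)
    		return NULL;
    	memcpy(node->raw, mgid, 16);

    	pthread_mutex_lock(&table_lock);
    	cur = gid_insert(&table_root, node);
    	if (cur) {			/* lost the race: this MGID was just inserted */
    		free(node);
    		node = cur;
    	}
    	/* acquire_group() takes its reference here, still under port->lock:
    	 * atomic_inc(&group->refcount) */
    	pthread_mutex_unlock(&table_lock);
    	return node;
    }
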
572 * We serialize all join requests to a single group to make our lives much
573 * easier. Otherwise, two users could try to join the same group
610 member->group = acquire_group(&dev->port[port_num - dev->start_port],
612 if (!member->group) {
637 struct mcast_group *group;
640 group = member->group;
642 spin_lock_irq(&group->lock);
644 adjust_membership(group, multicast->rec.join_state, -1);
648 if (group->state == MCAST_IDLE) {
649 group->state = MCAST_BUSY;
650 spin_unlock_irq(&group->lock);
651 /* Continue to hold reference on group until callback */
652 queue_work(mcast_wq, &group->work);
654 spin_unlock_irq(&group->lock);
655 release_group(group);
670 struct mcast_group *group;
680 group = mcast_find(port, mgid);
681 if (group)
682 *rec = group->rec;
724 struct mcast_group *group;
730 group = rb_entry(node, struct mcast_group, node);
731 spin_lock(&group->lock);
732 if (group->state == MCAST_IDLE) {
733 atomic_inc(&group->refcount);
734 queue_work(mcast_wq, &group->work);
736 if (group->state != MCAST_GROUP_ERROR)
737 group->state = state;
738 spin_unlock(&group->lock);