In /netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/drivers/infiniband/core/

Lines Matching refs:group

104 	struct mcast_group	*group;
120 struct mcast_group *group;
124 group = rb_entry(node, struct mcast_group, node);
125 ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
127 return group;
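
The matched lines 120-127 belong to the red-black tree lookup that resolves an MGID to its mcast_group. A minimal sketch of how the full function likely reads, reconstructed around the matched lines; the signature and the unmatched lines (tree walk, caller holding port->lock) are assumptions based on the mainline ib_sa multicast code:

    /* Assumed: caller holds port->lock while walking the tree. */
    static struct mcast_group *mcast_find(struct mcast_port *port,
                                          union ib_gid *mgid)
    {
        struct rb_node *node = port->table.rb_node;
        struct mcast_group *group;
        int ret;

        while (node) {
            group = rb_entry(node, struct mcast_group, node);
            ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
            if (!ret)
                return group;        /* exact MGID match */
            if (ret < 0)
                node = node->rb_left;
            else
                node = node->rb_right;
        }
        return NULL;
    }
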
138 struct mcast_group *group,
150 ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
151 sizeof group->rec.mgid);
161 rb_link_node(&group->node, parent, link);
162 rb_insert_color(&group->node, &port->table);
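
Lines 138-162 are from mcast_insert(), the insertion counterpart (called at line 548), which links a new group into the per-port tree keyed by MGID. A sketch of the likely shape; the third parameter name (allow_duplicates) and the duplicate-handling branch are assumptions:

    static struct mcast_group *mcast_insert(struct mcast_port *port,
                                            struct mcast_group *group,
                                            int allow_duplicates)
    {
        struct rb_node **link = &port->table.rb_node;
        struct rb_node *parent = NULL;
        struct mcast_group *cur_group;
        int ret;

        while (*link) {
            parent = *link;
            cur_group = rb_entry(parent, struct mcast_group, node);

            ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
                         sizeof group->rec.mgid);
            if (ret < 0)
                link = &(*link)->rb_left;
            else if (ret > 0)
                link = &(*link)->rb_right;
            else if (allow_duplicates)
                /* assumed: MGID-0 joins may coexist until the SA assigns an MGID */
                link = &(*link)->rb_left;
            else
                return cur_group;    /* an equal group already exists */
        }
        rb_link_node(&group->node, parent, link);
        rb_insert_color(&group->node, &port->table);
        return NULL;
    }
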
172 static void release_group(struct mcast_group *group)
174 struct mcast_port *port = group->port;
178 if (atomic_dec_and_test(&group->refcount)) {
179 rb_erase(&group->node, &port->table);
181 kfree(group);
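
release_group() (lines 172-181) drops a reference and, on the last put, unlinks the group from the per-port tree and frees it. The refcount/tree race has to be closed under the port lock; a sketch, with the locking lines (not visible in the match) added as assumptions:

    static void release_group(struct mcast_group *group)
    {
        struct mcast_port *port = group->port;
        unsigned long flags;

        spin_lock_irqsave(&port->lock, flags);
        if (atomic_dec_and_test(&group->refcount)) {
            /* last reference: remove from the per-port tree, then free */
            rb_erase(&group->node, &port->table);
            spin_unlock_irqrestore(&port->lock, flags);
            kfree(group);
        } else
            spin_unlock_irqrestore(&port->lock, flags);
    }
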
195 struct mcast_group *group = member->group;
198 spin_lock_irqsave(&group->lock, flags);
199 list_add(&member->list, &group->pending_list);
200 if (group->state == MCAST_IDLE) {
201 group->state = MCAST_BUSY;
202 atomic_inc(&group->refcount);
203 queue_work(mcast_wq, &group->work);
205 spin_unlock_irqrestore(&group->lock, flags);
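
Lines 195-205 show how a new join request is queued: the member is placed on the group's pending_list, and the group's work item is scheduled only if the group is idle. That is the mechanism behind the serialization comment at lines 561-562, since one work item processes the pending list for the whole group. A sketch of the enclosing function; the name queue_join and the unmatched lines are assumptions:

    static void queue_join(struct mcast_member *member)
    {
        struct mcast_group *group = member->group;
        unsigned long flags;

        spin_lock_irqsave(&group->lock, flags);
        list_add(&member->list, &group->pending_list);
        if (group->state == MCAST_IDLE) {
            group->state = MCAST_BUSY;
            atomic_inc(&group->refcount);    /* dropped by the work handler */
            queue_work(mcast_wq, &group->work);
        }
        spin_unlock_irqrestore(&group->lock, flags);
    }
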
209 * A multicast group has three types of members: full member, non member, and
214 static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
220 group->members[i] += inc;
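
The comment at line 209 and the fragment at 214/220 describe per-join-state member counting: group->members[] keeps one counter per join-state bit (full member, non member, and, completing the truncated comment, send-only member). A sketch of the complete loop; the bit-walking lines are assumed:

    static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
    {
        int i;

        /* one counter per join-state bit: full / non / send-only member */
        for (i = 0; i < 3; i++, join_state >>= 1)
            if (join_state & 0x1)
                group->members[i] += inc;
    }
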
224 * If a multicast group has zero members left for a particular join state, but
225 * the group is still a member with the SA, we need to leave that join state.
229 static u8 get_leave_state(struct mcast_group *group)
235 if (!group->members[i])
238 return leave_state & group->rec.join_state;
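
get_leave_state() (lines 229-238) computes which join states the group still holds with the SA but no longer has any local members for; those are the states that must now be left. A sketch of the likely body, with the accumulation lines assumed:

    static u8 get_leave_state(struct mcast_group *group)
    {
        u8 leave_state = 0;
        int i;

        /* collect the join-state bits that have no local members left */
        for (i = 0; i < 3; i++)
            if (!group->members[i])
                leave_state |= (0x1 << i);

        /* only leave states that were actually joined with the SA */
        return leave_state & group->rec.join_state;
    }
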
316 static int send_join(struct mcast_group *group, struct mcast_member *member)
318 struct mcast_port *port = group->port;
321 group->last_join = member;
326 3000, GFP_KERNEL, join_handler, group,
327 &group->query);
329 group->query_id = ret;
335 static int send_leave(struct mcast_group *group, u8 leave_state)
337 struct mcast_port *port = group->port;
341 rec = group->rec;
350 group, &group->query);
352 group->query_id = ret;
358 static void join_group(struct mcast_group *group, struct mcast_member *member,
362 adjust_membership(group, join_state, 1);
363 group->rec.join_state |= join_state;
364 member->multicast.rec = group->rec;
366 list_move(&member->list, &group->active_list);
369 static int fail_join(struct mcast_group *group, struct mcast_member *member,
372 spin_lock_irq(&group->lock);
374 spin_unlock_irq(&group->lock);
378 static void process_group_error(struct mcast_group *group)
383 spin_lock_irq(&group->lock);
384 while (!list_empty(&group->active_list)) {
385 member = list_entry(group->active_list.next,
389 adjust_membership(group, member->multicast.rec.join_state, -1);
391 spin_unlock_irq(&group->lock);
398 spin_lock_irq(&group->lock);
401 group->rec.join_state = 0;
402 group->state = MCAST_BUSY;
403 spin_unlock_irq(&group->lock);
408 struct mcast_group *group;
414 group = container_of(work, typeof(*group), work);
416 spin_lock_irq(&group->lock);
417 while (!list_empty(&group->pending_list) ||
418 (group->state == MCAST_ERROR)) {
420 if (group->state == MCAST_ERROR) {
421 spin_unlock_irq(&group->lock);
422 process_group_error(group);
426 member = list_entry(group->pending_list.next,
432 if (join_state == (group->rec.join_state & join_state)) {
433 status = cmp_rec(&group->rec, &multicast->rec,
436 join_group(group, member, join_state);
439 spin_unlock_irq(&group->lock);
442 spin_unlock_irq(&group->lock);
443 status = send_join(group, member);
448 ret = fail_join(group, member, status);
454 spin_lock_irq(&group->lock);
457 join_state = get_leave_state(group);
459 group->rec.join_state &= ~join_state;
460 spin_unlock_irq(&group->lock);
461 if (send_leave(group, join_state))
464 group->state = MCAST_IDLE;
465 spin_unlock_irq(&group->lock);
466 release_group(group);
473 static void process_join_error(struct mcast_group *group, int status)
478 spin_lock_irq(&group->lock);
479 member = list_entry(group->pending_list.next,
481 if (group->last_join == member) {
484 spin_unlock_irq(&group->lock);
490 spin_unlock_irq(&group->lock);
496 struct mcast_group *group = context;
499 process_join_error(group, status);
501 spin_lock_irq(&group->port->lock);
502 group->rec = *rec;
503 if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
504 rb_erase(&group->node, &group->port->table);
505 mcast_insert(group->port, group, 1);
507 spin_unlock_irq(&group->port->lock);
509 mcast_work_handler(&group->work);
515 struct mcast_group *group = context;
517 mcast_work_handler(&group->work);
523 struct mcast_group *group, *cur_group;
530 group = mcast_find(port, mgid);
531 if (group)
536 group = kzalloc(sizeof *group, gfp_mask);
537 if (!group)
540 group->port = port;
541 group->rec.mgid = *mgid;
542 INIT_LIST_HEAD(&group->pending_list);
543 INIT_LIST_HEAD(&group->active_list);
544 INIT_WORK(&group->work, mcast_work_handler);
545 spin_lock_init(&group->lock);
548 cur_group = mcast_insert(port, group, is_mgid0);
550 kfree(group);
551 group = cur_group;
555 atomic_inc(&group->refcount);
557 return group;
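
acquire_group() (lines 523-557, called from line 599) either finds an existing group for the MGID or allocates, initializes, and inserts a new one, taking a reference in both cases. If mcast_insert() reports that another thread inserted the same MGID first, the freshly allocated group is discarded in favour of the existing one. A condensed sketch; the port locking, the is_mgid0 computation, and the found label are assumptions:

    static struct mcast_group *acquire_group(struct mcast_port *port,
                                             union ib_gid *mgid, gfp_t gfp_mask)
    {
        struct mcast_group *group, *cur_group;
        unsigned long flags;
        int is_mgid0;

        is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
        if (!is_mgid0) {
            spin_lock_irqsave(&port->lock, flags);
            group = mcast_find(port, mgid);
            if (group)
                goto found;
            spin_unlock_irqrestore(&port->lock, flags);
        }

        group = kzalloc(sizeof *group, gfp_mask);
        if (!group)
            return NULL;

        group->port = port;
        group->rec.mgid = *mgid;
        INIT_LIST_HEAD(&group->pending_list);
        INIT_LIST_HEAD(&group->active_list);
        INIT_WORK(&group->work, mcast_work_handler);
        spin_lock_init(&group->lock);

        spin_lock_irqsave(&port->lock, flags);
        cur_group = mcast_insert(port, group, is_mgid0);
        if (cur_group) {
            /* lost the race: reuse the group already in the tree */
            kfree(group);
            group = cur_group;
        }
    found:
        atomic_inc(&group->refcount);
        spin_unlock_irqrestore(&port->lock, flags);
        return group;
    }
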
561 * We serialize all join requests to a single group to make our lives much
562 * easier. Otherwise, two users could try to join the same group
599 member->group = acquire_group(&dev->port[port_num - dev->start_port],
601 if (!member->group) {
626 struct mcast_group *group;
629 group = member->group;
631 spin_lock_irq(&group->lock);
633 adjust_membership(group, multicast->rec.join_state, -1);
637 if (group->state == MCAST_IDLE) {
638 group->state = MCAST_BUSY;
639 spin_unlock_irq(&group->lock);
640 /* Continue to hold reference on group until callback */
641 queue_work(mcast_wq, &group->work);
643 spin_unlock_irq(&group->lock);
644 release_group(group);
659 struct mcast_group *group;
669 group = mcast_find(port, mgid);
670 if (group)
671 *rec = group->rec;
712 struct mcast_group *group;
718 group = rb_entry(node, struct mcast_group, node);
719 spin_lock(&group->lock);
720 if (group->state == MCAST_IDLE) {
721 atomic_inc(&group->refcount);
722 queue_work(mcast_wq, &group->work);
724 group->state = MCAST_ERROR;
725 spin_unlock(&group->lock);
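
The final block (lines 712-725) walks the whole per-port tree when the port's SA membership state is lost: every idle group gets its work item queued, and every group is marked MCAST_ERROR so the work handler routes it through process_group_error() (line 422). A sketch of the enclosing loop; the function name used here (mcast_groups_lost), the triggering event, and the outer locking are assumptions:

    static void mcast_groups_lost(struct mcast_port *port)
    {
        struct mcast_group *group;
        struct rb_node *node;
        unsigned long flags;

        spin_lock_irqsave(&port->lock, flags);
        for (node = rb_first(&port->table); node; node = rb_next(node)) {
            group = rb_entry(node, struct mcast_group, node);
            spin_lock(&group->lock);
            if (group->state == MCAST_IDLE) {
                atomic_inc(&group->refcount);    /* dropped by the work handler */
                queue_work(mcast_wq, &group->work);
            }
            group->state = MCAST_ERROR;
            spin_unlock(&group->lock);
        }
        spin_unlock_irqrestore(&port->lock, flags);
    }
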