Lines Matching refs:group, only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/infiniband/core/

117 	struct mcast_group	*group;
133 struct mcast_group *group;
137 group = rb_entry(node, struct mcast_group, node);
138 ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
140 return group;
151 struct mcast_group *group,
163 ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
164 sizeof group->rec.mgid);
174 rb_link_node(&group->node, parent, link);
175 rb_insert_color(&group->node, &port->table);
185 static void release_group(struct mcast_group *group)
187 struct mcast_port *port = group->port;
191 if (atomic_dec_and_test(&group->refcount)) {
192 rb_erase(&group->node, &port->table);
194 kfree(group);
208 struct mcast_group *group = member->group;
211 spin_lock_irqsave(&group->lock, flags);
212 list_add_tail(&member->list, &group->pending_list);
213 if (group->state == MCAST_IDLE) {
214 group->state = MCAST_BUSY;
215 atomic_inc(&group->refcount);
216 queue_work(mcast_wq, &group->work);
218 spin_unlock_irqrestore(&group->lock, flags);
222 	 * A multicast group has three types of members: full member, non member, and send only member.
227 static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
233 group->members[i] += inc;
237 * If a multicast group has zero members left for a particular join state, but
238 * the group is still a member with the SA, we need to leave that join state.
242 static u8 get_leave_state(struct mcast_group *group)
248 if (!group->members[i])
251 return leave_state & group->rec.join_state;
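
The adjust_membership() and get_leave_state() fragments above only show the lines that mention group, but together they implement a small accounting scheme: one counter per join-state bit (full member, non member, send-only member), incremented on join and decremented on leave, with the leave state built from bits that are still registered with the SA but whose counter has dropped to zero. The following is a hedged user-space sketch of that idea under those assumptions; the struct layout and helper names are illustrative, not the kernel's definitions.

/*
 * Minimal stand-alone model of the membership accounting sketched above.
 * Compiles with any C99 compiler; nothing here is kernel code.
 */
#include <stdio.h>

typedef unsigned char u8;

struct group_counts {
	int members[3];   /* one counter per join-state bit */
	u8  join_state;   /* bits currently registered with the SA */
};

/* Bump or drop the counter for every bit set in join_state. */
static void adjust_membership(struct group_counts *g, u8 join_state, int inc)
{
	for (int i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			g->members[i] += inc;
}

/* Bits still registered with the SA that no longer have any members. */
static u8 get_leave_state(struct group_counts *g)
{
	u8 leave_state = 0;

	for (int i = 0; i < 3; i++)
		if (!g->members[i])
			leave_state |= 0x1 << i;

	return leave_state & g->join_state;
}

int main(void)
{
	struct group_counts g = { .members = {0, 0, 0}, .join_state = 0 };

	adjust_membership(&g, 0x1, 1);    /* a full member joins         */
	g.join_state |= 0x1;
	adjust_membership(&g, 0x4, 1);    /* a send-only member joins    */
	g.join_state |= 0x4;
	adjust_membership(&g, 0x4, -1);   /* the send-only member leaves */

	printf("leave_state = 0x%x\n", get_leave_state(&g));   /* prints 0x4 */
	return 0;
}
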
329 static int send_join(struct mcast_group *group, struct mcast_member *member)
331 struct mcast_port *port = group->port;
334 group->last_join = member;
339 3000, GFP_KERNEL, join_handler, group,
340 &group->query);
342 group->query_id = ret;
348 static int send_leave(struct mcast_group *group, u8 leave_state)
350 struct mcast_port *port = group->port;
354 rec = group->rec;
356 group->leave_state = leave_state;
364 group, &group->query);
366 group->query_id = ret;
372 static void join_group(struct mcast_group *group, struct mcast_member *member,
376 adjust_membership(group, join_state, 1);
377 group->rec.join_state |= join_state;
378 member->multicast.rec = group->rec;
380 list_move(&member->list, &group->active_list);
383 static int fail_join(struct mcast_group *group, struct mcast_member *member,
386 spin_lock_irq(&group->lock);
388 spin_unlock_irq(&group->lock);
392 static void process_group_error(struct mcast_group *group)
398 if (group->state == MCAST_PKEY_EVENT)
399 ret = ib_find_pkey(group->port->dev->device,
400 group->port->port_num,
401 be16_to_cpu(group->rec.pkey), &pkey_index);
403 spin_lock_irq(&group->lock);
404 if (group->state == MCAST_PKEY_EVENT && !ret &&
405 group->pkey_index == pkey_index)
408 while (!list_empty(&group->active_list)) {
409 member = list_entry(group->active_list.next,
413 adjust_membership(group, member->multicast.rec.join_state, -1);
415 spin_unlock_irq(&group->lock);
422 spin_lock_irq(&group->lock);
425 group->rec.join_state = 0;
427 group->state = MCAST_BUSY;
428 spin_unlock_irq(&group->lock);
433 struct mcast_group *group;
439 group = container_of(work, typeof(*group), work);
441 spin_lock_irq(&group->lock);
442 while (!list_empty(&group->pending_list) ||
443 (group->state != MCAST_BUSY)) {
445 if (group->state != MCAST_BUSY) {
446 spin_unlock_irq(&group->lock);
447 process_group_error(group);
451 member = list_entry(group->pending_list.next,
457 if (join_state == (group->rec.join_state & join_state)) {
458 status = cmp_rec(&group->rec, &multicast->rec,
461 join_group(group, member, join_state);
464 spin_unlock_irq(&group->lock);
467 spin_unlock_irq(&group->lock);
468 status = send_join(group, member);
473 ret = fail_join(group, member, status);
479 spin_lock_irq(&group->lock);
482 join_state = get_leave_state(group);
484 group->rec.join_state &= ~join_state;
485 spin_unlock_irq(&group->lock);
486 if (send_leave(group, join_state))
489 group->state = MCAST_IDLE;
490 spin_unlock_irq(&group->lock);
491 release_group(group);
498 static void process_join_error(struct mcast_group *group, int status)
503 spin_lock_irq(&group->lock);
504 member = list_entry(group->pending_list.next,
506 if (group->last_join == member) {
509 spin_unlock_irq(&group->lock);
515 spin_unlock_irq(&group->lock);
521 struct mcast_group *group = context;
525 process_join_error(group, status);
527 ib_find_pkey(group->port->dev->device, group->port->port_num,
530 spin_lock_irq(&group->port->lock);
531 group->rec = *rec;
532 if (group->state == MCAST_BUSY &&
533 group->pkey_index == MCAST_INVALID_PKEY_INDEX)
534 group->pkey_index = pkey_index;
535 if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
536 rb_erase(&group->node, &group->port->table);
537 mcast_insert(group->port, group, 1);
539 spin_unlock_irq(&group->port->lock);
541 mcast_work_handler(&group->work);
547 struct mcast_group *group = context;
549 if (status && group->retries > 0 &&
550 !send_leave(group, group->leave_state))
551 group->retries--;
553 mcast_work_handler(&group->work);
559 struct mcast_group *group, *cur_group;
566 group = mcast_find(port, mgid);
567 if (group)
572 group = kzalloc(sizeof *group, gfp_mask);
573 if (!group)
576 group->retries = 3;
577 group->port = port;
578 group->rec.mgid = *mgid;
579 group->pkey_index = MCAST_INVALID_PKEY_INDEX;
580 INIT_LIST_HEAD(&group->pending_list);
581 INIT_LIST_HEAD(&group->active_list);
582 INIT_WORK(&group->work, mcast_work_handler);
583 spin_lock_init(&group->lock);
586 cur_group = mcast_insert(port, group, is_mgid0);
588 kfree(group);
589 group = cur_group;
593 atomic_inc(&group->refcount);
595 return group;
599 * We serialize all join requests to a single group to make our lives much
600 * easier. Otherwise, two users could try to join the same group
637 member->group = acquire_group(&dev->port[port_num - dev->start_port],
639 if (!member->group) {
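
The comment above gives the design rationale, and the matched lines from queue_join() and mcast_work_handler() show the mechanism only in pieces: every join is appended to the group's pending list under the group lock, and only an idle-to-busy transition schedules the single per-group work handler, so two joins to the same group are never processed concurrently. Below is a hedged user-space reconstruction of that pattern; the pthread mutex stands in for the spinlock, the direct call stands in for queue_work(), and the group reference counting is left out.

/*
 * Simplified model of the per-group join serialization.  All names and
 * types are illustrative; build as ordinary C with -pthread.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct join_req {
	int id;
	struct join_req *next;
};

struct group {
	pthread_mutex_t lock;       /* stands in for group->lock         */
	struct join_req *pending;   /* stands in for group->pending_list */
	int busy;                   /* MCAST_BUSY vs. MCAST_IDLE         */
};

/* Drain the pending list, then drop back to idle. */
static void work_handler(struct group *g)
{
	pthread_mutex_lock(&g->lock);
	while (g->pending) {
		struct join_req *req = g->pending;

		g->pending = req->next;
		pthread_mutex_unlock(&g->lock);
		printf("processing join %d\n", req->id);  /* send_join() would run here */
		free(req);
		pthread_mutex_lock(&g->lock);
	}
	g->busy = 0;
	pthread_mutex_unlock(&g->lock);
}

/* Queue a join; only an idle group needs the handler kicked. */
static void queue_join(struct group *g, struct join_req *req)
{
	int was_idle;

	pthread_mutex_lock(&g->lock);
	req->next = g->pending;             /* the kernel appends to the tail */
	g->pending = req;
	was_idle = !g->busy;
	g->busy = 1;
	pthread_mutex_unlock(&g->lock);

	if (was_idle)
		work_handler(g);            /* kernel: queue_work(mcast_wq, &group->work) */
}

int main(void)
{
	struct group g = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
	struct join_req *req = calloc(1, sizeof(*req));

	req->id = 1;
	queue_join(&g, req);
	return 0;
}
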
664 struct mcast_group *group;
667 group = member->group;
669 spin_lock_irq(&group->lock);
671 adjust_membership(group, multicast->rec.join_state, -1);
675 if (group->state == MCAST_IDLE) {
676 group->state = MCAST_BUSY;
677 spin_unlock_irq(&group->lock);
678 /* Continue to hold reference on group until callback */
679 queue_work(mcast_wq, &group->work);
681 spin_unlock_irq(&group->lock);
682 release_group(group);
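
The /* Continue to hold reference on group until callback */ comment above marks a reference handoff that the matched lines only hint at: when leaving requires talking to the SA, the caller's group reference is kept alive and release_group() is only reached after the work handler (and its callback) has finished; when nothing needs to be sent, the reference is dropped immediately. The sketch below models that handoff in user space under those assumptions, with the workqueue collapsed into a direct call and illustrative names throughout.

/*
 * Simplified model of the "hold the reference until the callback" pattern.
 * Not kernel code; atomics via C11 <stdatomic.h>.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct group {
	atomic_int refcount;
	int busy;                 /* stands in for MCAST_BUSY vs. MCAST_IDLE */
};

/* Drop one reference; the last one frees the group (kernel: rb_erase + kfree). */
static void release_group(struct group *g)
{
	if (atomic_fetch_sub(&g->refcount, 1) == 1) {
		printf("freeing group\n");
		free(g);
	}
}

/* The deferred work: send the leave, then drop the handed-over reference. */
static void work_handler(struct group *g)
{
	printf("sending leave to the SA\n");   /* send_leave() would run here */
	g->busy = 0;
	release_group(g);
}

/* Leaving: either hand the reference to the work handler or drop it now. */
static void free_member(struct group *g)
{
	if (!g->busy) {
		g->busy = 1;
		/* Continue to hold reference on group until callback */
		work_handler(g);               /* kernel: queue_work() instead */
	} else {
		release_group(g);              /* a running handler will finish the job */
	}
}

int main(void)
{
	struct group *g = malloc(sizeof(*g));

	atomic_init(&g->refcount, 1);          /* the member's reference from acquire_group() */
	g->busy = 0;
	free_member(g);
	return 0;
}
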
697 struct mcast_group *group;
707 group = mcast_find(port, mgid);
708 if (group)
709 *rec = group->rec;
751 struct mcast_group *group;
757 group = rb_entry(node, struct mcast_group, node);
758 spin_lock(&group->lock);
759 if (group->state == MCAST_IDLE) {
760 atomic_inc(&group->refcount);
761 queue_work(mcast_wq, &group->work);
763 if (group->state != MCAST_GROUP_ERROR)
764 group->state = state;
765 spin_unlock(&group->lock);