Lines Matching defs:sched_group in /macosx-10.10.1/xnu-2782.1.97/osfmk/kern/

59  * Non-bound threads are linked on their task's sched_group's runq
108 * One way to get sched_group below 4K without a new runq structure would be to remove the extra queues above realtime.
155 * Hand off quanta when hopping between threads with same sched_group
185 typedef run_queue_t group_runq_t; /* A run queue that is part of a sched_group */
191 struct sched_group {
359 sizeof(struct sched_group),
360 task_max * sizeof(struct sched_group),
401 sched_group_t sched_group;
406 sched_group = (sched_group_t)zalloc(sched_group_zone);
408 bzero(sched_group, sizeof(struct sched_group));
410 run_queue_init(&sched_group->runq);
413 sched_group->entries[i].runq = 0;
414 sched_group->entries[i].sched_pri = i;
418 queue_enter(&sched_groups, sched_group, sched_group_t, sched_groups);
422 return (sched_group);
426 sched_group_destroy(sched_group_t sched_group)
429 assert(sched_group == SCHED_GROUP_NULL);
433 assert(sched_group != SCHED_GROUP_NULL);
434 assert(sched_group->runq.count == 0);
437 assert(sched_group->entries[i].runq == 0);
438 assert(sched_group->entries[i].sched_pri == i);
442 queue_remove(&sched_groups, sched_group, sched_group_t, sched_groups);
446 zfree(sched_group_zone, sched_group);
521 assert(thread->sched_group == group);
583 __assert_only sched_entry_t thread_entry = group_entry_for_pri(thread->sched_group, thread->sched_pri);
601 sched_entry_t sched_entry = group_entry_for_pri(thread->sched_group, thread->sched_pri);
1019 sched_group_t group = current_thread()->sched_group;
1105 thread->sched_group,
1133 sched_entry_t entry = group_entry_for_pri(thread->sched_group, processor->current_pri);
1308 thread->sched_group,
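
The matches above outline the full life cycle of a sched_group: a zone (sched_group_zone) sized for one group per task, an allocation path that zeroes the group, initializes its run queue, resets the per-priority entries, and links the group onto the global sched_groups list, and a destruction path that asserts the group is idle before unlinking and freeing it. Below is a minimal, self-contained C model of that structure and life cycle, reconstructed only from the lines listed here; NRQS, the simplified run_queue, the next/prev links standing in for queue_chain_t, and the use of calloc/free in place of zalloc/zfree are assumptions, not the kernel's actual definitions.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel types named in the matches above
 * (run_queue_t, queue_chain_t, zalloc/zfree). Field names follow the
 * listing; NRQS and the exact layouts are assumptions, not xnu's. */
#define NRQS 128                          /* assumed number of priority levels */

struct run_queue {
    int count;                            /* threads currently enqueued */
    /* the per-priority thread queues are omitted in this sketch */
};

typedef struct run_queue *run_queue_t;
typedef run_queue_t group_runq_t;         /* "a run queue that is part of a sched_group" */

struct sched_entry {
    int16_t sched_pri;                    /* priority this entry represents */
    int16_t runq;                         /* nonzero while the entry sits on a pset runq */
};

struct sched_group {
    struct run_queue    runq;             /* group-local runq for the task's non-bound threads */
    struct sched_entry  entries[NRQS];    /* one entry per priority level */
    struct sched_group *next, *prev;      /* stand-in for the queue_chain_t global linkage */
};

typedef struct sched_group *sched_group_t;
#define SCHED_GROUP_NULL ((sched_group_t)NULL)

/* Mirrors the allocation path in the listing: allocate, zero the group,
 * initialize its runq, reset every per-priority entry, then link the group
 * onto the global sched_groups list. */
sched_group_t
sched_group_create(void)
{
    sched_group_t sched_group = calloc(1, sizeof(struct sched_group)); /* zalloc + bzero */
    assert(sched_group != SCHED_GROUP_NULL);

    /* run_queue_init(&sched_group->runq) in the kernel; calloc zeroed it here */
    for (int i = 0; i < NRQS; i++) {
        sched_group->entries[i].runq = 0;
        sched_group->entries[i].sched_pri = (int16_t)i;
    }
    /* queue_enter(&sched_groups, sched_group, ...) would link it globally here */
    return sched_group;
}

/* Mirrors the destruction path: the group must be idle (empty runq, no entry
 * still enqueued, priorities untouched) before it is unlinked and freed. */
void
sched_group_destroy(sched_group_t sched_group)
{
    assert(sched_group != SCHED_GROUP_NULL);
    assert(sched_group->runq.count == 0);
    for (int i = 0; i < NRQS; i++) {
        assert(sched_group->entries[i].runq == 0);
        assert(sched_group->entries[i].sched_pri == i);
    }
    /* queue_remove(&sched_groups, ...) then zfree(sched_group_zone, ...) in the kernel */
    free(sched_group);
}

Per the comments at source lines 59 and 155, the group-local runq holds a task's non-bound threads, and the scheduler hands off quanta when hopping between threads that share a sched_group; the comment at line 108 notes that sched_group is over 4K and suggests removing the queues above the realtime band as one way to shrink it.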
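
Several matches (source lines 583, 601, and 1133) look up a per-priority entry with group_entry_for_pri(thread->sched_group, pri). Its body is not shown in this listing; a plausible minimal reading, assuming the entries array defined above is indexed directly by scheduled priority, would be:

/* Hypothetical helper matching the call sites above; the real
 * implementation is not part of this listing. */
struct sched_entry *
group_entry_for_pri(sched_group_t group, int pri)
{
    assert(pri >= 0 && pri < NRQS);
    return &group->entries[pri];
}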