/*
 * Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include "Model.h"

#include <new>

#include <stdio.h>
#include <stdlib.h>

#include <AutoDeleter.h>

#include <thread_defs.h>


static const char* const kThreadStateNames[] = {
	"running",
	"still running",
	"preempted",
	"ready",
	"waiting",
	"unknown"
};


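// Maps a ThreadState value to a human-readable name. Note that there is no
// bounds check here: callers are expected to pass a valid ThreadState, and
// kThreadStateNames above has to stay in sync with the enum order (the enum
// is presumably declared in Model.h).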
const char*
thread_state_name(ThreadState state)
{
	return kThreadStateNames[state];
}


const char*
wait_object_type_name(uint32 type)
{
	switch (type) {
		case THREAD_BLOCK_TYPE_SEMAPHORE:
			return "semaphore";
		case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
			return "condition";
		case THREAD_BLOCK_TYPE_MUTEX:
			return "mutex";
		case THREAD_BLOCK_TYPE_RW_LOCK:
			return "rw lock";
		case THREAD_BLOCK_TYPE_USER:
			return "user";
		case THREAD_BLOCK_TYPE_OTHER:
		case THREAD_BLOCK_TYPE_OTHER_OBJECT:
			return "other";
		case THREAD_BLOCK_TYPE_SNOOZE:
			return "snooze";
		case THREAD_BLOCK_TYPE_SIGNAL:
			return "signal";
		default:
			return "unknown";
	}
}


// #pragma mark - CPU


Model::CPU::CPU()
	:
	fIdleTime(0)
{
}


void
Model::CPU::SetIdleTime(nanotime_t time)
{
	fIdleTime = time;
}


// #pragma mark - IORequest


Model::IORequest::IORequest(
	system_profiler_io_request_scheduled* scheduledEvent,
	system_profiler_io_request_finished* finishedEvent, size_t operationCount)
	:
	scheduledEvent(scheduledEvent),
	finishedEvent(finishedEvent),
	operationCount(operationCount)
{
}


Model::IORequest::~IORequest()
{
}


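/*!	Allocates room for the IORequest plus \a operationCount IOOperation
	entries in a single malloc()'d block (presumably a trailing array declared
	in Model.h) and constructs the object via placement new. Objects created
	this way must be released with Delete(), which calls free(), rather than
	with the delete operator.
*/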
/*static*/ Model::IORequest*
Model::IORequest::Create(system_profiler_io_request_scheduled* scheduledEvent,
	system_profiler_io_request_finished* finishedEvent, size_t operationCount)
{
	void* memory = malloc(
		sizeof(IORequest) + operationCount * sizeof(IOOperation));
	if (memory == NULL)
		return NULL;

	return new(memory) IORequest(scheduledEvent, finishedEvent, operationCount);
}


void
Model::IORequest::Delete()
{
	free(this);
}


// #pragma mark - IOScheduler


Model::IOScheduler::IOScheduler(system_profiler_io_scheduler_added* event,
	int32 index)
	:
	fAddedEvent(event),
	fIndex(index)
{
}


// #pragma mark - WaitObject


Model::WaitObject::WaitObject(const system_profiler_wait_object_info* event)
	:
	fEvent(event),
	fWaits(0),
	fTotalWaitTime(0)
{
}


Model::WaitObject::~WaitObject()
{
}


void
Model::WaitObject::AddWait(nanotime_t waitTime)
{
	fWaits++;
	fTotalWaitTime += waitTime;
}


// #pragma mark - WaitObjectGroup


Model::WaitObjectGroup::WaitObjectGroup(WaitObject* waitObject)
	:
	fWaits(-1),
	fTotalWaitTime(-1)
{
	fWaitObjects.AddItem(waitObject);
}


Model::WaitObjectGroup::~WaitObjectGroup()
{
}


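// The group's wait count and total wait time are computed lazily: -1 (set in
// the constructor) marks them as not yet computed, and _ComputeWaits() sums
// the values of all wait objects in the group on first access.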
int64
Model::WaitObjectGroup::Waits()
{
	if (fWaits < 0)
		_ComputeWaits();

	return fWaits;
}


nanotime_t
Model::WaitObjectGroup::TotalWaitTime()
{
	if (fTotalWaitTime < 0)
		_ComputeWaits();

	return fTotalWaitTime;
}


void
Model::WaitObjectGroup::_ComputeWaits()
{
	fWaits = 0;
	fTotalWaitTime = 0;

	for (int32 i = fWaitObjects.CountItems(); i-- > 0;) {
		WaitObject* waitObject = fWaitObjects.ItemAt(i);

		fWaits += waitObject->Waits();
		fTotalWaitTime += waitObject->TotalWaitTime();
	}
}


// #pragma mark - ThreadWaitObject


Model::ThreadWaitObject::ThreadWaitObject(WaitObject* waitObject)
	:
	fWaitObject(waitObject),
	fWaits(0),
	fTotalWaitTime(0)
{
}


Model::ThreadWaitObject::~ThreadWaitObject()
{
}


void
Model::ThreadWaitObject::AddWait(nanotime_t waitTime)
{
	fWaits++;
	fTotalWaitTime += waitTime;

	fWaitObject->AddWait(waitTime);
}


// #pragma mark - ThreadWaitObjectGroup


Model::ThreadWaitObjectGroup::ThreadWaitObjectGroup(
	ThreadWaitObject* threadWaitObject)
{
	fWaitObjects.Add(threadWaitObject);
}


Model::ThreadWaitObjectGroup::~ThreadWaitObjectGroup()
{
}


bool
Model::ThreadWaitObjectGroup::GetThreadWaitObjects(
	BObjectList<ThreadWaitObject>& objects)
{
	ThreadWaitObjectList::Iterator it = fWaitObjects.GetIterator();
	while (ThreadWaitObject* object = it.Next()) {
		if (!objects.AddItem(object))
			return false;
	}

	return true;
}


// #pragma mark - Team


Model::Team::Team(const system_profiler_team_added* event, nanotime_t time)
	:
	fCreationEvent(event),
	fCreationTime(time),
	fDeletionTime(-1),
	fThreads(10)
{
}


Model::Team::~Team()
{
}


bool
Model::Team::AddThread(Thread* thread)
{
	return fThreads.BinaryInsert(thread, &Thread::CompareByCreationTimeID);
}


// #pragma mark - Thread


Model::Thread::Thread(Team* team, const system_profiler_thread_added* event,
	nanotime_t time)
	:
	fEvents(NULL),
	fEventCount(0),
	fIORequests(NULL),
	fIORequestCount(0),
	fTeam(team),
	fCreationEvent(event),
	fCreationTime(time),
	fDeletionTime(-1),
	fRuns(0),
	fTotalRunTime(0),
	fMinRunTime(-1),
	fMaxRunTime(-1),
	fLatencies(0),
	fTotalLatency(0),
	fMinLatency(-1),
	fMaxLatency(-1),
	fReruns(0),
	fTotalRerunTime(0),
	fMinRerunTime(-1),
	fMaxRerunTime(-1),
	fWaits(0),
	fTotalWaitTime(0),
	fUnspecifiedWaitTime(0),
	fIOCount(0),
	fIOTime(0),
	fPreemptions(0),
	fIndex(-1),
	fWaitObjectGroups(20, true)
{
}


Model::Thread::~Thread()
{
	if (fIORequests != NULL) {
		for (size_t i = 0; i < fIORequestCount; i++)
			fIORequests[i]->Delete();

		delete[] fIORequests;
	}

	delete[] fEvents;
}


void
Model::Thread::SetEvents(system_profiler_event_header** events,
	size_t eventCount)
{
	fEvents = events;
	fEventCount = eventCount;
}


void
Model::Thread::SetIORequests(IORequest** requests, size_t requestCount)
{
	fIORequests = requests;
	fIORequestCount = requestCount;
}


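/*!	Binary-searches the thread's I/O requests, which are assumed to be sorted
	by scheduled time, and returns the index of the first request scheduled at
	or after \a minRequestStartTime (fIORequestCount if there is none).
*/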
size_t
Model::Thread::ClosestRequestStartIndex(nanotime_t minRequestStartTime) const
{
	size_t lower = 0;
	size_t upper = fIORequestCount;
	while (lower < upper) {
		size_t mid = (lower + upper) / 2;
		IORequest* request = fIORequests[mid];

		if (request->ScheduledTime() < minRequestStartTime)
			lower = mid + 1;
		else
			upper = mid;
	}

	return lower;
}


Model::ThreadWaitObjectGroup*
Model::Thread::ThreadWaitObjectGroupFor(uint32 type, addr_t object) const
{
	type_and_object key;
	key.type = type;
	key.object = object;

	return fWaitObjectGroups.BinarySearchByKey(key,
		&ThreadWaitObjectGroup::CompareWithTypeObject);
}


void
Model::Thread::AddRun(nanotime_t runTime)
{
	fRuns++;
	fTotalRunTime += runTime;

	if (fMinRunTime < 0 || runTime < fMinRunTime)
		fMinRunTime = runTime;
	if (runTime > fMaxRunTime)
		fMaxRunTime = runTime;
}


void
Model::Thread::AddRerun(nanotime_t runTime)
{
	fReruns++;
	fTotalRerunTime += runTime;

	if (fMinRerunTime < 0 || runTime < fMinRerunTime)
		fMinRerunTime = runTime;
	if (runTime > fMaxRerunTime)
		fMaxRerunTime = runTime;
}


void
Model::Thread::AddLatency(nanotime_t latency)
{
	fLatencies++;
	fTotalLatency += latency;

	if (fMinLatency < 0 || latency < fMinLatency)
		fMinLatency = latency;
	if (latency > fMaxLatency)
		fMaxLatency = latency;
}


void
Model::Thread::AddPreemption(nanotime_t runTime)
{
	fPreemptions++;
}


void
Model::Thread::AddWait(nanotime_t waitTime)
{
	fWaits++;
	fTotalWaitTime += waitTime;
}


void
Model::Thread::AddUnspecifiedWait(nanotime_t waitTime)
{
	fUnspecifiedWaitTime += waitTime;
}


Model::ThreadWaitObject*
Model::Thread::AddThreadWaitObject(WaitObject* waitObject,
	ThreadWaitObjectGroup** _threadWaitObjectGroup)
{
	// create a thread wait object
	ThreadWaitObject* threadWaitObject
		= new(std::nothrow) ThreadWaitObject(waitObject);
	if (threadWaitObject == NULL)
		return NULL;

	// find the thread wait object group
	ThreadWaitObjectGroup* threadWaitObjectGroup
		= ThreadWaitObjectGroupFor(waitObject->Type(), waitObject->Object());
	if (threadWaitObjectGroup == NULL) {
		// doesn't exist yet -- create
		threadWaitObjectGroup = new(std::nothrow) ThreadWaitObjectGroup(
			threadWaitObject);
		if (threadWaitObjectGroup == NULL) {
			delete threadWaitObject;
			return NULL;
		}

		// add to the list
		if (!fWaitObjectGroups.BinaryInsert(threadWaitObjectGroup,
				&ThreadWaitObjectGroup::CompareByTypeObject)) {
			delete threadWaitObjectGroup;
			return NULL;
		}
	} else {
		// exists -- just add the object
		threadWaitObjectGroup->AddWaitObject(threadWaitObject);
	}

	if (_threadWaitObjectGroup != NULL)
		*_threadWaitObjectGroup = threadWaitObjectGroup;

	return threadWaitObject;
}


void
Model::Thread::SetIOs(int64 count, nanotime_t time)
{
	fIOCount = count;
	fIOTime = time;
}


// #pragma mark - SchedulingState


Model::SchedulingState::~SchedulingState()
{
	Clear();
}


status_t
Model::SchedulingState::Init()
{
	status_t error = fThreadStates.Init();
	if (error != B_OK)
		return error;

	return B_OK;
}


status_t
Model::SchedulingState::Init(const CompactSchedulingState* state)
{
	status_t error = Init();
	if (error != B_OK)
		return error;

	if (state == NULL)
		return B_OK;

	fLastEventTime = state->LastEventTime();
	for (int32 i = 0; const CompactThreadSchedulingState* compactThreadState
			= state->ThreadStateAt(i); i++) {
		ThreadSchedulingState* threadState
			= new(std::nothrow) ThreadSchedulingState(*compactThreadState);
		if (threadState == NULL)
			return B_NO_MEMORY;

		fThreadStates.Insert(threadState);
	}

	return B_OK;
}


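// Clear(true) on the hash table presumably unlinks all elements and returns
// them as a singly-linked list chained through their "next" pointers; the
// loop below walks that list and deletes each thread state.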
void
Model::SchedulingState::Clear()
{
	ThreadSchedulingState* state = fThreadStates.Clear(true);
	while (state != NULL) {
		ThreadSchedulingState* next = state->next;
		DeleteThread(state);
		state = next;
	}

	fLastEventTime = -1;
}


void
Model::SchedulingState::DeleteThread(ThreadSchedulingState* thread)
{
	delete thread;
}


// #pragma mark - CompactSchedulingState


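/*!	Creates a flattened snapshot of \a state. A first pass counts the threads
	that exist at the snapshot's last event time, a single block with a
	trailing CompactThreadSchedulingState array is then malloc()'d, and a
	second pass copies the matching thread states into it. The result must be
	released with Delete(), which calls free().
*/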
/*static*/ Model::CompactSchedulingState*
Model::CompactSchedulingState::Create(const SchedulingState& state,
	off_t eventOffset)
{
	nanotime_t lastEventTime = state.LastEventTime();

	// count the active threads
	int32 threadCount = 0;
	for (ThreadSchedulingStateTable::Iterator it
				= state.ThreadStates().GetIterator();
			ThreadSchedulingState* threadState = it.Next();) {
		Thread* thread = threadState->thread;
		if (thread->CreationTime() <= lastEventTime
			&& (thread->DeletionTime() == -1
				|| thread->DeletionTime() >= lastEventTime)) {
			threadCount++;
		}
	}

	CompactSchedulingState* compactState = (CompactSchedulingState*)malloc(
		sizeof(CompactSchedulingState)
			+ threadCount * sizeof(CompactThreadSchedulingState));
	if (compactState == NULL)
		return NULL;

	// copy the state info
	compactState->fEventOffset = eventOffset;
	compactState->fThreadCount = threadCount;
	compactState->fLastEventTime = lastEventTime;

	int32 threadIndex = 0;
	for (ThreadSchedulingStateTable::Iterator it
				= state.ThreadStates().GetIterator();
			ThreadSchedulingState* threadState = it.Next();) {
		Thread* thread = threadState->thread;
		if (thread->CreationTime() <= lastEventTime
			&& (thread->DeletionTime() == -1
				|| thread->DeletionTime() >= lastEventTime)) {
			compactState->fThreadStates[threadIndex++] = *threadState;
		}
	}

	return compactState;
}


void
Model::CompactSchedulingState::Delete()
{
	free(this);
}


// #pragma mark - Model


Model::Model(const char* dataSourceName, void* eventData, size_t eventDataSize,
	system_profiler_event_header** events, size_t eventCount)
	:
	fDataSourceName(dataSourceName),
	fEventData(eventData),
	fEvents(events),
	fEventDataSize(eventDataSize),
	fEventCount(eventCount),
	fCPUCount(1),
	fBaseTime(0),
	fLastEventTime(0),
	fIdleTime(0),
	fCPUs(20, true),
	fTeams(20, true),
	fThreads(20, true),
	fWaitObjectGroups(20, true),
	fIOSchedulers(10, true),
	fSchedulingStates(100)
{
}


Model::~Model()
{
	for (int32 i = 0; CompactSchedulingState* state
		= fSchedulingStates.ItemAt(i); i++) {
		state->Delete();
	}

	delete[] fEvents;

	free(fEventData);

	for (int32 i = 0; void* data = fAssociatedData.ItemAt(i); i++)
		free(data);
}


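/*!	Returns the index of the event closest to \a eventTime, which is given
	relative to the model's base time. Only the scheduling events (scheduled,
	enqueued in or removed from the run queue) carry a timestamp, so the
	binary search below scans forward over other events before comparing.
*/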
size_t
Model::ClosestEventIndex(nanotime_t eventTime) const
{
	// The events themselves are unmodified and use an absolute time.
	eventTime += fBaseTime;

	// Binary search the event. Since not all events have a timestamp, we have
	// to do a bit of iteration, too.
	size_t lower = 0;
	size_t upper = CountEvents();
	while (lower < upper) {
		size_t mid = (lower + upper) / 2;
		while (mid < upper) {
			system_profiler_event_header* header = fEvents[mid];
			switch (header->event) {
				case B_SYSTEM_PROFILER_THREAD_SCHEDULED:
				case B_SYSTEM_PROFILER_THREAD_ENQUEUED_IN_RUN_QUEUE:
				case B_SYSTEM_PROFILER_THREAD_REMOVED_FROM_RUN_QUEUE:
					break;
				default:
					mid++;
					continue;
			}

			break;
		}

		if (mid == upper) {
			lower = mid;
			break;
		}

		system_profiler_thread_scheduling_event* event
			= (system_profiler_thread_scheduling_event*)(fEvents[mid] + 1);
		if (event->time < eventTime)
			lower = mid + 1;
		else
			upper = mid;
	}

	return lower;
}


bool
Model::AddAssociatedData(void* data)
{
	return fAssociatedData.AddItem(data);
}


void
Model::RemoveAssociatedData(void* data)
{
	fAssociatedData.RemoveItem(data);
}


void
Model::LoadingFinished()
{
	// set the thread indices
	for (int32 i = 0; Thread* thread = fThreads.ItemAt(i); i++)
		thread->SetIndex(i);

	// compute the total idle time
	fIdleTime = 0;
	for (int32 i = 0; CPU* cpu = CPUAt(i); i++)
		fIdleTime += cpu->IdleTime();
}


void
Model::SetBaseTime(nanotime_t time)
{
	fBaseTime = time;
}


void
Model::SetLastEventTime(nanotime_t time)
{
	fLastEventTime = time;
}


bool
Model::SetCPUCount(int32 count)
{
	fCPUCount = count;

	fCPUs.MakeEmpty();

	for (int32 i = 0; i < fCPUCount; i++) {
		CPU* cpu = new(std::nothrow) CPU;
		if (cpu == NULL || !fCPUs.AddItem(cpu)) {
			delete cpu;
			return false;
		}
	}

	return true;
}


int32
Model::CountTeams() const
{
	return fTeams.CountItems();
}


Model::Team*
Model::TeamAt(int32 index) const
{
	return fTeams.ItemAt(index);
}


Model::Team*
Model::TeamByID(team_id id) const
{
	return fTeams.BinarySearchByKey(id, &Team::CompareWithID);
}


Model::Team*
Model::AddTeam(const system_profiler_team_added* event, nanotime_t time)
{
	Team* team = TeamByID(event->team);
	if (team != NULL) {
		fprintf(stderr, "Duplicate team: %" B_PRId32 "\n", event->team);
		// TODO: User feedback!
		return team;
	}

	team = new(std::nothrow) Team(event, time);
	if (team == NULL)
		return NULL;

	if (!fTeams.BinaryInsert(team, &Team::CompareByID)) {
		delete team;
		return NULL;
	}

	return team;
}


int32
Model::CountThreads() const
{
	return fThreads.CountItems();
}


Model::Thread*
Model::ThreadAt(int32 index) const
{
	return fThreads.ItemAt(index);
}


Model::Thread*
Model::ThreadByID(thread_id id) const
{
	return fThreads.BinarySearchByKey(id, &Thread::CompareWithID);
}


Model::Thread*
Model::AddThread(const system_profiler_thread_added* event, nanotime_t time)
{
	// check whether we do already know the thread
	Thread* thread = ThreadByID(event->thread);
	if (thread != NULL) {
		fprintf(stderr, "Duplicate thread: %" B_PRId32 "\n", event->thread);
		// TODO: User feedback!
		return thread;
	}

	// get its team
	Team* team = TeamByID(event->team);
	if (team == NULL) {
		fprintf(stderr, "No team for thread: %" B_PRId32 "\n", event->thread);
		return NULL;
	}

	// create the thread and add it
	thread = new(std::nothrow) Thread(team, event, time);
	if (thread == NULL)
		return NULL;
	ObjectDeleter<Thread> threadDeleter(thread);

	if (!fThreads.BinaryInsert(thread, &Thread::CompareByID))
		return NULL;

	if (!team->AddThread(thread)) {
		fThreads.RemoveItem(thread);
		return NULL;
	}

	threadDeleter.Detach();
	return thread;
}


Model::WaitObject*
Model::AddWaitObject(const system_profiler_wait_object_info* event,
	WaitObjectGroup** _waitObjectGroup)
{
	// create a wait object
	WaitObject* waitObject = new(std::nothrow) WaitObject(event);
	if (waitObject == NULL)
		return NULL;

	// find the wait object group
	WaitObjectGroup* waitObjectGroup
		= WaitObjectGroupFor(waitObject->Type(), waitObject->Object());
	if (waitObjectGroup == NULL) {
		// doesn't exist yet -- create
		waitObjectGroup = new(std::nothrow) WaitObjectGroup(waitObject);
		if (waitObjectGroup == NULL) {
			delete waitObject;
			return NULL;
		}

		// add to the list
		if (!fWaitObjectGroups.BinaryInsert(waitObjectGroup,
				&WaitObjectGroup::CompareByTypeObject)) {
			delete waitObjectGroup;
			return NULL;
		}
	} else {
		// exists -- just add the object
		waitObjectGroup->AddWaitObject(waitObject);
	}

	if (_waitObjectGroup != NULL)
		*_waitObjectGroup = waitObjectGroup;

	return waitObject;
}


int32
Model::CountWaitObjectGroups() const
{
	return fWaitObjectGroups.CountItems();
}


Model::WaitObjectGroup*
Model::WaitObjectGroupAt(int32 index) const
{
	return fWaitObjectGroups.ItemAt(index);
}


Model::WaitObjectGroup*
Model::WaitObjectGroupFor(uint32 type, addr_t object) const
{
	type_and_object key;
	key.type = type;
	key.object = object;

	return fWaitObjectGroups.BinarySearchByKey(key,
		&WaitObjectGroup::CompareWithTypeObject);
}


Model::ThreadWaitObject*
Model::AddThreadWaitObject(thread_id threadID, WaitObject* waitObject,
	ThreadWaitObjectGroup** _threadWaitObjectGroup)
{
	Thread* thread = ThreadByID(threadID);
	if (thread == NULL)
		return NULL;

	return thread->AddThreadWaitObject(waitObject, _threadWaitObjectGroup);
}


Model::ThreadWaitObjectGroup*
Model::ThreadWaitObjectGroupFor(thread_id threadID, uint32 type,
	addr_t object) const
{
	Thread* thread = ThreadByID(threadID);
	if (thread == NULL)
		return NULL;

	return thread->ThreadWaitObjectGroupFor(type, object);
}


int32
Model::CountIOSchedulers() const
{
	return fIOSchedulers.CountItems();
}


Model::IOScheduler*
Model::IOSchedulerAt(int32 index) const
{
	return fIOSchedulers.ItemAt(index);
}


Model::IOScheduler*
Model::IOSchedulerByID(int32 id) const
{
	for (int32 i = 0; IOScheduler* scheduler = fIOSchedulers.ItemAt(i); i++) {
		if (scheduler->ID() == id)
			return scheduler;
	}

	return NULL;
}


Model::IOScheduler*
Model::AddIOScheduler(system_profiler_io_scheduler_added* event)
{
	IOScheduler* scheduler = new(std::nothrow) IOScheduler(event,
		fIOSchedulers.CountItems());
	if (scheduler == NULL || !fIOSchedulers.AddItem(scheduler)) {
		delete scheduler;
		return NULL;
	}

	return scheduler;
}


bool
Model::AddSchedulingStateSnapshot(const SchedulingState& state,
	off_t eventOffset)
{
	CompactSchedulingState* compactState = CompactSchedulingState::Create(state,
		eventOffset);
	if (compactState == NULL)
		return false;

	if (!fSchedulingStates.AddItem(compactState)) {
		compactState->Delete();
		return false;
	}

	return true;
}


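// BinarySearchIndexByKey() apparently returns the non-negative index of an
// exact match, or an encoded insertion point (-index - 1) if there is none;
// in the latter case the snapshot preceding eventTime, if any, is returned.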
const Model::CompactSchedulingState*
Model::ClosestSchedulingState(nanotime_t eventTime) const
{
	int32 index = fSchedulingStates.BinarySearchIndexByKey(eventTime,
		&_CompareEventTimeSchedulingState);
	if (index >= 0)
		return fSchedulingStates.ItemAt(index);

	// no exact match
	index = -index - 1;
	return index > 0 ? fSchedulingStates.ItemAt(index - 1) : NULL;
}


/*static*/ int
Model::_CompareEventTimeSchedulingState(const nanotime_t* time,
	const CompactSchedulingState* state)
{
	if (*time < state->LastEventTime())
		return -1;
	return *time == state->LastEventTime() ? 0 : 1;
}