/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008-2009, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2012, Rene Gollent, rene@gollent.com.
 * Distributed under the terms of the MIT License.
 */


#include <tracing.h>

#include <stdlib.h>

#include <algorithm>

#include <arch/debug.h>
#include <debug.h>
#include <debug_heap.h>
#include <elf.h>
#include <int.h>
#include <kernel.h>
#include <team.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <vm/vm.h>


#if ENABLE_TRACING

//#define TRACE_TRACING
#ifdef TRACE_TRACING
#	define TRACE(x) dprintf_no_syslog x
#else
#	define TRACE(x) ;
#endif


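// Flags stored in trace_entry::flags. WRAP_ENTRY marks the pseudo entry that
// sends readers back to the start of the ring buffer, ENTRY_INITIALIZED is
// set once an entry's constructor has finished (cf. TraceEntry::Initialized()),
// BUFFER_ENTRY marks raw payload buffers (cf. alloc_tracing_buffer()) that
// iteration skips, and FILTER_MATCH is used by the "traced" command to
// remember filter results when scanning backwards.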
enum {
	WRAP_ENTRY			= 0x01,
	ENTRY_INITIALIZED	= 0x02,
	BUFFER_ENTRY		= 0x04,
	FILTER_MATCH		= 0x08,
	INVALID_ENTRY		= 0x10,
	CHECK_ENTRY			= 0x20,
};


static const size_t kTraceOutputBufferSize = 10240;
static const size_t kBufferSize = MAX_TRACE_SIZE / sizeof(trace_entry);

static const uint32 kMaxRecoveringErrorCount	= 100;
static const addr_t kMetaDataBaseAddress		= 32 * 1024 * 1024;
static const addr_t kMetaDataBaseEndAddress		= 128 * 1024 * 1024;
static const addr_t kMetaDataAddressIncrement	= 8 * 1024 * 1024;
static const uint32 kMetaDataMagic1 = 'Vali';
static const uint32 kMetaDataMagic2 = 'dTra';
static const uint32 kMetaDataMagic3 = 'cing';

// the maximum we can address with the trace_entry::[previous_]size fields
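// (13 bits counting 4-byte trace_entry units, i.e. ((1 << 13) - 1) * 4
// = 32764 bytes)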
static const size_t kMaxTracingEntryByteSize
	= ((1 << 13) - 1) * sizeof(trace_entry);


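// Functor that forwards printf-style output to a TraceOutput, so that the
// print_stack_trace() template below can print either to a TraceOutput or,
// via kprintf, directly to the kernel debugger.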
struct TraceOutputPrint {
	TraceOutputPrint(TraceOutput& output)
		:
		fOutput(output)
	{
	}

	void operator()(const char* format, ...) const
	{
		va_list args;
		va_start(args, format);
		fOutput.PrintArgs(format, args);
		va_end(args);
	}

private:
	TraceOutput&	fOutput;
};


class TracingMetaData {
public:
	static	status_t			Create(TracingMetaData*& _metaData);

	inline	bool				Lock();
	inline	void				Unlock();

	inline	trace_entry*		FirstEntry() const;
	inline	trace_entry*		AfterLastEntry() const;

	inline	uint32				Entries() const;
	inline	uint32				EntriesEver() const;

	inline	void				IncrementEntriesEver();

	inline	char*				TraceOutputBuffer() const;

			trace_entry*		NextEntry(trace_entry* entry);
			trace_entry*		PreviousEntry(trace_entry* entry);

			trace_entry*		AllocateEntry(size_t size, uint16 flags);

			bool				IsInBuffer(void* address, size_t size);

private:
			bool				_FreeFirstEntry();
			bool				_MakeSpace(size_t needed);

	static	status_t			_CreateMetaDataArea(bool findPrevious,
									area_id& _area,
									TracingMetaData*& _metaData);
			bool				_InitPreviousTracingData();

private:
			uint32				fMagic1;
			trace_entry*		fBuffer;
			trace_entry*		fFirstEntry;
			trace_entry*		fAfterLastEntry;
			uint32				fEntries;
			uint32				fMagic2;
			uint32				fEntriesEver;
			spinlock			fLock;
			char*				fTraceOutputBuffer;
			phys_addr_t			fPhysicalAddress;
			uint32				fMagic3;
};

static TracingMetaData sDummyTracingMetaData;
static TracingMetaData* sTracingMetaData = &sDummyTracingMetaData;
static bool sTracingDataRecovered = false;


// #pragma mark -


template<typename Print>
static void
print_stack_trace(struct tracing_stack_trace* stackTrace,
	const Print& print)
{
	if (stackTrace == NULL || stackTrace->depth <= 0)
		return;

	static const size_t kBufferSize = 256;
	char* buffer = (char*)debug_malloc(kBufferSize);

	for (int32 i = 0; i < stackTrace->depth; i++) {
		addr_t address = stackTrace->return_addresses[i];

		const char* symbol;
		const char* demangledName = NULL;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {

			if (buffer != NULL) {
				bool isObjectMethod;
				demangledName = debug_demangle_symbol(symbol, buffer,
					kBufferSize, &isObjectMethod);
			}

			print("  %p  %s + 0x%lx (%s)%s\n", (void*)address,
				demangledName != NULL ? demangledName : symbol,
				address - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			print("  %p\n", (void*)address);
	}

	if (buffer != NULL)
		debug_free(buffer);
}


// #pragma mark - TracingMetaData


bool
TracingMetaData::Lock()
{
	acquire_spinlock(&fLock);
	return true;
}


void
TracingMetaData::Unlock()
{
	release_spinlock(&fLock);
}


trace_entry*
TracingMetaData::FirstEntry() const
{
	return fFirstEntry;
}


trace_entry*
TracingMetaData::AfterLastEntry() const
{
	return fAfterLastEntry;
}


uint32
TracingMetaData::Entries() const
{
	return fEntries;
}


uint32
TracingMetaData::EntriesEver() const
{
	return fEntriesEver;
}

void
TracingMetaData::IncrementEntriesEver()
{
	fEntriesEver++;
		// NOTE: Race condition on SMP machines! We could use atomic_add(),
		// but that costs some performance, and the counter is only used for
		// informational purposes anyway.
}


char*
TracingMetaData::TraceOutputBuffer() const
{
	return fTraceOutputBuffer;
}


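/*!	Returns the entry following \a entry, transparently following a
	WRAP_ENTRY back to the buffer start, or \c NULL if \a entry was the
	last entry (i.e. the next slot is fAfterLastEntry).
*/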
trace_entry*
TracingMetaData::NextEntry(trace_entry* entry)
{
	entry += entry->size;
	if ((entry->flags & WRAP_ENTRY) != 0)
		entry = fBuffer;

	if (entry == fAfterLastEntry)
		return NULL;

	return entry;
}


trace_entry*
TracingMetaData::PreviousEntry(trace_entry* entry)
{
	if (entry == fFirstEntry)
		return NULL;

	if (entry == fBuffer) {
		// beginning of buffer -- previous entry is a wrap entry
		entry = fBuffer + kBufferSize - entry->previous_size;
	}

	return entry - entry->previous_size;
}


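/*!	Allocates \a size bytes (rounded up to whole trace_entry units) in the
	ring buffer and tags the new entry with \a flags. Old entries are
	evicted via _MakeSpace() as necessary. Returns \c NULL if tracing is
	not initialized, \a size is out of range, or space cannot be freed.
*/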
trace_entry*
TracingMetaData::AllocateEntry(size_t size, uint16 flags)
{
	if (fAfterLastEntry == NULL || size == 0
		|| size >= kMaxTracingEntryByteSize) {
		return NULL;
	}

	InterruptsSpinLocker _(fLock);

	size = (size + 3) >> 2;
		// 4 byte aligned, don't store the lower 2 bits

	TRACE(("AllocateEntry(%lu), start %p, end %p, buffer %p\n", size * 4,
		fFirstEntry, fAfterLastEntry, fBuffer));

	if (!_MakeSpace(size))
		return NULL;

	trace_entry* entry = fAfterLastEntry;
	entry->size = size;
	entry->flags = flags;
	fAfterLastEntry += size;
	fAfterLastEntry->previous_size = size;

	if (!(flags & BUFFER_ENTRY))
		fEntries++;

	TRACE(("  entry: %p, end %p, start %p, entries %ld\n", entry,
		fAfterLastEntry, fFirstEntry, fEntries));

	return entry;
}


bool
TracingMetaData::IsInBuffer(void* address, size_t size)
{
	if (fEntries == 0)
		return false;

	addr_t start = (addr_t)address;
	addr_t end = start + size;

	if (start < (addr_t)fBuffer || end > (addr_t)(fBuffer + kBufferSize))
		return false;

	if (fFirstEntry > fAfterLastEntry)
		return start >= (addr_t)fFirstEntry || end <= (addr_t)fAfterLastEntry;

	return start >= (addr_t)fFirstEntry && end <= (addr_t)fAfterLastEntry;
}


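/*!	Evicts the oldest entry, advancing fFirstEntry. Returns \c false if
	that entry is a TraceEntry whose constructor has not finished yet
	(ENTRY_INITIALIZED not set), in which case nothing may be freed.
*/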
bool
TracingMetaData::_FreeFirstEntry()
{
	TRACE(("  skip start %p, %lu*4 bytes\n", fFirstEntry, fFirstEntry->size));

	trace_entry* newFirst = NextEntry(fFirstEntry);

	if (fFirstEntry->flags & BUFFER_ENTRY) {
		// a buffer entry -- just skip it
	} else if (fFirstEntry->flags & ENTRY_INITIALIZED) {
		// Fully initialized TraceEntry: We could destroy it, but don't do so
		// for the sake of robustness. The destructors of tracing entry
		// classes should be empty anyway.
		fEntries--;
	} else {
		// Not fully initialized TraceEntry: We can't free it, since its
		// constructor might still write into the memory and overwrite data
		// of the entry we're going to allocate. We can't do anything until
		// this entry can be discarded.
		return false;
	}

	if (newFirst == NULL) {
		// everything is freed -- practically this can't happen, if
		// the buffer is large enough to hold three max-sized entries
		fFirstEntry = fAfterLastEntry = fBuffer;
		TRACE(("_FreeFirstEntry(): all entries freed!\n"));
	} else
		fFirstEntry = newFirst;

	return true;
}


/*!	Makes sure we have needed * 4 bytes of memory at fAfterLastEntry.
	Returns \c false, if unable to free that much.
*/
bool
TracingMetaData::_MakeSpace(size_t needed)
{
	// we need space for fAfterLastEntry, too (in case we need to wrap around
	// later)
	needed++;

	// If there's not enough space (free or occupied) after fAfterLastEntry,
	// we free all entries in that region and wrap around.
	if (fAfterLastEntry + needed > fBuffer + kBufferSize) {
		TRACE(("_MakeSpace(%lu), wrapping around: after last: %p\n", needed,
			fAfterLastEntry));

		// Free all entries after fAfterLastEntry and one more at the beginning
		// of the buffer.
		while (fFirstEntry > fAfterLastEntry) {
			if (!_FreeFirstEntry())
				return false;
		}
		if (fAfterLastEntry != fBuffer && !_FreeFirstEntry())
			return false;

		// just in case _FreeFirstEntry() freed the very last existing entry
		if (fAfterLastEntry == fBuffer)
			return true;

		// mark as wrap entry and actually wrap around
		trace_entry* wrapEntry = fAfterLastEntry;
		wrapEntry->size = 0;
		wrapEntry->flags = WRAP_ENTRY;
		fAfterLastEntry = fBuffer;
		fAfterLastEntry->previous_size = fBuffer + kBufferSize - wrapEntry;
	}

	if (fFirstEntry <= fAfterLastEntry) {
		// buffer is empty or the space after fAfterLastEntry is unoccupied
		return true;
	}

	// free the first entries, until there's enough space
	size_t space = fFirstEntry - fAfterLastEntry;

	if (space < needed) {
		TRACE(("_MakeSpace(%lu), left %ld\n", needed, space));
	}

	while (space < needed) {
		space += fFirstEntry->size;

		if (!_FreeFirstEntry())
			return false;
	}

	TRACE(("  out: start %p, entries %ld\n", fFirstEntry, fEntries));

	return true;
}


/*static*/ status_t
TracingMetaData::Create(TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	area_id area;
	TracingMetaData* metaData;
	status_t error = _CreateMetaDataArea(true, area, metaData);
	if (error == B_OK) {
		if (metaData->_InitPreviousTracingData()) {
			_metaData = metaData;
			return B_OK;
		}

		dprintf("Found previous tracing meta data, but failed to init.\n");

		// invalidate the meta data
		metaData->fMagic1 = 0;
		metaData->fMagic2 = 0;
		metaData->fMagic3 = 0;
		delete_area(area);
	} else
		dprintf("No previous tracing meta data found.\n");

	// no previous tracing data found -- create new one
	error = _CreateMetaDataArea(false, area, metaData);
	if (error != B_OK)
		return error;

	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
		&virtualRestrictions, &physicalRestrictions,
		(void**)&metaData->fTraceOutputBuffer);
	if (area < 0)
		return area;

	// get the physical address
	physical_entry physicalEntry;
	if (get_memory_map(metaData->fTraceOutputBuffer, B_PAGE_SIZE,
			&physicalEntry, 1) == B_OK) {
		metaData->fPhysicalAddress = physicalEntry.address;
	} else {
		dprintf("TracingMetaData::Create(): failed to get physical address "
			"of tracing buffer\n");
		metaData->fPhysicalAddress = 0;
	}

	metaData->fBuffer = (trace_entry*)(metaData->fTraceOutputBuffer
		+ kTraceOutputBufferSize);
	metaData->fFirstEntry = metaData->fBuffer;
	metaData->fAfterLastEntry = metaData->fBuffer;

	metaData->fEntries = 0;
	metaData->fEntriesEver = 0;
	B_INITIALIZE_SPINLOCK(&metaData->fLock);

	metaData->fMagic1 = kMetaDataMagic1;
	metaData->fMagic2 = kMetaDataMagic2;
	metaData->fMagic3 = kMetaDataMagic3;

	_metaData = metaData;
	return B_OK;
}


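/*!	Maps a page of physical memory at each candidate address from
	kMetaDataBaseAddress up to kMetaDataBaseEndAddress (in steps of
	kMetaDataAddressIncrement). With \a findPrevious a page is accepted
	only if it carries all three magic values written by a previous
	session; otherwise the first page that can be mapped is used for
	fresh meta data.
*/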
/*static*/ status_t
TracingMetaData::_CreateMetaDataArea(bool findPrevious, area_id& _area,
	TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	TracingMetaData* metaData;
	phys_addr_t metaDataAddress = kMetaDataBaseAddress;
	for (; metaDataAddress <= kMetaDataBaseEndAddress;
			metaDataAddress += kMetaDataAddressIncrement) {
		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
		physical_address_restrictions physicalRestrictions = {};
		physicalRestrictions.low_address = metaDataAddress;
		physicalRestrictions.high_address = metaDataAddress + B_PAGE_SIZE;
		area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata",
			B_PAGE_SIZE, B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			CREATE_AREA_DONT_CLEAR, 0, &virtualRestrictions,
			&physicalRestrictions, (void**)&metaData);
		if (area < 0)
			continue;

		if (!findPrevious) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		if (metaData->fMagic1 == kMetaDataMagic1
			&& metaData->fMagic2 == kMetaDataMagic2
			&& metaData->fMagic3 == kMetaDataMagic3) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		delete_area(area);
	}

	return B_ENTRY_NOT_FOUND;
}


bool
TracingMetaData::_InitPreviousTracingData()
{
	// TODO: ATM re-attaching the previous tracing buffer doesn't work very
	// well. The entries should be checked more thoroughly for validity -- e.g.
	// the pointers to the entries' vtable pointers could be invalid, which can
	// make the "traced" command quite unusable. The validity of the entries
	// could be checked in a safe environment (i.e. with a fault handler) with
	// typeid() and call of a virtual function.
	return false;

	addr_t bufferStart
		= (addr_t)fTraceOutputBuffer + kTraceOutputBufferSize;
	addr_t bufferEnd = bufferStart + MAX_TRACE_SIZE;

	if (bufferStart > bufferEnd || (addr_t)fBuffer != bufferStart
		|| (addr_t)fFirstEntry % sizeof(trace_entry) != 0
		|| (addr_t)fFirstEntry < bufferStart
		|| (addr_t)fFirstEntry + sizeof(trace_entry) >= bufferEnd
		|| (addr_t)fAfterLastEntry % sizeof(trace_entry) != 0
		|| (addr_t)fAfterLastEntry < bufferStart
		|| (addr_t)fAfterLastEntry > bufferEnd
		|| fPhysicalAddress == 0) {
		dprintf("Failed to init tracing meta data: Sanity checks "
			"failed.\n");
		return false;
	}

	// re-map the previous tracing buffer
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address = fTraceOutputBuffer;
	virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	physicalRestrictions.low_address = fPhysicalAddress;
	physicalRestrictions.high_address = fPhysicalAddress
		+ ROUNDUP(kTraceOutputBufferSize + MAX_TRACE_SIZE, B_PAGE_SIZE);
	area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_CLEAR, 0,
		&virtualRestrictions, &physicalRestrictions, NULL);
	if (area < 0) {
		dprintf("Failed to init tracing meta data: Mapping tracing log "
			"buffer failed: %s\n", strerror(area));
		return false;
	}

	dprintf("ktrace: Remapped tracing buffer at %p, size: %" B_PRIuSIZE "\n",
		fTraceOutputBuffer, kTraceOutputBufferSize + MAX_TRACE_SIZE);

	// verify/repair the tracing entry list
	uint32 errorCount = 0;
	uint32 entryCount = 0;
	uint32 nonBufferEntryCount = 0;
	uint32 previousEntrySize = 0;
	trace_entry* entry = fFirstEntry;
	while (errorCount <= kMaxRecoveringErrorCount) {
		// check previous entry size
		if (entry->previous_size != previousEntrySize) {
			if (entry != fFirstEntry) {
				dprintf("ktrace recovering: entry %p: fixing previous_size: "
					"%lu (should be %lu)\n", entry, entry->previous_size,
					previousEntrySize);
				errorCount++;
			}
			entry->previous_size = previousEntrySize;
		}

		if (entry == fAfterLastEntry)
			break;

		// check size field
		if ((entry->flags & WRAP_ENTRY) == 0 && entry->size == 0) {
			dprintf("ktrace recovering: entry %p: non-wrap entry size is 0\n",
				entry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry->size > uint32(fBuffer + kBufferSize - entry)) {
			dprintf("ktrace recovering: entry %p: size too big: %lu\n", entry,
				entry->size);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry < fAfterLastEntry && entry + entry->size > fAfterLastEntry) {
			dprintf("ktrace recovering: entry %p: entry crosses "
				"fAfterLastEntry (%p)\n", entry, fAfterLastEntry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		// check for wrap entry
		if ((entry->flags & WRAP_ENTRY) != 0) {
			if ((uint32)(fBuffer + kBufferSize - entry)
					> kMaxTracingEntryByteSize / sizeof(trace_entry)) {
				dprintf("ktrace recovering: entry %p: wrap entry at invalid "
					"buffer location\n", entry);
				errorCount++;
			}

			if (entry->size != 0) {
				dprintf("ktrace recovering: entry %p: invalid wrap entry "
					"size: %lu\n", entry, entry->size);
				errorCount++;
				entry->size = 0;
			}

			previousEntrySize = fBuffer + kBufferSize - entry;
			entry = fBuffer;
			continue;
		}

		if ((entry->flags & BUFFER_ENTRY) == 0) {
			entry->flags |= CHECK_ENTRY;
			nonBufferEntryCount++;
		}

		entryCount++;
		previousEntrySize = entry->size;

		entry += entry->size;
	}

	if (errorCount > kMaxRecoveringErrorCount) {
		dprintf("ktrace recovering: Too many errors.\n");
		fAfterLastEntry = entry;
		fAfterLastEntry->previous_size = previousEntrySize;
	}

	dprintf("ktrace recovering: Recovered %lu entries + %lu buffer entries "
		"from previous session. Expected %lu entries.\n", nonBufferEntryCount,
		entryCount - nonBufferEntryCount, fEntries);
	fEntries = nonBufferEntryCount;

	B_INITIALIZE_SPINLOCK(&fLock);

	// TODO: Actually check the entries! Do that when first accessing the
	// tracing buffer from the kernel debugger (when sTracingDataRecovered is
	// true).
	sTracingDataRecovered = true;
	return true;
}


#endif	// ENABLE_TRACING


// #pragma mark -


TraceOutput::TraceOutput(char* buffer, size_t bufferSize, uint32 flags)
	: fBuffer(buffer),
	  fCapacity(bufferSize),
	  fFlags(flags)
{
	Clear();
}


void
TraceOutput::Clear()
{
	if (fCapacity > 0)
		fBuffer[0] = '\0';
	fSize = 0;
}


void
TraceOutput::PrintArgs(const char* format, va_list args)
{
#if ENABLE_TRACING
	if (IsFull())
		return;

	size_t length = vsnprintf(fBuffer + fSize, fCapacity - fSize, format, args);
	fSize += std::min(length, fCapacity - fSize - 1);
#endif
}

void
TraceOutput::PrintStackTrace(tracing_stack_trace* stackTrace)
{
#if ENABLE_TRACING
	print_stack_trace(stackTrace, TraceOutputPrint(*this));
		// print_stack_trace() already handles NULL/empty traces as well as
		// symbol lookup and demangling; repeating the lookup loop here would
		// print every frame twice.
#endif
}


void
TraceOutput::SetLastEntryTime(bigtime_t time)
{
	fLastEntryTime = time;
}


bigtime_t
TraceOutput::LastEntryTime() const
{
	return fLastEntryTime;
}


//	#pragma mark -


TraceEntry::TraceEntry()
{
}


TraceEntry::~TraceEntry()
{
}


void
TraceEntry::Dump(TraceOutput& out)
{
#if ENABLE_TRACING
	// to be overridden by subclasses
	out.Print("ENTRY %p", this);
#endif
}


void
TraceEntry::DumpStackTrace(TraceOutput& out)
{
}


void
TraceEntry::Initialized()
{
#if ENABLE_TRACING
	ToTraceEntry()->flags |= ENTRY_INITIALIZED;
	sTracingMetaData->IncrementEntriesEver();
#endif
}


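/*!	Trace entries live directly in the tracing ring buffer: this nothrow
	operator new reserves a trace_entry header plus the object itself, so
	"new(std::nothrow) SomeTraceEntry(...)" both allocates and records the
	entry. It returns \c NULL (silently dropping the entry) when tracing
	is disabled or no space can be made.
*/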
void*
TraceEntry::operator new(size_t size, const std::nothrow_t&) throw()
{
#if ENABLE_TRACING
	trace_entry* entry = sTracingMetaData->AllocateEntry(
		size + sizeof(trace_entry), 0);
	return entry != NULL ? entry + 1 : NULL;
#endif
	return NULL;
}


//	#pragma mark -


AbstractTraceEntry::~AbstractTraceEntry()
{
}


void
AbstractTraceEntry::Dump(TraceOutput& out)
{
	bigtime_t time = (out.Flags() & TRACE_OUTPUT_DIFF_TIME)
		? fTime - out.LastEntryTime()
		: fTime;

	if (out.Flags() & TRACE_OUTPUT_TEAM_ID) {
		out.Print("[%6" B_PRId32 ":%6" B_PRId32 "] %10" B_PRId64 ": ", fThread,
			fTeam, time);
	} else
		out.Print("[%6" B_PRId32 "] %10" B_PRId64 ": ", fThread, time);

	AddDump(out);

	out.SetLastEntryTime(fTime);
}


void
AbstractTraceEntry::AddDump(TraceOutput& out)
{
}


void
AbstractTraceEntry::_Init()
{
	Thread* thread = thread_get_current_thread();
	if (thread != NULL) {
		fThread = thread->id;
		if (thread->team)
			fTeam = thread->team->id;
	}
	fTime = system_time();
}


//	#pragma mark - AbstractTraceEntryWithStackTrace


AbstractTraceEntryWithStackTrace::AbstractTraceEntryWithStackTrace(
	size_t stackTraceDepth, size_t skipFrames, bool kernelOnly)
{
	fStackTrace = capture_tracing_stack_trace(stackTraceDepth, skipFrames + 1,
		kernelOnly);
}


void
AbstractTraceEntryWithStackTrace::DumpStackTrace(TraceOutput& out)
{
	out.PrintStackTrace(fStackTrace);
}


//	#pragma mark -


#if ENABLE_TRACING

class KernelTraceEntry : public AbstractTraceEntry {
	public:
		KernelTraceEntry(const char* message)
		{
			fMessage = alloc_tracing_buffer_strcpy(message, 256, false);

#if KTRACE_PRINTF_STACK_TRACE
			fStackTrace = capture_tracing_stack_trace(
				KTRACE_PRINTF_STACK_TRACE, 1, false);
#endif
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("kern: %s", fMessage);
		}

#if KTRACE_PRINTF_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

	private:
		char*	fMessage;
#if KTRACE_PRINTF_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};


class UserTraceEntry : public AbstractTraceEntry {
	public:
		UserTraceEntry(const char* message)
		{
			fMessage = alloc_tracing_buffer_strcpy(message, 256, true);

#if KTRACE_PRINTF_STACK_TRACE
			fStackTrace = capture_tracing_stack_trace(
				KTRACE_PRINTF_STACK_TRACE, 1, false);
#endif
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("user: %s", fMessage);
		}

#if KTRACE_PRINTF_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

	private:
		char*	fMessage;
#if KTRACE_PRINTF_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};


class TracingLogStartEntry : public AbstractTraceEntry {
	public:
		TracingLogStartEntry()
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("ktrace start");
		}
};

#endif	// ENABLE_TRACING


//	#pragma mark - trace filters


TraceFilter::~TraceFilter()
{
}


bool
TraceFilter::Filter(const TraceEntry* entry, LazyTraceOutput& out)
{
	return false;
}


class ThreadTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
	{
		const AbstractTraceEntry* entry
			= dynamic_cast<const AbstractTraceEntry*>(_entry);
		return (entry != NULL && entry->ThreadID() == fThread);
	}
};


class TeamTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
	{
		const AbstractTraceEntry* entry
			= dynamic_cast<const AbstractTraceEntry*>(_entry);
		return (entry != NULL && entry->TeamID() == fTeam);
	}
};


class PatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return strstr(out.DumpEntry(entry), fString) != NULL;
	}
};


class DecimalPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		// TODO: this is *very* slow
		char buffer[64];
		snprintf(buffer, sizeof(buffer), "%" B_PRId64, fValue);
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};


class HexPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		// TODO: this is *very* slow
		char buffer[64];
		snprintf(buffer, sizeof(buffer), "%" B_PRIx64, fValue);
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};


class StringPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		if (IS_KERNEL_ADDRESS(fValue))
			return strstr(out.DumpEntry(entry), (const char*)fValue) != NULL;

		// TODO: this is *very* slow
		char buffer[64];
		user_strlcpy(buffer, (const char*)fValue, sizeof(buffer));
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};


class NotTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return !fSubFilters.first->Filter(entry, out);
	}
};


class AndTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return fSubFilters.first->Filter(entry, out)
			&& fSubFilters.second->Filter(entry, out);
	}
};


class OrTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return fSubFilters.first->Filter(entry, out)
			|| fSubFilters.second->Filter(entry, out);
	}
};


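// Filter expressions are parsed in prefix notation. For example (sketch):
//
//   traced filter and thread 42 #vm
//
// matches entries of thread 42 whose text contains "vm". The "d#", "x#",
// and "s#" prefixes match a decimal value, a hex value, or a string given
// as an expression, respectively.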
class TraceFilterParser {
public:
	static TraceFilterParser* Default()
	{
		return &sParser;
	}

	bool Parse(int argc, const char* const* argv)
	{
		fTokens = argv;
		fTokenCount = argc;
		fTokenIndex = 0;
		fFilterCount = 0;

		TraceFilter* filter = _ParseExpression();
		return fTokenIndex == fTokenCount && filter != NULL;
	}

	TraceFilter* Filter()
	{
		return &fFilters[0];
	}

private:
	TraceFilter* _ParseExpression()
	{
		const char* token = _NextToken();
		if (!token) {
			// unexpected end of expression
			return NULL;
		}

		if (fFilterCount == MAX_FILTERS) {
			// too many filters
			return NULL;
		}

		if (token[0] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				PatternTraceFilter;
			filter->fString = token + 1;
			return filter;
		} else if (token[0] == 'd' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				DecimalPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (token[0] == 'x' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				HexPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (token[0] == 's' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				StringPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (strcmp(token, "not") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) NotTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL)
				return filter;
			return NULL;
		} else if (strcmp(token, "and") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) AndTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			return NULL;
		} else if (strcmp(token, "or") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) OrTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			return NULL;
		} else if (strcmp(token, "thread") == 0) {
			const char* arg = _NextToken();
			if (arg == NULL) {
				// unexpected end of expression
				return NULL;
			}

			TraceFilter* filter = new(&fFilters[fFilterCount++])
				ThreadTraceFilter;
			filter->fThread = strtol(arg, NULL, 0);
			return filter;
		} else if (strcmp(token, "team") == 0) {
			const char* arg = _NextToken();
			if (arg == NULL) {
				// unexpected end of expression
				return NULL;
			}

			TraceFilter* filter = new(&fFilters[fFilterCount++])
				TeamTraceFilter;
			filter->fTeam = strtol(arg, NULL, 0);
			return filter;
		} else {
			// invalid token
			return NULL;
		}
	}

	const char* _CurrentToken() const
	{
		if (fTokenIndex >= 1 && fTokenIndex <= fTokenCount)
			return fTokens[fTokenIndex - 1];
		return NULL;
	}

	const char* _NextToken()
	{
		if (fTokenIndex >= fTokenCount)
			return NULL;
		return fTokens[fTokenIndex++];
	}

private:
	enum { MAX_FILTERS = 32 };

	const char* const*			fTokens;
	int							fTokenCount;
	int							fTokenIndex;
	TraceFilter					fFilters[MAX_FILTERS];
	int							fFilterCount;

	static TraceFilterParser	sParser;
};


TraceFilterParser TraceFilterParser::sParser;


//	#pragma mark -


#if ENABLE_TRACING


TraceEntry*
TraceEntryIterator::Next()
{
	if (fIndex == 0) {
		fEntry = _NextNonBufferEntry(sTracingMetaData->FirstEntry());
		fIndex = 1;
	} else if (fEntry != NULL) {
		fEntry = _NextNonBufferEntry(sTracingMetaData->NextEntry(fEntry));
		fIndex++;
	}

	return Current();
}


TraceEntry*
TraceEntryIterator::Previous()
{
	if (fIndex == (int32)sTracingMetaData->Entries() + 1)
		fEntry = sTracingMetaData->AfterLastEntry();

	if (fEntry != NULL) {
		fEntry = _PreviousNonBufferEntry(
			sTracingMetaData->PreviousEntry(fEntry));
		fIndex--;
	}

	return Current();
}


TraceEntry*
TraceEntryIterator::MoveTo(int32 index)
{
	if (index == fIndex)
		return Current();

	if (index <= 0 || index > (int32)sTracingMetaData->Entries()) {
		fIndex = (index <= 0 ? 0 : sTracingMetaData->Entries() + 1);
		fEntry = NULL;
		return NULL;
	}

	// get the shortest iteration path
	int32 distance = index - fIndex;
	int32 direction = distance < 0 ? -1 : 1;
	distance *= direction;

	if (index < distance) {
		distance = index;
		direction = 1;
		fEntry = NULL;
		fIndex = 0;
	}
	if ((int32)sTracingMetaData->Entries() + 1 - fIndex < distance) {
		distance = sTracingMetaData->Entries() + 1 - fIndex;
		direction = -1;
		fEntry = NULL;
		fIndex = sTracingMetaData->Entries() + 1;
	}

	// iterate to the index
	if (direction < 0) {
		while (fIndex != index)
			Previous();
	} else {
		while (fIndex != index)
			Next();
	}

	return Current();
}


trace_entry*
TraceEntryIterator::_NextNonBufferEntry(trace_entry* entry)
{
	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
		entry = sTracingMetaData->NextEntry(entry);

	return entry;
}


trace_entry*
TraceEntryIterator::_PreviousNonBufferEntry(trace_entry* entry)
{
	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
		entry = sTracingMetaData->PreviousEntry(entry);

	return entry;
}


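// Typical KDL invocations (sketches, cf. the usage text in tracing_init()):
//   traced 0 10                - print the last 10 recorded entries
//   traced --stacktrace 100 20 - print entries 100-119 with stack traces
//   traced forward             - continue a previous dump forwards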
int
dump_tracing_internal(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
{
	int argi = 1;

	// variables in which we store our state to be continuable
	static int32 _previousCount = 0;
	static bool _previousHasFilter = false;
	static bool _previousPrintStackTrace = false;
	static int32 _previousMaxToCheck = 0;
	static int32 _previousFirstChecked = 1;
	static int32 _previousLastChecked = -1;
	static int32 _previousDirection = 1;
	static uint32 _previousEntriesEver = 0;
	static uint32 _previousEntries = 0;
	static uint32 _previousOutputFlags = 0;
	static TraceEntryIterator iterator;

	uint32 entriesEver = sTracingMetaData->EntriesEver();

	// Note: start and index are Pascal-like indices (i.e. in [1, Entries()]).
	int32 start = 0;	// special index: print the last count entries
	int32 count = 0;
	int32 maxToCheck = 0;
	int32 cont = 0;

	bool hasFilter = false;
	bool printStackTrace = false;

	uint32 outputFlags = 0;
	while (argi < argc) {
		if (strcmp(argv[argi], "--difftime") == 0) {
			outputFlags |= TRACE_OUTPUT_DIFF_TIME;
			argi++;
		} else if (strcmp(argv[argi], "--printteam") == 0) {
			outputFlags |= TRACE_OUTPUT_TEAM_ID;
			argi++;
		} else if (strcmp(argv[argi], "--stacktrace") == 0) {
			printStackTrace = true;
			argi++;
		} else
			break;
	}

	if (argi < argc) {
		if (strcmp(argv[argi], "forward") == 0) {
			cont = 1;
			argi++;
		} else if (strcmp(argv[argi], "backward") == 0) {
			cont = -1;
			argi++;
		}
	} else
		cont = _previousDirection;

	if (cont != 0) {
		if (argi < argc) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
		if (entriesEver == 0 || entriesEver != _previousEntriesEver
			|| sTracingMetaData->Entries() != _previousEntries) {
			kprintf("Can't continue iteration. \"%s\" has not been invoked "
				"before, or there were new entries written since the last "
				"invocation.\n", argv[0]);
			return 0;
		}
	}

	// get start, count, maxToCheck
	int32* params[3] = { &start, &count, &maxToCheck };
	for (int i = 0; i < 3 && !hasFilter && argi < argc; i++) {
		if (strcmp(argv[argi], "filter") == 0) {
			hasFilter = true;
			argi++;
		} else if (argv[argi][0] == '#') {
			hasFilter = true;
		} else {
			*params[i] = parse_expression(argv[argi]);
			argi++;
		}
	}

	// filter specification
	if (argi < argc) {
		hasFilter = true;
		if (strcmp(argv[argi], "filter") == 0)
			argi++;

		if (!TraceFilterParser::Default()->Parse(argc - argi, argv + argi)) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	int32 direction;
	int32 firstToCheck;
	int32 lastToCheck;

	if (cont != 0) {
		// get values from the previous iteration
		direction = cont;
		count = _previousCount;
		maxToCheck = _previousMaxToCheck;
		hasFilter = _previousHasFilter;
		outputFlags = _previousOutputFlags;
		printStackTrace = _previousPrintStackTrace;

		if (direction < 0)
			start = _previousFirstChecked - 1;
		else
			start = _previousLastChecked + 1;
	} else {
		// defaults for count and maxToCheck
		if (count == 0)
			count = 30;
		if (maxToCheck == 0 || !hasFilter)
			maxToCheck = count;
		else if (maxToCheck < 0)
			maxToCheck = sTracingMetaData->Entries();

		// determine iteration direction
		direction = (start <= 0 || count < 0 ? -1 : 1);

		// validate count and maxToCheck
		if (count < 0)
			count = -count;
		if (maxToCheck < 0)
			maxToCheck = -maxToCheck;
		if (maxToCheck > (int32)sTracingMetaData->Entries())
			maxToCheck = sTracingMetaData->Entries();
		if (count > maxToCheck)
			count = maxToCheck;

		// validate start
		if (start <= 0 || start > (int32)sTracingMetaData->Entries())
			start = max_c(1, sTracingMetaData->Entries());
	}

	if (direction < 0) {
		firstToCheck = max_c(1, start - maxToCheck + 1);
		lastToCheck = start;
	} else {
		firstToCheck = start;
		lastToCheck = min_c((int32)sTracingMetaData->Entries(),
			start + maxToCheck - 1);
	}

	// reset the iterator, if something changed in the meantime
	if (entriesEver == 0 || entriesEver != _previousEntriesEver
		|| sTracingMetaData->Entries() != _previousEntries) {
		iterator.Reset();
	}

	LazyTraceOutput out(sTracingMetaData->TraceOutputBuffer(),
		kTraceOutputBufferSize, outputFlags);

	bool markedMatching = false;
	int32 firstToDump = firstToCheck;
	int32 lastToDump = lastToCheck;

	TraceFilter* filter = NULL;
	if (hasFilter)
		filter = TraceFilterParser::Default()->Filter();

	if (wrapperFilter != NULL) {
		wrapperFilter->Init(filter, direction, cont != 0);
		filter = wrapperFilter;
	}

	if (direction < 0 && filter && lastToCheck - firstToCheck >= count) {
		// iteration direction is backwards
		markedMatching = true;

		// From the last entry to check iterate backwards to check filter
		// matches.
		int32 matching = 0;

		// move to the entry after the last entry to check
		iterator.MoveTo(lastToCheck + 1);

		// iterate backwards
		firstToDump = -1;
		lastToDump = -1;
		while (iterator.Index() > firstToCheck) {
			TraceEntry* entry = iterator.Previous();
			if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
				out.Clear();
				if (filter->Filter(entry, out)) {
					entry->ToTraceEntry()->flags |= FILTER_MATCH;
					if (lastToDump == -1)
						lastToDump = iterator.Index();
					firstToDump = iterator.Index();

					matching++;
					if (matching >= count)
						break;
				} else
					entry->ToTraceEntry()->flags &= ~FILTER_MATCH;
			}
		}

		firstToCheck = iterator.Index();

		// iterate to the previous entry, so that the next loop starts at the
		// right one
		iterator.Previous();
	}

	out.SetLastEntryTime(0);

	// set the iterator to the entry before the first one to dump
	iterator.MoveTo(firstToDump - 1);

	// dump the entries matching the filter in the range
	// [firstToDump, lastToDump]
	int32 dumped = 0;

	while (TraceEntry* entry = iterator.Next()) {
		int32 index = iterator.Index();
		if (index < firstToDump)
			continue;
		if (index > lastToDump || dumped >= count) {
			if (direction > 0)
				lastToCheck = index - 1;
			break;
		}

		if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
			out.Clear();
			if (filter && (markedMatching
					? (entry->Flags() & FILTER_MATCH) == 0
					: !filter->Filter(entry, out))) {
				continue;
			}

			// don't print trailing new line
			const char* dump = out.DumpEntry(entry);
			int len = strlen(dump);
			if (len > 0 && dump[len - 1] == '\n')
				len--;

			kprintf("%5ld. %.*s\n", index, len, dump);

			if (printStackTrace) {
				out.Clear();
				entry->DumpStackTrace(out);
				if (out.Size() > 0)
					kputs(out.Buffer());
			}
		} else if (!filter)
			kprintf("%5ld. ** uninitialized entry **\n", index);

		dumped++;
	}

	kprintf("printed %ld entries within range %ld to %ld (%ld of %ld total, "
		"%ld ever)\n", dumped, firstToCheck, lastToCheck,
		lastToCheck - firstToCheck + 1, sTracingMetaData->Entries(),
		entriesEver);

	// store iteration state
	_previousCount = count;
	_previousMaxToCheck = maxToCheck;
	_previousHasFilter = hasFilter;
	_previousPrintStackTrace = printStackTrace;
	_previousFirstChecked = firstToCheck;
	_previousLastChecked = lastToCheck;
	_previousDirection = direction;
	_previousEntriesEver = entriesEver;
	_previousEntries = sTracingMetaData->Entries();
	_previousOutputFlags = outputFlags;

	return cont != 0 ? B_KDEBUG_CONT : 0;
}


static int
dump_tracing_command(int argc, char** argv)
{
	return dump_tracing_internal(argc, argv, NULL);
}


#endif	// ENABLE_TRACING


extern "C" uint8*
alloc_tracing_buffer(size_t size)
{
#if	ENABLE_TRACING
	trace_entry* entry = sTracingMetaData->AllocateEntry(
		size + sizeof(trace_entry), BUFFER_ENTRY);
	if (entry == NULL)
		return NULL;

	return (uint8*)(entry + 1);
#else
	return NULL;
#endif
}


uint8*
alloc_tracing_buffer_memcpy(const void* source, size_t size, bool user)
{
	if (user && !IS_USER_ADDRESS(source))
		return NULL;

	uint8* buffer = alloc_tracing_buffer(size);
	if (buffer == NULL)
		return NULL;

	if (user) {
		if (user_memcpy(buffer, source, size) != B_OK)
			return NULL;
	} else
		memcpy(buffer, source, size);

	return buffer;
}


char*
alloc_tracing_buffer_strcpy(const char* source, size_t maxSize, bool user)
{
	if (source == NULL || maxSize == 0)
		return NULL;

	if (user && !IS_USER_ADDRESS(source))
		return NULL;

	// limit maxSize to the actual source string len
	if (user) {
		ssize_t size = user_strlcpy(NULL, source, 0);
			// there's no user_strnlen()
		if (size < 0)
			return NULL;
		maxSize = min_c(maxSize, (size_t)size + 1);
	} else
		maxSize = strnlen(source, maxSize - 1) + 1;

	char* buffer = (char*)alloc_tracing_buffer(maxSize);
	if (buffer == NULL)
		return NULL;

	if (user) {
		if (user_strlcpy(buffer, source, maxSize) < B_OK)
			return NULL;
	} else
		strlcpy(buffer, source, maxSize);

	return buffer;
}


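/*!	Captures a stack trace into the tracing buffer. \a skipFrames is
	incremented by one so that this function itself does not show up; with
	interrupts disabled the trace is restricted to kernel frames only (see
	the comment in the function).
*/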
tracing_stack_trace*
capture_tracing_stack_trace(int32 maxCount, int32 skipFrames, bool kernelOnly)
{
#if	ENABLE_TRACING
	// page_fault_exception() doesn't allow us to gracefully handle a bad
	// address in the stack trace, if interrupts are disabled, so we always
	// restrict the stack traces to the kernel only in this case. A bad address
	// in the kernel stack trace would still cause a panic(), but this is
	// probably even desired.
	if (!are_interrupts_enabled())
		kernelOnly = true;

	tracing_stack_trace* stackTrace
		= (tracing_stack_trace*)alloc_tracing_buffer(
			sizeof(tracing_stack_trace) + maxCount * sizeof(addr_t));

	if (stackTrace != NULL) {
		stackTrace->depth = arch_debug_get_stack_trace(
			stackTrace->return_addresses, maxCount, 0, skipFrames + 1,
			STACK_TRACE_KERNEL | (kernelOnly ? 0 : STACK_TRACE_USER));
	}

	return stackTrace;
#else
	return NULL;
#endif
}


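/*!	Returns the first return address in \a stackTrace that does not fall
	into any of the given ranges, or 0 if all addresses are excluded.
	\a excludeRanges holds \a excludeRangeCount [start, end) address pairs,
	i.e. 2 * excludeRangeCount addr_t values.
*/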
addr_t
tracing_find_caller_in_stack_trace(struct tracing_stack_trace* stackTrace,
	const addr_t excludeRanges[], uint32 excludeRangeCount)
{
	for (int32 i = 0; i < stackTrace->depth; i++) {
		addr_t returnAddress = stackTrace->return_addresses[i];

		bool inRange = false;
		for (uint32 j = 0; j < excludeRangeCount; j++) {
			if (returnAddress >= excludeRanges[j * 2 + 0]
				&& returnAddress < excludeRanges[j * 2 + 1]) {
				inRange = true;
				break;
			}
		}

		if (!inRange)
			return returnAddress;
	}

	return 0;
}


void
tracing_print_stack_trace(struct tracing_stack_trace* stackTrace)
{
#if ENABLE_TRACING
	print_stack_trace(stackTrace, kprintf);
#endif
}


int
dump_tracing(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
{
#if	ENABLE_TRACING
	return dump_tracing_internal(argc, argv, wrapperFilter);
#else
	return 0;
#endif
}


bool
tracing_is_entry_valid(AbstractTraceEntry* candidate, bigtime_t entryTime)
{
#if ENABLE_TRACING
	if (!sTracingMetaData->IsInBuffer(candidate, sizeof(*candidate)))
		return false;

	if (entryTime < 0)
		return true;

	TraceEntryIterator iterator;
	while (TraceEntry* entry = iterator.Next()) {
		AbstractTraceEntry* abstract = dynamic_cast<AbstractTraceEntry*>(entry);
		if (abstract == NULL)
			continue;

		if (abstract != candidate && abstract->Time() > entryTime)
			return false;

		return candidate->Time() == entryTime;
	}
#endif

	return false;
}


void
lock_tracing_buffer()
{
#if ENABLE_TRACING
	sTracingMetaData->Lock();
#endif
}


void
unlock_tracing_buffer()
{
#if ENABLE_TRACING
	sTracingMetaData->Unlock();
#endif
}


extern "C" status_t
tracing_init(void)
{
#if	ENABLE_TRACING
	status_t result = TracingMetaData::Create(sTracingMetaData);
	if (result != B_OK) {
		sTracingMetaData = &sDummyTracingMetaData;
		return result;
	}

	new(nothrow) TracingLogStartEntry();

	add_debugger_command_etc("traced", &dump_tracing_command,
		"Dump recorded trace entries",
		"[ --printteam ] [ --difftime ] [ --stacktrace ] "
			"(\"forward\" | \"backward\") "
			"| ([ <start> [ <count> [ <range> ] ] ] "
			"[ #<pattern> | (\"filter\" <filter>) ])\n"
		"Prints recorded trace entries. If \"backward\" or \"forward\" is\n"
		"specified, the command continues where the previous invocation left\n"
		"off, i.e. printing the previous respectively next entries (as many\n"
		"as printed before). In this case the command is continuable, that is\n"
		"afterwards entering an empty line in the debugger will reinvoke it.\n"
		"If no arguments are given, the command continues in the direction\n"
		"of the last invocation.\n"
		"--printteam  - enables printing the entries' team IDs.\n"
		"--difftime   - print difference times for all but the first entry.\n"
		"--stacktrace - print stack traces for entries that captured one.\n"
		"  <start>    - The base index of the entries to print. Depending on\n"
		"               whether the iteration direction is forward or\n"
		"               backward this will be the first or last entry printed\n"
		"               (potentially, if a filter is specified). The index of\n"
		"               the first entry in the trace buffer is 1. If 0 is\n"
		"               specified, the last <count> recorded entries are\n"
		"               printed (iteration direction is backward). Defaults\n"
		"               to 0.\n"
		"  <count>    - The number of entries to be printed. Defaults to 30.\n"
		"               If negative, the -<count> entries before and\n"
		"               including <start> will be printed.\n"
		"  <range>    - Only relevant if a filter is specified. Specifies the\n"
		"               number of entries to be filtered -- depending on the\n"
		"               iteration direction the entries before or after\n"
		"               <start>. If more than <count> entries match the\n"
		"               filter, only the first (forward) or last (backward)\n"
		"               <count> matching entries will be printed. If 0 is\n"
		"               specified <range> will be set to <count>. If -1,\n"
		"               <range> will be set to the number of recorded\n"
		"               entries.\n"
		"  <pattern>  - If specified only entries containing this string are\n"
		"               printed.\n"
		"  <filter>   - If specified only entries matching this filter\n"
		"               expression are printed. The expression can consist of\n"
		"               prefix operators \"not\", \"and\", \"or\", and\n"
		"               filters \"'thread' <thread>\" (matching entries\n"
		"               with the given thread ID), \"'team' <team>\"\n"
		"               (matching entries with the given team ID), and\n"
		"               \"#<pattern>\" (matching entries containing the given\n"
		"               string).\n", 0);
#endif	// ENABLE_TRACING
	return B_OK;
}


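// Example (a sketch; "teamID" and "path" are hypothetical arguments):
//   ktrace_printf("team %" B_PRId32 ": exec %s", teamID, path);
// records a KernelTraceEntry that the "traced" KDL command shows with a
// "kern: " prefix. The formatted message is truncated to 256 bytes.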
void
ktrace_printf(const char *format, ...)
{
#if	ENABLE_TRACING
	va_list list;
	va_start(list, format);

	char buffer[256];
	vsnprintf(buffer, sizeof(buffer), format, list);

	va_end(list);

	new(nothrow) KernelTraceEntry(buffer);
#endif	// ENABLE_TRACING
}


void
_user_ktrace_output(const char *message)
{
#if	ENABLE_TRACING
	new(nothrow) UserTraceEntry(message);
#endif	// ENABLE_TRACING
}