/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include "EntryCache.h"

#include <stdlib.h>
#include <string.h>

#include <new>


// Number of slots in each generation's entry array.
static const int32 kEntriesPerGeneration = 1024;

// Special values for EntryCacheEntry::index: the entry is not (or not yet) in
// any generation's array, resp. it was removed while another thread was moving
// it to the current generation (that thread then frees it).
static const int32 kEntryNotInArray = -1;
static const int32 kEntryRemoved = -2;


// #pragma mark - EntryCacheGeneration
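// A generation is a fixed-size array of kEntriesPerGeneration entry pointers
// plus a running next_index allocation counter. Entries are only appended; a
// removed entry just leaves a NULL slot behind until the whole generation is
// cleared and reused.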


EntryCacheGeneration::EntryCacheGeneration()
	:
	next_index(0),
	entries(NULL)
{
}


EntryCacheGeneration::~EntryCacheGeneration()
{
	delete[] entries;
}


status_t
EntryCacheGeneration::Init()
{
	entries = new(std::nothrow) EntryCacheEntry*[kEntriesPerGeneration];
	if (entries == NULL)
		return B_NO_MEMORY;

	memset(entries, 0, sizeof(EntryCacheEntry*) * kEntriesPerGeneration);

	return B_OK;
}


// #pragma mark - EntryCache
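// The cache maps (directory node ID, entry name) pairs to node IDs and ages
// its entries in kGenerationCount generations of kEntriesPerGeneration slots
// each. Add() inserts or refreshes an entry in the current generation and
// Lookup() promotes the entry it finds; once the current generation's array
// is full, the oldest generation is evicted wholesale and reused as the new
// current one (see _AddEntryToCurrentGeneration()). Lookups normally take
// only the read lock and promote entries with atomic operations, so the
// common case stays cheap. Presumably the VFS consults Lookup() during path
// resolution, calls Add() after resolving a miss, and Remove() on unlink or
// rename.
//
// A minimal usage sketch (hypothetical caller, not part of this file):
//
//	// given some ino_t dirID/nodeID obtained from the file system:
//	EntryCache cache;
//	if (cache.Init() == B_OK) {
//		cache.Add(dirID, "hello.txt", nodeID);
//
//		ino_t cachedID;
//		if (cache.Lookup(dirID, "hello.txt", cachedID)) {
//			// cachedID is now nodeID
//		}
//
//		cache.Remove(dirID, "hello.txt");
//	}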


EntryCache::EntryCache()
	:
	fCurrentGeneration(0)
{
	rw_lock_init(&fLock, "entry cache");

	new(&fEntries) EntryTable;
}


EntryCache::~EntryCache()
{
	// delete entries
	EntryCacheEntry* entry = fEntries.Clear(true);
	while (entry != NULL) {
		EntryCacheEntry* next = entry->hash_link;
		free(entry);
		entry = next;
	}

	rw_lock_destroy(&fLock);
}


status_t
EntryCache::Init()
{
	status_t error = fEntries.Init();
	if (error != B_OK)
		return error;

	for (int32 i = 0; i < kGenerationCount; i++) {
		error = fGenerations[i].Init();
		if (error != B_OK)
			return error;
	}

	return B_OK;
}


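// Adds a dirID/name -> nodeID mapping to the cache, or updates the node ID if
// the entry already exists. An existing entry is also moved to the current
// generation, unless another thread is already doing that. Takes the write
// lock.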
status_t
EntryCache::Add(ino_t dirID, const char* name, ino_t nodeID)
{
	EntryCacheKey key(dirID, name);

	WriteLocker _(fLock);

	EntryCacheEntry* entry = fEntries.Lookup(key);
	if (entry != NULL) {
		entry->node_id = nodeID;
		if (entry->generation != fCurrentGeneration) {
			if (entry->index >= 0) {
				fGenerations[entry->generation].entries[entry->index] = NULL;
				_AddEntryToCurrentGeneration(entry);
			}
		}
		return B_OK;
	}

	entry = (EntryCacheEntry*)malloc(sizeof(EntryCacheEntry) + strlen(name));
	if (entry == NULL)
		return B_NO_MEMORY;

	entry->node_id = nodeID;
	entry->dir_id = dirID;
	entry->generation = fCurrentGeneration;
	entry->index = kEntryNotInArray;
	strcpy(entry->name, name);

	fEntries.Insert(entry);

	_AddEntryToCurrentGeneration(entry);

	return B_OK;
}


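// Removes the dirID/name entry from the cache. If the entry still sits in a
// generation's array, it is freed right away; if another thread is currently
// moving it to the current generation, it is only marked kEntryRemoved and
// that thread frees it (see Lookup()).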
status_t
EntryCache::Remove(ino_t dirID, const char* name)
{
	EntryCacheKey key(dirID, name);

	WriteLocker writeLocker(fLock);

	EntryCacheEntry* entry = fEntries.Lookup(key);
	if (entry == NULL)
		return B_ENTRY_NOT_FOUND;

	fEntries.Remove(entry);

	if (entry->index >= 0) {
		// remove the entry from its generation and delete it
		fGenerations[entry->generation].entries[entry->index] = NULL;
		free(entry);
	} else {
		// We can't free it, since another thread is about to try to move it
		// to another generation. We mark it removed and the other thread will
		// take care of deleting it.
		entry->index = kEntryRemoved;
	}

	return B_OK;
}


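// Looks up dirID/name and, on a hit, returns the cached node ID in _nodeID.
// The entry is promoted to the current generation: the common path does this
// with atomic operations while holding only the read lock; the write lock is
// taken only when the current generation's array is full and the generations
// have to be rotated.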
bool
EntryCache::Lookup(ino_t dirID, const char* name, ino_t& _nodeID)
{
	EntryCacheKey key(dirID, name);

	ReadLocker readLocker(fLock);

	EntryCacheEntry* entry = fEntries.Lookup(key);
	if (entry == NULL)
		return false;

	int32 oldGeneration = atomic_set(&entry->generation, fCurrentGeneration);
	if (oldGeneration == fCurrentGeneration || entry->index < 0) {
		// The entry is already in the current generation or is being moved to
		// it by another thread.
		_nodeID = entry->node_id;
		return true;
	}

	// remove from old generation array
	fGenerations[oldGeneration].entries[entry->index] = NULL;
	entry->index = kEntryNotInArray;

	// add to the current generation
	int32 index = atomic_add(&fGenerations[fCurrentGeneration].next_index, 1);
	if (index < kEntriesPerGeneration) {
		fGenerations[fCurrentGeneration].entries[index] = entry;
		entry->index = index;
		_nodeID = entry->node_id;
		return true;
	}

	// The current generation is full, so we probably need to clear the oldest
	// one to make room. We need the write lock for that.
	readLocker.Unlock();
	WriteLocker writeLocker(fLock);

	if (entry->index == kEntryRemoved) {
		// the entry has been removed in the meantime
		free(entry);
		return false;
	}

	_AddEntryToCurrentGeneration(entry);

	_nodeID = entry->node_id;
	return true;
}


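// Searches the whole table for an entry referring to nodeID (ignoring "." and
// ".." entries) and returns its name, with the parent directory ID in _dirID.
// The table is iterated without taking fLock, so this is only meant for
// debugging contexts (e.g. the kernel debugger) where concurrent modification
// is not a concern.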
const char*
EntryCache::DebugReverseLookup(ino_t nodeID, ino_t& _dirID)
{
	for (EntryTable::Iterator it = fEntries.GetIterator();
			EntryCacheEntry* entry = it.Next();) {
		if (nodeID == entry->node_id && strcmp(entry->name, ".") != 0
				&& strcmp(entry->name, "..") != 0) {
			_dirID = entry->dir_id;
			return entry->name;
		}
	}

	return NULL;
}


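// Appends the entry to the current generation's array; the caller must hold
// the write lock. If the array is full, the oldest generation is cleared (its
// remaining entries are removed from the hash table and freed) and becomes
// the new current generation, with this entry as its first element.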
void
EntryCache::_AddEntryToCurrentGeneration(EntryCacheEntry* entry)
{
	// the generation might not be full yet
	int32 index = fGenerations[fCurrentGeneration].next_index++;
	if (index < kEntriesPerGeneration) {
		fGenerations[fCurrentGeneration].entries[index] = entry;
		entry->generation = fCurrentGeneration;
		entry->index = index;
		return;
	}

	// we have to clear the oldest generation
	int32 newGeneration = (fCurrentGeneration + 1) % kGenerationCount;
	for (int32 i = 0; i < kEntriesPerGeneration; i++) {
		EntryCacheEntry* otherEntry = fGenerations[newGeneration].entries[i];
		if (otherEntry == NULL)
			continue;

		fGenerations[newGeneration].entries[i] = NULL;
		fEntries.Remove(otherEntry);
		free(otherEntry);
	}

	// set the new generation and add the entry
	fCurrentGeneration = newGeneration;
	fGenerations[newGeneration].next_index = 1;
	fGenerations[newGeneration].entries[0] = entry;
	entry->generation = newGeneration;
	entry->index = 0;
}