zalloc.c (39672) vs. zalloc.c (39863)
1/*
2 * This module derived from code donated to the FreeBSD Project by
3 * Matthew Dillon <dillon@backplane.com>
4 *
5 * Copyright (c) 1998 The FreeBSD Project
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 12 unchanged lines hidden (view full) ---

21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $Id: zalloc.c,v 1.2 1998/09/26 03:24:14 dillon Exp $
29 * $Id: zalloc.c,v 1.3 1998/09/26 10:48:50 dfr Exp $
30 */
31
32/*
33 * LIB/MEMORY/ZALLOC.C - self contained low-overhead memory pool/allocation
34 * subsystem
35 *
36 * This subsystem implements memory pools and memory allocation
37 * routines.

--- 26 unchanged lines hidden (view full) ---

64 * if you want. Reclaim does not function when z[n]xalloc() is used,
65 * only for z[n]alloc().
66 *
67 * Allocations and frees of 0 bytes are valid operations.
68 */
69
70#include "zalloc_defs.h"
71
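For orientation, here is a minimal usage sketch of the routines declared and defined below. It is illustrative only: "ExamplePool", "ExampleSpace", and the sizes are invented, and it relies only on the zinitPool(), zfree(), and znalloc() code shown in this file. Because zinitPool() leaves the whole region marked as used, the space is handed back to the pool with zfree() before the first allocation (the same pattern the zextendPool() note later in the file calls for).

/*
 * Illustrative sketch only; not part of zalloc.c.  The buffer is assumed
 * to be suitably aligned for the pool (MEMNODE_SIZE_MASK granularity).
 */
static char ExampleSpace[32768];
static struct MemPool ExamplePool;

static void
example(void)
{
    void *p;

    zinitPool(&ExamplePool, "example", NULL, NULL,  /* NULL hooks: znop/znot defaults */
        ExampleSpace, sizeof(ExampleSpace));        /* pool starts fully "used" */
    zfree(&ExamplePool, ExampleSpace, sizeof(ExampleSpace)); /* seed the freelist */

    if ((p = znalloc(&ExamplePool, 128)) != NULL) {
        /* ... use the 128 byte region ... */
        zfree(&ExamplePool, p, 128);                /* caller supplies the size */
    }
}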
72Prototype struct MemPool *DummyStructMemPool;
73Library void *znalloc(struct MemPool *mpool, iaddr_t bytes);
74Library void *zalloc(struct MemPool *mpool, iaddr_t bytes);
75Library void *zallocAlign(struct MemPool *mpool, iaddr_t bytes, iaddr_t align);
76Library void *zxalloc(struct MemPool *mp, void *addr1, void *addr2, iaddr_t bytes);
77Library void *znxalloc(struct MemPool *mp, void *addr1, void *addr2, iaddr_t bytes);
78Library char *zallocStr(struct MemPool *mpool, const char *s, int slen);
79Library void zfree(struct MemPool *mpool, void *ptr, iaddr_t bytes);
80Library void zfreeStr(struct MemPool *mpool, char *s);
81Library void zinitPool(struct MemPool *mp, const char *id, void (*fpanic)(const char *ctl, ...), int (*freclaim)(struct MemPool *memPool, iaddr_t bytes), void *pBase, iaddr_t pSize);
82Library void zclearPool(struct MemPool *mp);
83Library void znop(const char *ctl, ...);
84Library int znot(struct MemPool *memPool, iaddr_t bytes);
85Library void zallocstats(struct MemPool *mp);
86
87/*
72/*
88 * znop() - panic function if none supplied.
89 */
90
91void
92znop(const char *ctl, ...)
93{
94}
95
96/*
97 * znot() - reclaim function if none supplied
98 */
99
100int
101znot(struct MemPool *memPool, iaddr_t bytes)
102{
103 return(-1);
104}
105
106#ifndef MALLOCLIB
107
108/*
109 * zalloc() - allocate and zero memory from pool. Call reclaim
110 * and retry if appropriate, return NULL if unable to allocate
111 * memory.
112 */
113
114void *
115zalloc(MemPool *mp, iaddr_t bytes)
116{
117 void *ptr;
118
119 if ((ptr = znalloc(mp, bytes)) != NULL)
120 bzero(ptr, bytes);
121 return(ptr);
122}
123
124/*
125 * zallocAlign() - allocate and zero memory from pool, enforce specified
126 * alignment (must be power of 2) on allocated memory.
127 */
128
129void *
130zallocAlign(struct MemPool *mp, iaddr_t bytes, iaddr_t align)
131{
132 void *ptr;
133
134 --align;
135 bytes = (bytes + align) & ~align;
136
137 if ((ptr = znalloc(mp, bytes)) != NULL) {
138 bzero(ptr, bytes);
139 }
140 return(ptr);
141}
142
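A worked instance of the size rounding above (the call and pool are hypothetical): with align = 16, the "--align" step produces a mask of 15, and (100 + 15) & ~15 rounds a 100 byte request up to 112 bytes before znalloc() is called.

    /* hypothetical call, using ExamplePool from the earlier sketch */
    void *p;

    p = zallocAlign(&ExamplePool, 100, 16);  /* request padded from 100 to 112 bytes */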
143#endif
144
145/*
146 * znalloc() - allocate memory (without zeroing) from pool. Call reclaim
147 * and retry if appropriate, return NULL if unable to allocate
148 * memory.
149 */
150
151void *
152znalloc(MemPool *mp, iaddr_t bytes)
153{
154 /*
155 * align according to pool object size (can be 0). This is
156 * inclusive of the MEMNODE_SIZE_MASK minimum alignment.
157 *
158 */
159 bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;
160
161 if (bytes == 0)
162 return((void *)-1);
163
73 * znalloc() - allocate memory (without zeroing) from pool. Call reclaim
74 * and retry if appropriate, return NULL if unable to allocate
75 * memory.
76 */
77
78void *
79znalloc(MemPool *mp, iaddr_t bytes)
80{
81 /*
82 * align according to pool object size (can be 0). This is
83 * inclusive of the MEMNODE_SIZE_MASK minimum alignment.
84 *
85 */
86 bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;
87
88 if (bytes == 0)
89 return((void *)-1);
90
164 do {
165 /*
166 * locate freelist entry big enough to hold the object. If all objects
167 * are the same size, this is a constant-time function.
168 */
169
170 if (bytes <= mp->mp_Size - mp->mp_Used) {
171 MemNode **pmn;
172 MemNode *mn;
173
174 for (pmn = &mp->mp_First; (mn=*pmn) != NULL; pmn = &mn->mr_Next) {
175 if (bytes > mn->mr_Bytes)
176 continue;
177
178 /*
179 * Cut a chunk of memory out of the beginning of this
180 * block and fixup the link appropriately.
181 */
182
183 {
184 char *ptr = (char *)mn;
185
186 if (mn->mr_Bytes == bytes) {
187 *pmn = mn->mr_Next;
188 } else {
189 mn = (MemNode *)((char *)mn + bytes);
190 mn->mr_Next = ((MemNode *)ptr)->mr_Next;
191 mn->mr_Bytes = ((MemNode *)ptr)->mr_Bytes - bytes;
192 *pmn = mn;
193 }
194 mp->mp_Used += bytes;
195 return(ptr);
196 }
197 }
198 }
199 } while (mp->mp_Reclaim(mp, bytes) == 0);
200
201 /*
91 /*
202 * Memory pool is full, return NULL.
92 * locate freelist entry big enough to hold the object. If all objects
93 * are the same size, this is a constant-time function.
203 */
204
94 */
95
205 return(NULL);
206}
207
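In the 39672 version above, the do/while loop rescans the freelist for as long as the pool's reclaim hook returns 0; the default hook, znot(), returns -1, so the loop normally makes a single pass. A hypothetical reclaim hook, wired in through zinitPool(), is sketched below; myDiscardCaches() is an invented placeholder, not part of this file.

/* invented placeholder: application code that may zfree() space back to mp */
static int myDiscardCaches(struct MemPool *mp, iaddr_t bytes);

/*
 * Hypothetical reclaim hook: return 0 if memory may have been released
 * (znalloc() rescans the freelist), or nonzero to give up.
 */
static int
myReclaim(struct MemPool *mp, iaddr_t bytes)
{
    if (myDiscardCaches(mp, bytes) > 0)
        return(0);      /* something was zfree()'d, retry the allocation */
    return(-1);         /* nothing reclaimable, znalloc() returns NULL */
}

/* registered at pool setup: zinitPool(&ExamplePool, "example", NULL, myReclaim, base, size); */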
208#ifndef MALLOCLIB
209
210/*
211 * z[n]xalloc() - allocate memory from within a specific address region.
212 * If allocating AT a specific address, then addr2 must be
213 * set to addr1 + bytes (and this only works if addr1 is
214 * already aligned). addr1 and addr2 are aligned by
215 * MEMNODE_SIZE_MASK + 1 (i.e. they will be 8 or 16 byte
216 * aligned depending on the machine core).
217 */
218
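Two hedged examples of the contract described above; the addresses and sizes are invented and assumed to lie inside the pool. The first call accepts anything within a window, the second pins the allocation to an exact (already aligned) address by setting addr2 to addr1 + bytes.

    void *p;

    /* anywhere inside the (invented) window [0x4000, 0x20000) */
    p = znxalloc(&ExamplePool, (void *)0x4000, (void *)0x20000, 4096);

    /* exactly at 0x8000: addr2 == addr1 + bytes, addr1 already aligned */
    p = znxalloc(&ExamplePool, (void *)0x8000, (void *)(0x8000 + 4096), 4096);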
219void *
220zxalloc(MemPool *mp, void *addr1, void *addr2, iaddr_t bytes)
221{
222 void *ptr;
223
224 if ((ptr = znxalloc(mp, addr1, addr2, bytes)) != NULL)
225 bzero(ptr, bytes);
226 return(ptr);
227}
228
229void *
230znxalloc(MemPool *mp, void *addr1, void *addr2, iaddr_t bytes)
231{
232 /*
233 * align according to pool object size (can be 0). This is
234 * inclusive of the MEMNODE_SIZE_MASK minimum alignment.
235 */
236 bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;
237 addr1= (void *)(((iaddr_t)addr1 + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK);
238 addr2= (void *)(((iaddr_t)addr2 + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK);
239
240 if (bytes == 0)
241 return((void *)addr1);
242
243 /*
244 * Locate freelist entry big enough to hold the object that is within
245 * the allowed address range.
246 */
247
248 if (bytes <= mp->mp_Size - mp->mp_Used) {
249 MemNode **pmn;
250 MemNode *mn;
251
96 if (bytes <= mp->mp_Size - mp->mp_Used) {
97 MemNode **pmn;
98 MemNode *mn;
99
252 for (pmn = &mp->mp_First; (mn = *pmn) != NULL; pmn = &mn->mr_Next) {
253 int mrbytes = mn->mr_Bytes;
254 int offset = 0;
100 for (pmn = &mp->mp_First; (mn=*pmn) != NULL; pmn = &mn->mr_Next) {
101 if (bytes > mn->mr_Bytes)
102 continue;
255
256 /*
103
104 /*
257 * offset from base of mn to satisfy addr1. 0 or positive
105 * Cut a chunk of memory out of the beginning of this
106 * block and fixup the link appropriately.
258 */
259
107 */
108
260 if ((char *)mn < (char *)addr1)
261 offset = (char *)addr1 - (char *)mn;
262
263 /*
264 * truncate mrbytes to satisfy addr2. mrbytes may go negative
265 * if the mn is beyond the last acceptable address.
266 */
267
268 if ((char *)mn + mrbytes > (char *)addr2)
269 mrbytes = (saddr_t)((iaddr_t)addr2 - (iaddr_t)mn); /* signed */
270
271 /*
272 * beyond last acceptable address.
273 *
274 * before first acceptable address (if offset > mrbytes, the
275 * second conditional will always succeed).
276 *
277 * area overlapping acceptable address range is not big enough.
278 */
279
280 if (mrbytes < 0)
281 break;
282
283 if (mrbytes - offset < bytes)
284 continue;
285
286 /*
287 * Cut a chunk of memory out of the block and fixup the link
288 * appropriately.
289 *
290 * If offset != 0, we have to cut a chunk out from the middle of
291 * the block.
292 */
293
294 if (offset) {
295 MemNode *mnew = (MemNode *)((char *)mn + offset);
296
297 mnew->mr_Bytes = mn->mr_Bytes - offset;
298 mnew->mr_Next = mn->mr_Next;
299 mn->mr_Bytes = offset;
300 mn->mr_Next = mnew;
301 pmn = &mn->mr_Next;
302 mn = mnew;
303 }
304 {
305 char *ptr = (char *)mn;
109 {
110 char *ptr = (char *)mn;
111
306 if (mn->mr_Bytes == bytes) {
307 *pmn = mn->mr_Next;
308 } else {
309 mn = (MemNode *)((char *)mn + bytes);
310 mn->mr_Next = ((MemNode *)ptr)->mr_Next;
311 mn->mr_Bytes = ((MemNode *)ptr)->mr_Bytes - bytes;
312 *pmn = mn;
313 }
314 mp->mp_Used += bytes;
315 return(ptr);
316 }
317 }
318 }
112 if (mn->mr_Bytes == bytes) {
113 *pmn = mn->mr_Next;
114 } else {
115 mn = (MemNode *)((char *)mn + bytes);
116 mn->mr_Next = ((MemNode *)ptr)->mr_Next;
117 mn->mr_Bytes = ((MemNode *)ptr)->mr_Bytes - bytes;
118 *pmn = mn;
119 }
120 mp->mp_Used += bytes;
121 return(ptr);
122 }
123 }
124 }
125
126 /*
127 * Memory pool is full, return NULL.
128 */
129
319 return(NULL);
320}
321
130 return(NULL);
131}
132
322#endif
323
324/*
325 * zfree() - free previously allocated memory
326 */
327
328void
329zfree(MemPool *mp, void *ptr, iaddr_t bytes)
330{
331 /*

--- 6 unchanged lines hidden (view full) ---

338 return;
339
340 /*
341 * panic if illegal pointer
342 */
343
344 if ((char *)ptr < (char *)mp->mp_Base ||
345 (char *)ptr + bytes > (char *)mp->mp_End ||
133/*
134 * zfree() - free previously allocated memory
135 */
136
137void
138zfree(MemPool *mp, void *ptr, iaddr_t bytes)
139{
140 /*

--- 6 unchanged lines hidden (view full) ---

147 return;
148
149 /*
150 * panic if illegal pointer
151 */
152
153 if ((char *)ptr < (char *)mp->mp_Base ||
154 (char *)ptr + bytes > (char *)mp->mp_End ||
346 ((iaddr_t)ptr & MEMNODE_SIZE_MASK) != 0
347 ) {
348 mp->mp_Panic(
349 "zfree(%s,0x%08lx,%d): wild pointer",
350 mp->mp_Ident,
351 (long)ptr,
352 bytes
353 );
354 }
155 ((iaddr_t)ptr & MEMNODE_SIZE_MASK) != 0)
156 panic("zfree(%p,%d): wild pointer", ptr, bytes);
355
356 /*
357 * free the segment
358 */
359
360 {
361 MemNode **pmn;
362 MemNode *mn;

--- 6 unchanged lines hidden (view full) ---

369 * - check range
370 * - check merge with next area
371 * - check merge with previous area
372 */
373 if ((char *)ptr <= (char *)mn) {
374 /*
375 * range check
376 */
157
158 /*
159 * free the segment
160 */
161
162 {
163 MemNode **pmn;
164 MemNode *mn;

--- 6 unchanged lines hidden (view full) ---

171 * - check range
172 * - check merge with next area
173 * - check merge with previous area
174 */
175 if ((char *)ptr <= (char *)mn) {
176 /*
177 * range check
178 */
377 if ((char *)ptr + bytes > (char *)mn) {
378 mp->mp_Panic("zfree(%s,0x%08lx,%d): corrupt memlist1",
379 mp->mp_Ident,
380 (long)ptr,
381 bytes
382 );
383 }
179 if ((char *)ptr + bytes > (char *)mn)
180 panic("zfree(%p,%d): corrupt memlist1",ptr, bytes);
384
385 /*
386 * merge against next area or create independent area
387 */
388
389 if ((char *)ptr + bytes == (char *)mn) {
390 ((MemNode *)ptr)->mr_Next = mn->mr_Next;
391 ((MemNode *)ptr)->mr_Bytes= bytes + mn->mr_Bytes;

--- 13 unchanged lines hidden (view full) ---

405 ((MemNode *)pmn)->mr_Next = mn->mr_Next;
406 ((MemNode *)pmn)->mr_Bytes += mn->mr_Bytes;
407 mn = (MemNode *)pmn;
408 }
409 }
410 return;
411 /* NOT REACHED */
412 }
181
182 /*
183 * merge against next area or create independent area
184 */
185
186 if ((char *)ptr + bytes == (char *)mn) {
187 ((MemNode *)ptr)->mr_Next = mn->mr_Next;
188 ((MemNode *)ptr)->mr_Bytes= bytes + mn->mr_Bytes;

--- 13 unchanged lines hidden (view full) ---

202 ((MemNode *)pmn)->mr_Next = mn->mr_Next;
203 ((MemNode *)pmn)->mr_Bytes += mn->mr_Bytes;
204 mn = (MemNode *)pmn;
205 }
206 }
207 return;
208 /* NOT REACHED */
209 }
413 if ((char *)ptr < (char *)mn + mn->mr_Bytes) {
414 mp->mp_Panic("zfree(%s,0x%08lx,%d): corrupt memlist2",
415 mp->mp_Ident,
416 (long)ptr,
417 bytes
418 );
419 }
210 if ((char *)ptr < (char *)mn + mn->mr_Bytes)
211 panic("zfree(%p,%d): corrupt memlist2", ptr, bytes);
420 }
421 /*
422 * We are beyond the last MemNode, append new MemNode. Merge against
423 * previous area if possible.
424 */
425 if (pmn == &mp->mp_First ||
426 (char *)pmn + ((MemNode *)pmn)->mr_Bytes != (char *)ptr
427 ) {
428 ((MemNode *)ptr)->mr_Next = NULL;
429 ((MemNode *)ptr)->mr_Bytes = bytes;
430 *pmn = (MemNode *)ptr;
431 mn = (MemNode *)ptr;
432 } else {
433 ((MemNode *)pmn)->mr_Bytes += bytes;
434 mn = (MemNode *)pmn;
435 }
436 }
437}
438
212 }
213 /*
214 * We are beyond the last MemNode, append new MemNode. Merge against
215 * previous area if possible.
216 */
217 if (pmn == &mp->mp_First ||
218 (char *)pmn + ((MemNode *)pmn)->mr_Bytes != (char *)ptr
219 ) {
220 ((MemNode *)ptr)->mr_Next = NULL;
221 ((MemNode *)ptr)->mr_Bytes = bytes;
222 *pmn = (MemNode *)ptr;
223 mn = (MemNode *)ptr;
224 } else {
225 ((MemNode *)pmn)->mr_Bytes += bytes;
226 mn = (MemNode *)pmn;
227 }
228 }
229}
230
439#ifndef MALLOCLIB
440
441/*
231/*
442 * zallocStr() - allocate memory and copy string.
443 */
444
445char *
446zallocStr(MemPool *mp, const char *s, int slen)
447{
448 char *ptr;
449
450 if (slen < 0)
451 slen = strlen(s);
452 if ((ptr = znalloc(mp, slen + 1)) != NULL) {
453 bcopy(s, ptr, slen);
454 ptr[slen] = 0;
455 }
456 return(ptr);
457}
458
459/*
460 * zfreeStr() - free memory associated with an allocated string.
461 */
462
463void
464zfreeStr(MemPool *mp, char *s)
465{
466 zfree(mp, s, strlen(s) + 1);
467}
468
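A short, hypothetical use of the pair above: passing a negative slen lets zallocStr() measure the string itself, and zfreeStr() recomputes strlen() + 1 when releasing the copy (so it assumes the string has not been shortened in place).

    /* illustrative only; ExamplePool is from the earlier sketch */
    char *name;

    if ((name = zallocStr(&ExamplePool, "example", -1)) != NULL) {
        /* ... use the copy ... */
        zfreeStr(&ExamplePool, name);   /* frees strlen(name) + 1 bytes */
    }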
469#endif
470
471/*
472 * zinitPool() - initialize a memory pool
473 */
474
475void
476zinitPool(
477 MemPool *mp,
478 const char *id,
479 void (*fpanic)(const char *ctl, ...),
480 int (*freclaim)(MemPool *memPool, iaddr_t bytes),
481 void *pBase,
482 iaddr_t pSize
483) {
484 if (fpanic == NULL)
485 fpanic = znop;
486 if (freclaim == NULL)
487 freclaim = znot;
488
489 if (id != (const char *)-1)
490 mp->mp_Ident = id;
491 mp->mp_Base = pBase;
492 mp->mp_End = (char *)pBase + pSize;
493 mp->mp_First = NULL;
494 mp->mp_Size = pSize;
495 mp->mp_Used = pSize;
496 mp->mp_Panic = fpanic;
497 mp->mp_Reclaim = freclaim;
498}
499
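The fpanic hook uses a printf()-style prototype, but as znop() shows it may ignore its arguments entirely. A hypothetical hook that reports the format string before halting is sketched below; printf() is assumed to be available, as it is in zallocstats(), and the variable arguments are dropped for brevity.

/* Hypothetical panic hook; only the format string itself is reported. */
static void
myPanic(const char *ctl, ...)
{
    printf("zalloc panic: %s\n", ctl);
    for (;;)        /* halt: the pool state is no longer trustworthy */
        ;
}

/* zinitPool(&ExamplePool, "example", myPanic, NULL, base, size); */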
500/*
501 * zextendPool() - extend memory pool to cover additional space.
502 *
503 * Note: the added memory starts out as allocated, you
504 * must free it to make it available to the memory subsystem.
505 *
506 * Note: mp_Size may not reflect (mp_End - mp_Base) range
507 * due to other parts of the system doing their own sbrk()
508 * calls.

--- 19 unchanged lines hidden (view full) ---

528 /* mp->mp_Size += (char *)base - (char *)pend; */
529 mp->mp_Used += (char *)base - (char *)pend;
530 mp->mp_End = (char *)base;
531 }
532 }
533 mp->mp_Size += bytes;
534}
535
232 * zextendPool() - extend memory pool to cover additional space.
233 *
234 * Note: the added memory starts out as allocated, you
235 * must free it to make it available to the memory subsystem.
236 *
237 * Note: mp_Size may not reflect (mp_End - mp_Base) range
238 * due to other parts of the system doing their own sbrk()
239 * calls.

--- 19 unchanged lines hidden (view full) ---

259 /* mp->mp_Size += (char *)base - (char *)pend; */
260 mp->mp_Used += (char *)base - (char *)pend;
261 mp->mp_End = (char *)base;
262 }
263 }
264 mp->mp_Size += bytes;
265}
266
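Because the added memory starts out in the allocated state (per the note above), callers typically pair zextendPool() with an immediate zfree() of the same region. The sketch below assumes the usual zextendPool(mp, base, bytes) parameter order (the full definition is hidden in this view) and uses sbrk(), mentioned in the note, as the source of new space; "incr" is invented.

    char *base;

    if ((base = sbrk(incr)) != (char *)-1) {
        zextendPool(&ExamplePool, base, incr);
        zfree(&ExamplePool, base, incr);    /* now available to znalloc() */
    }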
536#ifndef MALLOCLIB
537
538/*
539 * zclearPool() - Free all memory associated with a memory pool,
540 * destroying any previous allocations. Commonly
541 * called after zinitPool() to make a pool available
542 * for use.
543 */
544
545void
546zclearPool(MemPool *mp)
547{
548 MemNode *mn = mp->mp_Base;
549
550 mn->mr_Next = NULL;
551 mn->mr_Bytes = mp->mp_Size;
552 mp->mp_First = mn;
553}
554
555#endif
556
557#ifdef ZALLOCDEBUG
558
559void
560zallocstats(MemPool *mp)
561{
562 int abytes = 0;
563 int hbytes = 0;
564 int fcount = 0;
565 MemNode *mn;
566
267#ifdef ZALLOCDEBUG
268
269void
270zallocstats(MemPool *mp)
271{
272 int abytes = 0;
273 int hbytes = 0;
274 int fcount = 0;
275 MemNode *mn;
276
567 printf("Pool %s, %d bytes reserved", mp->mp_Ident, (int) mp->mp_Size);
277 printf("%d bytes reserved", (int) mp->mp_Size);
568
569 mn = mp->mp_First;
570
571 if ((void *)mn != (void *)mp->mp_Base) {
572 abytes += (char *)mn - (char *)mp->mp_Base;
573 }
574
575 while (mn) {

--- 17 unchanged lines hidden ---
278
279 mn = mp->mp_First;
280
281 if ((void *)mn != (void *)mp->mp_Base) {
282 abytes += (char *)mn - (char *)mp->mp_Base;
283 }
284
285 while (mn) {

--- 17 unchanged lines hidden ---