Lines Matching refs:PAGESIZE

648 if (size <= PAGESIZE / 2)
651 auto psz = psize / PAGESIZE;
652 auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
689 alloc_size = newsz * PAGESIZE;
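
The hits at 648-689 show the size-class split: requests up to PAGESIZE/2 go to binned small-object pools, anything larger is rounded up to whole pages. A minimal sketch of that split, assuming hypothetical smallAlloc/bigAlloc stand-ins rather than the GC's real entry points:

    enum PAGESIZE = 4096;

    // Hypothetical stand-ins for the GC's internal small/large paths.
    void* smallAlloc(size_t size)   { return (new ubyte[size]).ptr; }
    void* bigAlloc(size_t npages)   { return (new ubyte[npages * PAGESIZE]).ptr; }

    void* allocate(size_t size)
    {
        if (size <= PAGESIZE / 2)                        // the test at 648
            return smallAlloc(size);                     // binned small-object pool
        auto npages = (size + PAGESIZE - 1) / PAGESIZE;  // round up to whole pages
        return bigAlloc(npages);
    }
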
756 if (psize < PAGESIZE)
759 auto psz = psize / PAGESIZE;
760 auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
761 auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
779 debug (MEMSTOMP) memset(pool.baseAddr + (pagenum + psz) * PAGESIZE, 0xF0, sz * PAGESIZE);
784 return (psz + sz) * PAGESIZE;
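
Lines 756-784 are the in-place extend path: the current size and the requested bounds are all converted to page counts with the usual round-up division before the pool's page table is consulted. That idiom as a standalone helper (the name pagesFor is mine, not from the source):

    enum PAGESIZE = 4096;

    // Round a byte count up to the number of whole pages that cover it.
    size_t pagesFor(size_t bytes)
    {
        return (bytes + PAGESIZE - 1) / PAGESIZE;
    }

    unittest
    {
        assert(pagesFor(1) == 1);
        assert(pagesFor(PAGESIZE) == 1);
        assert(pagesFor(PAGESIZE + 1) == 2);
    }
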
868 debug (MEMSTOMP) memset(p, 0xF2, npages * PAGESIZE);
939 // 1) size is a power of 2 for less than PAGESIZE values
940 // 2) base of memory pool is aligned on PAGESIZE boundary
941 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
951 // 1) size is a power of 2 for less than PAGESIZE values
952 // 2) base of memory pool is aligned on PAGESIZE boundary
953 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
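
The comments at 939/951 justify a one-expression interior-pointer test: because small bin sizes are powers of two below PAGESIZE and every pool is PAGESIZE-aligned, a block start is always aligned to min(size, PAGESIZE), so any bit left after the double mask flags a pointer into the middle of a block. A hedged illustration (addresses are made-up examples):

    enum PAGESIZE = 4096;

    // True if p cannot be the start of a block of the given size, assuming
    // 1) size is a power of 2 for sizes below PAGESIZE, and
    // 2) the pool base is PAGESIZE-aligned.
    // The double mask checks alignment to min(size, PAGESIZE).
    bool isInterior(size_t p, size_t size)
    {
        return (p & (size - 1) & (PAGESIZE - 1)) != 0;
    }

    unittest
    {
        assert(!isInterior(0x10_0000, 16));    // block start in a 16-byte bin
        assert( isInterior(0x10_0008, 16));    // points into the block
        assert(!isInterior(0x10_1000, 8192));  // page alignment suffices for big blocks
    }
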
1208 stats.freeSize += PAGESIZE;
1210 stats.usedSize += PAGESIZE;
1231 { PAGESIZE = 4096,
1267 alias PageBits = GCBits.wordtype[PAGESIZE / 16 / GCBits.BITS_PER_WORD];
1268 static assert(PAGESIZE % (GCBits.BITS_PER_WORD * 16) == 0);
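
PageBits at 1267 sizes a per-page bitmap at one bit per 16-byte granule: with PAGESIZE = 4096 that is 4096/16 = 256 bits, i.e. four 64-bit words, and the static assert at 1268 guarantees the division is exact. The sweep-time indexing at 2241 and 2313 uses the same PAGESIZE/16 granularity. The arithmetic, spelled out (a 64-bit GCBits.BITS_PER_WORD is assumed here):

    enum PAGESIZE = 4096;
    enum BITS_PER_WORD = 64;                                // assumed word width

    enum granulesPerPage = PAGESIZE / 16;                   // 256 granules of 16 bytes
    enum wordsPerPage    = granulesPerPage / BITS_PER_WORD; // 4 words per page

    static assert(PAGESIZE % (BITS_PER_WORD * 16) == 0);    // division is exact
    static assert(wordsPerPage == 4);
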
1535 size_t pn = offset / PAGESIZE;
1546 offset -= pageOffset * PAGESIZE;
1549 return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
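
The expression at 1549 rounds an offset down to its page boundary: for an unsigned offset, offset.max ^ (PAGESIZE-1) is the same mask as ~(PAGESIZE-1), and ANDing with it clears the low bits because PAGESIZE is a power of two. The two spellings, checked against each other in a standalone sketch:

    enum size_t PAGESIZE = 4096;

    size_t pageBase(size_t offset)
    {
        return offset & (size_t.max ^ (PAGESIZE - 1));   // as written at 1549
    }

    unittest
    {
        assert(pageBase(0x1234) == 0x1000);
        // Equivalent, more common spelling of the same mask:
        assert(pageBase(0x1234) == (0x1234 & ~(PAGESIZE - 1)));
    }
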
1609 size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
1616 return pool.npages * PAGESIZE;
1665 return isLowOnMem(mappedPages * PAGESIZE);
1735 immutable npages = (size + PAGESIZE - 1) / PAGESIZE;
1797 auto p = pool.baseAddr + pn * PAGESIZE;
1800 alloc_size = npages * PAGESIZE;
1819 size_t minPages = (config.minPoolSize << 20) / PAGESIZE;
1825 if (n < size_t.max/PAGESIZE)
1836 n *= (1 << 20) / PAGESIZE; // convert MB to pages
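
Both 1819 and 1836 convert megabytes to pages: (1 << 20) / PAGESIZE is 1,048,576 / 4,096 = 256 pages per MB, and the guard at 1825 keeps the page count from overflowing size_t when multiplied back by PAGESIZE. A worked version of the conversion (the helper name is mine):

    enum PAGESIZE = 4096;

    // Megabytes -> pages; 1 MB == (1 << 20) bytes == 256 pages of 4 KB.
    size_t mbToPages(size_t mb)
    {
        return mb * ((1 << 20) / PAGESIZE);
    }

    unittest
    {
        assert(mbToPages(1) == 256);
        assert(mbToPages(64) == 16_384);   // a 64 MB pool spans 16384 pages
    }
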
1859 if (mappedPages * PAGESIZE > maxPoolMemory)
1860 maxPoolMemory = mappedPages * PAGESIZE;
1976 if ((cast(size_t)p & ~cast(size_t)(PAGESIZE-1)) == pcache)
1997 size_t pn = offset / PAGESIZE;
2026 pcache = cast(size_t)p & ~cast(size_t)(PAGESIZE-1);
2036 stack[stackPos++] = Range(base, base + pool.bPageOffsets[pn] * PAGESIZE);
2044 base = pool.baseAddr + (pn * PAGESIZE);
2045 biti = pn * (PAGESIZE >> pool.shiftBy);
2047 pcache = cast(size_t)p & ~cast(size_t)(PAGESIZE-1);
2052 stack[stackPos++] = Range(base, base + pool.bPageOffsets[pn] * PAGESIZE);
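
The pcache hits at 1976-2052 are a one-entry cache in the mark loop: once a pointer's page has been handled, its page base is remembered, and any later pointer whose masked address matches is skipped without another pool lookup. A reduced sketch of the pattern, with markOne as a hypothetical stand-in for the real pool lookup and bit setting:

    enum size_t PAGESIZE = 4096;

    size_t pcache;                   // page base of the last pointer handled

    void markPointer(void* p)
    {
        // Same page as last time? Nothing new to do.
        if ((cast(size_t)p & ~cast(size_t)(PAGESIZE - 1)) == pcache)
            return;
        markOne(p);                  // hypothetical: look up the pool, set mark bits
        pcache = cast(size_t)p & ~cast(size_t)(PAGESIZE - 1);
    }

    void markOne(void* p) { /* pool lookup and bit setting would go here */ }
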
2187 void *p = pool.baseAddr + pn * PAGESIZE;
2193 size_t size = pool.bPageOffsets[pn] * PAGESIZE - SENTINEL_EXTRA;
2207 debug (MEMSTOMP) memset(p, 0xF3, PAGESIZE);
2221 { p += PAGESIZE;
2222 memset(p, 0xF3, PAGESIZE);
2239 void *p = pool.baseAddr + pn * PAGESIZE;
2240 void *ptop = p + PAGESIZE;
2241 immutable base = pn * (PAGESIZE/16);
2313 size_t bitbase = pn * (PAGESIZE / 16);
2314 size_t bittop = bitbase + (PAGESIZE / 16);
2330 p = pool.baseAddr + pn * PAGESIZE;
2331 for (u = 0; u < PAGESIZE; u += size)
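
The sweep loops at 2239-2331 walk a small-object page block by block: with fixed-size blocks packed from the page start, stepping from 0 to PAGESIZE by the bin size visits every block exactly once. In sketch form (eachBlock is my name for the pattern, not the source's):

    enum PAGESIZE = 4096;

    // Visit every block of a small-object page (bin size divides PAGESIZE).
    void eachBlock(byte* pageBase, size_t binSize, void delegate(byte*) visit)
    {
        for (size_t u = 0; u < PAGESIZE; u += binSize)
            visit(pageBase + u);
    }

    unittest
    {
        size_t count;
        eachBlock(null, 16, (byte* p) { ++count; });
        assert(count == PAGESIZE / 16);   // 256 blocks of 16 bytes per page
    }
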
2448 auto pn = offset / PAGESIZE;
2458 biti = pn * (PAGESIZE >> pool.shiftBy);
2569 size_t pn = offset / PAGESIZE;
2634 poolsize = npages * PAGESIZE;
2639 assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
2693 result = os_mem_unmap(baseAddr, npages * PAGESIZE);
2822 immutable beg = pagenum * (PAGESIZE / 16 / GCBits.BITS_PER_WORD);
2860 return cast(size_t)(p - baseAddr) / PAGESIZE;
2901 //if (baseAddr + npages * PAGESIZE != topAddr)
2903 assert(baseAddr + npages * PAGESIZE == topAddr);
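
Lines 2634-2903 are the pool's own bookkeeping: the mapping covers npages * PAGESIZE bytes, its base is asserted page-aligned, and pagenumOf at 2860 inverts the page-to-address mapping. The round trip, as a sketch under those two invariants:

    enum PAGESIZE = 4096;

    struct Pool
    {
        byte* baseAddr;      // asserted PAGESIZE-aligned, as at 2639
        size_t npages;

        // Page index of an address inside this pool.
        size_t pagenumOf(void* p) const
        {
            return cast(size_t)(cast(byte*)p - baseAddr) / PAGESIZE;
        }
    }

    unittest
    {
        auto pool = Pool(cast(byte*) 0x10_0000, 8);
        byte* p = pool.baseAddr + 3 * PAGESIZE + 42;   // somewhere in page 3
        assert(pool.pagenumOf(p) == 3);
    }
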
3016 return bPageOffsets[pagenum] * PAGESIZE;
3027 size_t pn = offset / PAGESIZE;
3035 info.base = baseAddr + pn * PAGESIZE;
3036 info.size = bPageOffsets[pn] * PAGESIZE;
3054 auto p = sentinel_add(baseAddr + pn * PAGESIZE);
3055 size_t size = bPageOffsets[pn] * PAGESIZE - SENTINEL_EXTRA;
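
Every bPageOffsets[pn] * PAGESIZE hit (2036, 2052, 2193, 3016, 3036, 3055) reads the same invariant: for the first page of a large allocation, bPageOffsets stores the allocation's length in pages, so a single multiply recovers the byte size, and the sentinel build subtracts SENTINEL_EXTRA for its guard bytes. A toy model of that table (the layout for continuation pages is assumed away here, not copied from the source):

    enum PAGESIZE = 4096;

    // Toy model: for a large object's first page, store its length in pages.
    uint[] bPageOffsets;

    void registerLargeAlloc(size_t firstPage, size_t npages)
    {
        bPageOffsets[firstPage] = cast(uint) npages;
    }

    size_t sizeOfLargeAlloc(size_t firstPage)
    {
        return bPageOffsets[firstPage] * PAGESIZE;     // as at 3016 / 3036
    }

    unittest
    {
        bPageOffsets = new uint[16];
        registerLargeAlloc(3, 5);                      // 5 pages starting at page 3
        assert(sizeOfLargeAlloc(3) == 5 * PAGESIZE);   // 20_480 bytes
    }
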
3075 debug (MEMSTOMP) memset(baseAddr + pn * PAGESIZE, 0xF3, n * PAGESIZE);
3108 size_t pn = offset / PAGESIZE;
3131 auto p = baseAddr + pn * PAGESIZE;
3132 const ptop = p + PAGESIZE;
3133 immutable base = pn * (PAGESIZE/16);
3189 void* p = baseAddr + pn * PAGESIZE;
3190 void* ptop = p + PAGESIZE - size;