Lines Matching refs:allocation

63  *	data blocks for which quick allocation/deallocation is possible.
492 * The allocation log and all the related variables are protected by the zone lock for the zone_of_interest
558 * backtrace. Every free, we examine the table and determine if the allocation was being tracked,
563 * stores the backtrace associated with that allocation. This provides uniquing for the relatively large
581 * Counters for allocation statistics.
588 /* Times a new record lands on a spot previously occupied by a freed allocation */
602 * Structure for keeping track of an allocation
603 * An allocation bucket is in use if its element is not NULL
607 vm_size_t za_size; /* how much memory did this allocation take up? */
608 uint32_t za_trace_index; /* index into ztraces for backtrace associated with allocation */
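
Taken together, the fragments above sketch the zleak bookkeeping: a fixed-size table of allocation buckets, each keyed by the hashed address of a live allocation and pointing into a second table (ztraces) that uniques backtraces. Below is a minimal userspace model of the bucket structure and the address hash; the bit mixing, the bucket count, and the vm_size_t stand-in are assumptions for illustration, not the kernel's definitions.

#include <stdint.h>
#include <stddef.h>

typedef size_t vm_size_t;          /* stand-in for the kernel type */

#define ZLEAK_ALLOC_BUCKETS 4096u  /* assumption: bucket count, power of two */

/* One tracked allocation; a bucket is in use iff za_element is non-zero. */
struct zallocation {
	uintptr_t za_element;      /* address of the tracked allocation (0 = unused) */
	vm_size_t za_size;         /* how much memory the allocation took up */
	uint32_t  za_trace_index;  /* index into ztraces for its backtrace */
	uint32_t  za_hit_count;    /* how often this bucket has been selected */
};

static struct zallocation zallocations[ZLEAK_ALLOC_BUCKETS];

/* Reduce an allocation address to a bucket index.  The mixing here is only
 * illustrative; max_size must be a power of two for the mask to work. */
static uint32_t
hashaddr(uintptr_t addr, uint32_t max_size)
{
	return (uint32_t)((addr >> 4) ^ (addr >> 13)) & (max_size - 1);
}
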
806 * This function records the allocation in the allocations table,
809 * If the allocation slot is in use, the old allocation is replaced with the new allocation, and
812 * The refcount is incremented by the amount of memory the allocation consumes.
827 struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)];
832 allocation->za_hit_count++;
836 * If the allocation bucket we want to be in is occupied, and if the occupier
839 if (allocation->za_element != (uintptr_t) 0 && trace_index == allocation->za_trace_index) {
876 /* STEP 2: Store the allocation record in the allocations array. */
878 if (allocation->za_element != (uintptr_t) 0) {
880 * Straight up replace any allocation record that was there. We don't want to do the work
881 * to preserve the allocation entries that were there, because we only record a subset of the
887 struct ztrace* associated_trace = &ztraces[allocation->za_trace_index];
888 /* Knock off old allocation's size, not the new allocation */
889 associated_trace->zt_size -= allocation->za_size;
890 } else if (allocation->za_trace_index != 0) {
895 allocation->za_element = addr;
896 allocation->za_trace_index = trace_index;
897 allocation->za_size = allocation_size;
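
The recording path above hashes the address into zallocations, bails out if the occupying record already carries the same backtrace, and otherwise overwrites the slot, un-charging the evicted record's size from its trace before charging the new one. Continuing the model started earlier (lock handling is elided; the trace bucket layout and the return convention are assumptions):

/* Unique backtraces live in their own table; zt_size aggregates the bytes
 * currently charged to each backtrace. */
struct ztrace {
	vm_size_t zt_size;        /* total outstanding bytes for this backtrace */
	uint32_t  zt_hit_count;   /* how often this trace bucket was selected */
};

#define ZLEAK_TRACE_BUCKETS 2048u  /* assumption: power of two */
static struct ztrace ztraces[ZLEAK_TRACE_BUCKETS];

/* Record one sampled allocation.  Returns 1 if a new record was stored,
 * 0 if it was dropped because the bucket already tracks the same trace. */
static int
record_allocation(uintptr_t addr, vm_size_t allocation_size, uint32_t trace_index)
{
	struct zallocation *allocation = &zallocations[hashaddr(addr, ZLEAK_ALLOC_BUCKETS)];
	struct ztrace *trace = &ztraces[trace_index];

	allocation->za_hit_count++;
	trace->zt_hit_count++;

	/* Occupied by a record with the same backtrace: nothing new to learn. */
	if (allocation->za_element != 0 && trace_index == allocation->za_trace_index)
		return 0;

	if (allocation->za_element != 0) {
		/* Straight replacement: knock the evicted record's size off its trace. */
		ztraces[allocation->za_trace_index].zt_size -= allocation->za_size;
	}

	allocation->za_element     = addr;
	allocation->za_trace_index = trace_index;
	allocation->za_size        = allocation_size;
	trace->zt_size            += allocation_size;
	return 1;
}
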
909 * Free the allocation record and release the stacktrace.
919 struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)];
925 if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
926 /* if the allocation was the one, grab the lock, check again, then delete it */
929 if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
933 if (allocation->za_size != allocation_size) {
935 (uintptr_t)allocation_size, (uintptr_t)allocation->za_size);
938 trace = &ztraces[allocation->za_trace_index];
945 /* A NULL element means the allocation bucket is unused */
946 allocation->za_element = 0;
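
The free path is the mirror image: hash the freed address, and only if that bucket really tracks this address release the record, subtracting its size from the owning trace. A sketch continuing the same model; the kernel re-checks under the zleak lock and treats a size mismatch as fatal, both of which are reduced to comments and early returns here.

/* Untrack an allocation when its element is freed. */
static void
untrack_allocation(uintptr_t addr, vm_size_t allocation_size)
{
	struct zallocation *allocation = &zallocations[hashaddr(addr, ZLEAK_ALLOC_BUCKETS)];

	/* Act only if this bucket is tracking exactly this address; in the kernel
	 * this check is repeated under the zleak lock before anything is touched. */
	if (allocation->za_element != addr ||
	    allocation->za_trace_index >= ZLEAK_TRACE_BUCKETS)
		return;

	/* A size mismatch would mean the record is stale or corrupt. */
	if (allocation->za_size != allocation_size)
		return;

	ztraces[allocation->za_trace_index].zt_size -= allocation->za_size;

	/* A zero element marks the bucket unused again. */
	allocation->za_element = 0;
}
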
1086 vm_size_t alloc, /* allocation size */
1114 * we look for an allocation size with less than 1% waste
1116 * otherwise, we look for an allocation size with least fragmentation
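
The sizing comment above corresponds to how a zone picks the size of the chunks it grabs from the VM: walk a few page-multiple candidates, accept the first whose leftover space is under 1% of the chunk, and otherwise fall back to the least-wasteful candidate. A standalone sketch of that search (the page size and the candidate range are assumptions):

#include <stddef.h>

#define MODEL_PAGE_SIZE 4096u   /* assumption: 4 KiB pages */

/* Choose how many bytes to request per refill of a zone whose elements
 * are elem_size bytes each (0 < elem_size <= MODEL_PAGE_SIZE assumed). */
static size_t
pick_alloc_size(size_t elem_size)
{
	size_t best       = MODEL_PAGE_SIZE;
	size_t best_waste = MODEL_PAGE_SIZE % elem_size;

	for (unsigned pages = 1; pages <= 5; pages++) {   /* candidate range: assumption */
		size_t alloc = pages * MODEL_PAGE_SIZE;
		size_t waste = alloc % elem_size;   /* bytes that cannot hold a whole element */

		/* First candidate wasting less than 1% of the chunk wins outright. */
		if (waste < alloc / 100)
			return alloc;

		/* Otherwise remember the least-fragmented candidate so far. */
		if (waste < best_waste) {
			best_waste = waste;
			best       = alloc;
		}
	}
	return best;
}
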
1219 * zinit'ed before we can do a kmem_alloc, so we have to defer allocation in that case. zlog_ready is set to
1221 * of the VM related zones that's set up early on, we will skip allocation of the log until zinit is called again
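
These two fragments describe a bootstrapping wrinkle: the zone to be logged may be created before kmem_alloc can provide the log buffer, so the buffer allocation is deferred and retried on a later zinit call once zlog_ready has been set. A small sketch of that defer-and-retry pattern; apart from zlog_ready, the names are assumptions and malloc stands in for kmem_alloc.

#include <stdbool.h>
#include <stdlib.h>

static bool  zlog_ready;        /* set once the allocator can back the log buffer */
static void *zone_log_buffer;   /* deferred log storage */

/* Stand-in for the zinit-time hook that sets up logging for the zone of
 * interest.  If it runs too early it leaves the buffer unallocated and
 * relies on being called again later, after zlog_ready has been set. */
static void
try_setup_zone_log(size_t log_bytes)
{
	if (!zlog_ready)
		return;                               /* too early in boot; retry later */

	if (zone_log_buffer == NULL)
		zone_log_buffer = malloc(log_bytes);  /* stand-in for kmem_alloc */
}
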
1386 * be more than the caller asked for since the memory allocation is
1674 uint32_t zleak_tracedepth = 0; /* log this allocation if nonzero */
1860 * If we're sampling this allocation, add it to the zleaks hash table.
1865 /* If it failed, roll back the counter so we sample the next allocation instead. */
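
The two fragments above describe the sampling discipline on the allocation path: only every Nth allocation in a zone has its backtrace captured and entered into the zleaks table, and if that insert fails the counter is rolled back so the very next allocation gets sampled instead. Continuing the model, a sketch of the counter logic; the sample factor, the field names, and treating a dropped record as the failure case are assumptions.

#define SAMPLE_FACTOR 1000u   /* assumption: capture roughly 1 in 1000 allocations */

struct model_zone {
	uint32_t zleak_capture;   /* allocations seen since the last sample */
};

/* Called on every allocation from the zone; samples one in SAMPLE_FACTOR. */
static void
maybe_sample(struct model_zone *z, uintptr_t addr, vm_size_t size, uint32_t trace_index)
{
	if (++z->zleak_capture < SAMPLE_FACTOR)
		return;                     /* not this allocation's turn */

	z->zleak_capture = 0;               /* start the next sampling window */

	if (!record_allocation(addr, size, trace_index)) {
		/* Nothing was recorded: roll the counter back so the next
		 * allocation is sampled instead of waiting a full window. */
		z->zleak_capture = SAMPLE_FACTOR;
	}
}
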
1881 * Look for a place to record this new allocation. We implement two different logging strategies
1919 * Save a record of this allocation
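
These two fragments refer to the separate zone log used for corruption debugging, which keeps a bounded history of allocation records rather than a hash of live allocations. The listing only hints at its two strategies, so the following is a generic illustration of a bounded, overwrite-oldest record log; every name and size in it is an assumption.

#include <stdint.h>

#define LOG_RECORDS 8000u   /* assumption: number of records kept */
#define MAX_DEPTH   15u     /* assumption: frames kept per backtrace */

struct log_record {
	uintptr_t lr_element;               /* address that was allocated (0 = unused slot) */
	uintptr_t lr_backtrace[MAX_DEPTH];  /* callers captured at allocation time */
};

static struct log_record zone_log[LOG_RECORDS];
static unsigned zone_log_next;          /* next slot to (over)write */

/* Save a record of one allocation, overwriting the oldest entry once full. */
static void
log_allocation(uintptr_t addr, const uintptr_t *bt, unsigned depth)
{
	struct log_record *r = &zone_log[zone_log_next];

	zone_log_next = (zone_log_next + 1) % LOG_RECORDS;
	r->lr_element = addr;
	for (unsigned i = 0; i < MAX_DEPTH; i++)
		r->lr_backtrace[i] = (i < depth) ? bt[i] : 0;
}
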
2035 uint32_t zleak_tracedepth = 0; /* log this allocation if nonzero */
2063 * Zone leak detection: record the allocation
2068 /* If it failed, roll back the counter so we sample the next allocation instead. */
2234 * Zone leak detection: un-track the allocation
2647 * (i.e. we need a whole allocation block's worth of free
2781 * If this is the last allocation on the page(s),
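
The final fragments are about giving memory back: garbage collection is only worthwhile once a zone holds a whole allocation block's worth of free elements, and backing pages can be returned once the last allocation on them is freed. A small sketch of that page-return test, using a hypothetical per-page counter; the kernel's actual bookkeeping differs.

#include <stdint.h>

/* Hypothetical per-page bookkeeping, for illustration only. */
struct model_page {
	uint32_t alloc_count;   /* live elements currently on this page */
};

/* Returns 1 if freeing one element empties the page, i.e. the page(s)
 * backing it can be handed back to the VM. */
static int
free_element_on_page(struct model_page *page)
{
	if (page->alloc_count == 0)
		return 0;            /* nothing live here; nothing to do */

	page->alloc_count--;

	/* Last allocation on the page: the whole page is now reclaimable. */
	return page->alloc_count == 0;
}
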