Lines Matching defs:cache

85     x86_cpu_cache_t	*cache;
89 cache = kalloc(sizeof(x86_cpu_cache_t) + (MAX_CPUS * sizeof(x86_lcpu_t *)));
90 if (cache == NULL)
93 cache = x86_caches;
94 x86_caches = cache->next;
95 cache->next = NULL;
98 bzero(cache, sizeof(x86_cpu_cache_t));
99 cache->next = NULL;
100 cache->maxcpus = MAX_CPUS;
101 for (i = 0; i < cache->maxcpus; i += 1) {
102 cache->cpus[i] = NULL;
107 return(cache);
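
The hits at source lines 85-107 all fall inside the allocator for x86_cpu_cache_t structures. Read together they suggest the following shape: reuse an entry from the x86_caches free list when one is available, otherwise kalloc() a structure sized for MAX_CPUS logical-CPU pointers, then zero and initialize it. This is a sketch assembled from the matched lines, with the elided control flow assumed, not the verbatim source:

static x86_cpu_cache_t *
x86_cache_alloc(void)
{
    x86_cpu_cache_t *cache;
    int             i;

    if (x86_caches == NULL) {
        /* Free list is empty: allocate a fresh structure. */
        cache = kalloc(sizeof(x86_cpu_cache_t)
                       + (MAX_CPUS * sizeof(x86_lcpu_t *)));
        if (cache == NULL)
            return(NULL);
    } else {
        /* Recycle a structure from the x86_caches free list. */
        cache = x86_caches;
        x86_caches = cache->next;
        cache->next = NULL;
    }

    bzero(cache, sizeof(x86_cpu_cache_t));
    cache->next = NULL;
    cache->maxcpus = MAX_CPUS;
    for (i = 0; i < cache->maxcpus; i += 1) {
        cache->cpus[i] = NULL;
    }

    return(cache);
}
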
134 * Save the number of CPUs sharing this cache.
147 * logical CPUs sharing the cache.
157 * threads that are sharing the cache.
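
These three comments (lines 134-157) record, while the cache list is being built, how many logical CPUs share each cache. On processors that implement CPUID leaf 4 (deterministic cache parameters), that sharing count is reported in EAX bits 25:14 of each sub-leaf, encoded minus one. The following stand-alone user-space snippet only illustrates where the number lives in the leaf-4 encoding; it is not kernel code and assumes a GCC/Clang toolchain providing <cpuid.h>:

#include <stdio.h>
#include <cpuid.h>

int
main(void)
{
    unsigned int eax, ebx, ecx, edx;
    unsigned int index;

    /* Walk the sub-leaves of CPUID leaf 4 (deterministic cache parameters). */
    for (index = 0; ; index += 1) {
        if (!__get_cpuid_count(4, index, &eax, &ebx, &ecx, &edx))
            break;
        if ((eax & 0x1f) == 0)      /* cache type 0: no more caches */
            break;

        /*
         * EAX[25:14] holds the maximum number of addressable IDs for
         * logical processors sharing this cache, encoded minus one.
         */
        printf("L%u cache: shared by up to %u logical CPUs\n",
            (eax >> 5) & 0x7, ((eax >> 14) & 0xfff) + 1);
    }
    return 0;
}
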
249 x86_cache_free(x86_cpu_cache_t *cache)
252 if (cache->level > 0 && cache->level <= MAX_CACHE_DEPTH)
253 num_Lx_caches[cache->level - 1] -= 1;
254 cache->next = x86_caches;
255 x86_caches = cache;
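
Lines 249-255 cover essentially all of x86_cache_free(): the per-level cache count is dropped and the structure is pushed back onto the x86_caches free list instead of being returned to kalloc. A sketch, assuming only braces and local declarations sit between the hits:

static void
x86_cache_free(x86_cpu_cache_t *cache)
{
    /* Account for the cache going away at its level. */
    if (cache->level > 0 && cache->level <= MAX_CACHE_DEPTH)
        num_Lx_caches[cache->level - 1] -= 1;

    /* Return the structure to the free list for later reuse. */
    cache->next = x86_caches;
    x86_caches = cache;
}
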
259 * This returns a list of cache structures that represent the
273 * Cons up a list driven not by CPUID leaf 4 (deterministic cache params)
582 x86_cache_add_lcpu(x86_cpu_cache_t *cache, x86_lcpu_t *lcpu)
588 * Put the new CPU into the list of the cache.
590 cur_cache = lcpu->caches[cache->level - 1];
591 lcpu->caches[cache->level - 1] = cache;
592 cache->next = cur_cache;
593 cache->nlcpus += 1;
594 for (i = 0; i < cache->nlcpus; i += 1) {
595 if (cache->cpus[i] == NULL) {
596 cache->cpus[i] = lcpu;
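
Lines 582-596 are the bulk of x86_cache_add_lcpu(): the cache is linked onto the logical CPU's per-level cache list, the sharing count is bumped, and the CPU is recorded in the first free slot of the cache's cpus[] array. A sketch from the matched lines; the break that ends the slot scan is assumed:

static void
x86_cache_add_lcpu(x86_cpu_cache_t *cache, x86_lcpu_t *lcpu)
{
    x86_cpu_cache_t *cur_cache;
    int             i;

    /*
     * Put the new CPU into the list of the cache.
     */
    cur_cache = lcpu->caches[cache->level - 1];
    lcpu->caches[cache->level - 1] = cache;
    cache->next = cur_cache;
    cache->nlcpus += 1;
    for (i = 0; i < cache->nlcpus; i += 1) {
        if (cache->cpus[i] == NULL) {
            cache->cpus[i] = lcpu;
            break;          /* assumed: stop at the first free slot */
        }
    }
}
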
617 * Add the cache data to the topology.
625 * Remove the cache from the front of the list.
633 * If the cache isn't shared then just put it where it
643 * have the same sharing. So if we have a cache already at
652 * This is a shared cache, so we have to figure out if
653 * this is the first time we've seen this cache. We do
655 * this cache is already described.
676 * If there's a cache on this logical CPU,
705 * If there's a cache on this logical CPU,
721 * If a shared cache wasn't found, then this logical CPU must
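
The remaining hits (lines 617-721) are comments from the routine that folds each CPU's cache list into the topology. They describe the shared-cache case: a cache spanning several logical CPUs must be described only once, so before inserting one the code checks whether a sibling logical CPU already carries a matching cache at that level; if so the duplicate is recycled, and if no shared instance is found this logical CPU is the first to encounter the cache. A hypothetical sketch of that shape; lcpu_add_caches(), find_shared_cache(), and the maxcpus == 1 test for an unshared cache are assumptions, while x86_cache_add_lcpu(), x86_cache_free(), and the structure fields come from the matched lines:

static void
lcpu_add_caches(x86_lcpu_t *lcpu, x86_cpu_cache_t *list)
{
    x86_cpu_cache_t *cur_cache;
    x86_cpu_cache_t *found;

    while (list != NULL) {
        /* Remove the cache from the front of the list. */
        cur_cache = list;
        list = cur_cache->next;
        cur_cache->next = NULL;

        if (cur_cache->maxcpus == 1) {
            /* Not shared: attach it directly to this logical CPU. */
            x86_cache_add_lcpu(cur_cache, lcpu);
            continue;
        }

        /*
         * Shared cache: a sibling logical CPU may already describe it.
         * If so, join that instance and recycle this one; otherwise
         * this logical CPU is the first to see the cache.
         */
        found = find_shared_cache(lcpu, cur_cache->level);  /* hypothetical helper */
        if (found != NULL) {
            x86_cache_add_lcpu(found, lcpu);
            x86_cache_free(cur_cache);
        } else {
            x86_cache_add_lcpu(cur_cache, lcpu);
        }
    }
}
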