Lines Matching refs:uintptr_t

63 kmem_init_walkers(uintptr_t addr, const kmem_cache_t *c, void *ignored)
86 kmem_debug(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
106 wsp->walk_addr = (uintptr_t)sym.st_value;
132 uintptr_t caddr = (uintptr_t)wsp->walk_data;
147 kmem_slab_check(void *p, uintptr_t saddr, void *arg)
150 uintptr_t caddr = (uintptr_t)arg;
151 if ((uintptr_t)sp->slab_cache != caddr) {
161 kmem_partial_slab_check(void *p, uintptr_t saddr, void *arg)
179 kmem_complete_slab_check(void *p, uintptr_t saddr, void *arg)
197 uintptr_t kns_cache_addr;
202 kmem_nth_slab_check(void *p, uintptr_t saddr, void *arg)
217 uintptr_t caddr = wsp->walk_addr;
219 wsp->walk_addr = (uintptr_t)(caddr +
229 uintptr_t caddr = wsp->walk_addr;
231 wsp->walk_addr = (uintptr_t)(caddr +
241 uintptr_t caddr = wsp->walk_addr;
260 uintptr_t caddr = wsp->walk_addr;
267 wsp->walk_addr = (uintptr_t)(caddr +
277 uintptr_t caddr = wsp->walk_addr;
312 kmem_cache(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv)
432 kmem_first_slab(uintptr_t addr, const kmem_slab_t *sp, boolean_t *is_slab)
440 kmem_first_partial_slab(uintptr_t addr, const kmem_slab_t *sp,
470 kmem_slablist_stat(uintptr_t addr, const kmem_slab_t *sp,
522 kmem_slabs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
744 uintptr_t p1 = *((uintptr_t *)lhs);
745 uintptr_t p2 = *((uintptr_t *)rhs);
770 uintptr_t *kmhw_table;
780 uintptr_t *hash;
782 uintptr_t haddr, addr = wsp->walk_addr;
806 hsize = nelems * sizeof (uintptr_t);
807 haddr = (uintptr_t)c.cache_hash_table;
826 uintptr_t addr = NULL;
828 if ((addr = (uintptr_t)kmhw->kmhw_cur.bc_next) == NULL) {
853 mdb_free(kmhw->kmhw_table, kmhw->kmhw_nelems * sizeof (uintptr_t));
862 kmem_hash_lookup(kmem_cache_t *cp, uintptr_t caddr, void *buf, uintptr_t *out)
864 uintptr_t bucket = (uintptr_t)KMEM_HASH(cp, buf);
876 (uintptr_t)bcp) == -1) {
881 *out = (uintptr_t)bcp;
894 uintptr_t addr = (uintptr_t)cp->cache_magtype;
926 kmem_estimate_slab(uintptr_t addr, const kmem_slab_t *sp, size_t *est)
938 kmem_estimate_allocated(uintptr_t addr, const kmem_cache_t *cp)
962 if (mdb_vread(mp, magbsize, (uintptr_t)kmp) == -1) { \
977 kmem_read_magazines(kmem_cache_t *cp, uintptr_t addr, int ncpus,
1057 (uintptr_t)ccp - (uintptr_t)cp + addr));
1095 kmem_walk_callback(mdb_walk_state_t *wsp, uintptr_t buf)
1101 bufctl_walk_callback(kmem_cache_t *cp, mdb_walk_state_t *wsp, uintptr_t buf)
1155 uintptr_t addr = wsp->walk_addr;
1192 (uintptr_t)&cp->cache_arena->vm_quantum) == -1 ||
1313 kmw->kmw_max * sizeof (uintptr_t));
1336 uintptr_t chunksize, slabsize;
1337 uintptr_t addr;
1369 return (kmem_walk_callback(wsp, (uintptr_t)buf));
1386 uintptr_t out;
1395 (uintptr_t)btp) == -1) {
1400 out = (uintptr_t)tag.bt_bufctl;
1408 ret = kmem_walk_callback(wsp, (uintptr_t)buf);
1440 (uintptr_t)kbase) == -1) {
1483 if (mdb_vread(&bc, sizeof (bc), (uintptr_t)bcp) == -1) {
1497 ((uintptr_t)bcp - (uintptr_t)kbase +
1498 (uintptr_t)ubase));
1503 ndx = ((uintptr_t)buf - (uintptr_t)kbase) / chunksize;
1526 (uintptr_t)bcp);
1528 ret = kmem_walk_callback(wsp, (uintptr_t)buf);
1570 ret = kmem_walk_callback(wsp, (uintptr_t)buf);
1579 uintptr_t chunksize;
1580 uintptr_t slabsize;
1602 kmem_walk_all(uintptr_t addr, const kmem_cache_t *c, mdb_walk_state_t *wsp)
1631 wsp->walk_addr = (uintptr_t)wsp->walk_arg;
1713 mdb_vread(&bcn, sizeof (bcn), (uintptr_t)bc.bc_lastlog) != -1 &&
1723 wsp->walk_addr = (uintptr_t)bc.bc_addr;
1733 uintptr_t addr = (uintptr_t)bhw->bhw_next;
1734 uintptr_t baseaddr = wsp->walk_addr;
1750 if ((uintptr_t)bc.bc_addr != baseaddr ||
1782 uintptr_t lp = wsp->walk_addr;
1815 (uintptr_t)lhp->lh_base) == -1) {
1827 ((uintptr_t)klw->klw_base + i * lhp->lh_chunksize);
1853 return (wsp->walk_callback((uintptr_t)bcp - (uintptr_t)klw->klw_base +
1854 (uintptr_t)klw->klw_lh.lh_base, bcp, wsp->walk_cbdata));
1869 uintptr_t abb_addr;
1875 uintptr_t abw_thread;
1883 allocdby_walk_bufctl(uintptr_t addr, const kmem_bufctl_audit_t *bcp,
1886 if ((uintptr_t)bcp->bc_thread != abw->abw_thread)
1911 allocdby_walk_cache(uintptr_t addr, const kmem_cache_t *c, allocdby_walk_t *abw)
1982 uintptr_t addr;
2008 allocdby_walk(uintptr_t addr, const kmem_bufctl_audit_t *bcp, void *ignored)
2022 c, bcp->bc_stack[i] - (uintptr_t)sym.st_value);
2031 allocdby_common(uintptr_t addr, uint_t flags, const char *w)
2048 allocdby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
2055 freedby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
2077 stack_active(const kthread_t *t, uintptr_t addr)
2079 uintptr_t panicstk;
2090 panicstk = (uintptr_t)sym.st_value;
2121 whatis_call_printer(mdb_dcmd_f *dcmd, uintptr_t addr)
2132 whatis_print_kmf_lite(uintptr_t btaddr, size_t count)
2176 whatis_print_kmem(whatis_info_t *wi, uintptr_t maddr, uintptr_t addr,
2177 uintptr_t baddr)
2183 uintptr_t btaddr = (uintptr_t)KMEM_BUFTAG(cp, addr);
2209 whatis_walk_kmem(uintptr_t addr, void *ignored, whatis_info_t *wi)
2213 uintptr_t cur;
2224 whatis_walk_bufctl(uintptr_t baddr, const kmem_bufctl_t *bcp, whatis_info_t *wi)
2228 uintptr_t cur;
2229 uintptr_t addr = (uintptr_t)bcp->bc_addr;
2239 whatis_walk_seg(uintptr_t addr, const vmem_seg_t *vs, whatis_info_t *wi)
2244 uintptr_t cur;
2277 whatis_walk_vmem(uintptr_t addr, const vmem_t *vmem, whatis_info_t *wi)
2304 whatis_walk_slab(uintptr_t saddr, const kmem_slab_t *sp, whatis_info_t *wi)
2310 (uintptr_t)sp->slab_base, wi->wi_slab_size)) {
2318 whatis_walk_cache(uintptr_t addr, const kmem_cache_t *c, whatis_info_t *wi)
2400 whatis_walk_touch(uintptr_t addr, const kmem_cache_t *c, whatis_info_t *wi)
2410 whatis_walk_metadata(uintptr_t addr, const kmem_cache_t *c, whatis_info_t *wi)
2419 whatis_walk_notouch(uintptr_t addr, const kmem_cache_t *c, whatis_info_t *wi)
2429 whatis_walk_thread(uintptr_t addr, const kthread_t *t, mdb_whatis_t *w)
2431 uintptr_t cur;
2432 uintptr_t saddr;
2457 saddr = (uintptr_t)t->t_stkbase;
2458 size = (uintptr_t)t->t_stk - saddr + 1;
2468 uintptr_t base, size_t size, const char *where)
2470 uintptr_t cur;
2481 whatis_walk_modctl(uintptr_t addr, const struct modctl *m, mdb_whatis_t *w)
2490 if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
2495 if (mdb_readstr(name, sizeof (name), (uintptr_t)m->mod_modname) == -1)
2499 (uintptr_t)mod.text, mod.text_size, "text segment");
2501 (uintptr_t)mod.data, mod.data_size, "data segment");
2503 (uintptr_t)mod.bss, mod.bss_size, "bss segment");
2505 if (mdb_vread(&shdr, sizeof (shdr), (uintptr_t)mod.symhdr) == -1) {
2511 (uintptr_t)mod.symtbl, mod.nsyms * shdr.sh_entsize, "symtab");
2513 (uintptr_t)mod.symspace, mod.symsize, "symtab");
2520 whatis_walk_memseg(uintptr_t addr, const struct memseg *seg, mdb_whatis_t *w)
2522 uintptr_t cur;
2524 uintptr_t base = (uintptr_t)seg->pages;
2525 size_t size = (uintptr_t)seg->epages - base;
2630 uintptr_t kmc_low;
2631 uintptr_t kmc_high;
2635 uintptr_t kmd_addr;
2640 kmem_log_walk(uintptr_t addr, const kmem_bufctl_audit_t *b,
2657 (uintptr_t)&b->bc_cache->cache_bufsize) == -1) {
2664 if (kmd->kmd_addr < (uintptr_t)b->bc_addr ||
2665 kmd->kmd_addr >= (uintptr_t)b->bc_addr + bufsize)
2682 kmem_log(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
2686 uintptr_t lhp, clhp;
2688 uintptr_t *cpu;
2716 clhp = lhp + ((uintptr_t)&lh.lh_cpu[0] - (uintptr_t)&lh);
2718 cpu = mdb_alloc(sizeof (uintptr_t) * NCPU, UM_SLEEP | UM_GC);
2725 if (sym.st_size != NCPU * sizeof (uintptr_t)) {
2727 NCPU * sizeof (uintptr_t), sym.st_size);
2731 if (mdb_vread(cpu, sym.st_size, (uintptr_t)sym.st_value) == -1) {
2752 (uintptr_t)lh.lh_base;
2753 kmc[i].kmc_high = (uintptr_t)clh.clh_current;
2801 bufctl_history_callback(uintptr_t addr, const void *ign, void *arg)
2837 bufctl(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
2843 uintptr_t caller = NULL, thread = NULL;
2844 uintptr_t laddr, haddr, baddr = NULL;
2928 &sym) != -1 && caller == (uintptr_t)sym.st_value) {
2933 laddr = (uintptr_t)sym.st_value;
2934 haddr = (uintptr_t)sym.st_value + sym.st_size;
2945 if (thread != NULL && (uintptr_t)bc.bc_thread != thread)
2954 if (baddr != 0 && (uintptr_t)bc.bc_addr != baddr)
3016 return ((uintptr_t)buf - (uintptr_t)buf_arg);
3025 verify_buftag(kmem_buftag_t *btp, uintptr_t pat)
3037 verify_free(uintptr_t addr, const void *data, void *private)
3062 addr, (uintptr_t)addr + corrupt);
3101 verify_alloc(uintptr_t addr, const void *data, void *private)
3175 kmem_verify(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
3267 uintptr_t vn_addr;
3280 uintptr_t vaddr, paddr;
3303 vaddr = (uintptr_t)vp->vn_vmem.vm_next;
3308 if ((paddr = (uintptr_t)vp->vn_vmem.vm_source) == NULL) {
3447 uintptr_t vsw_start;
3448 uintptr_t vsw_current;
3505 uintptr_t addr = vsw->vsw_current;
3524 vsw->vsw_current = (uintptr_t)seg.vs_anext;
3548 vmem(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
3552 uintptr_t paddr;
3574 for (paddr = (uintptr_t)v.vm_source; paddr != NULL; ident += 2) {
3580 paddr = (uintptr_t)parent.vm_source;
3627 vmem_seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
3631 uintptr_t sz;
3639 uintptr_t laddr, haddr;
3641 uintptr_t caller = NULL, thread = NULL;
3642 uintptr_t minsize = 0, maxsize = 0;
3733 caller == (uintptr_t)sym.st_value) {
3738 laddr = (uintptr_t)sym.st_value;
3739 haddr = (uintptr_t)sym.st_value + sym.st_size;
3751 if (thread != NULL && (uintptr_t)vs.vs_thread != thread)
3812 uintptr_t kma_addr;
3818 showbc(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmalog_data_t *kma)
3833 (uintptr_t)&bcp->bc_cache->cache_bufsize) == -1) {
3840 if (kma->kma_addr < (uintptr_t)bcp->bc_addr ||
3841 kma->kma_addr >= (uintptr_t)bcp->bc_addr + bufsize)
3848 if (mdb_readstr(name, sizeof (name), (uintptr_t)
3862 kmalog(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
3909 uintptr_t *kmc_caches; /* List of kmem_cache_t addrs */
3915 kmc_add(uintptr_t addr, const kmem_cache_t *cp, kmclist_t *kmc)
3928 p = mdb_alloc(sizeof (uintptr_t) * s, UM_SLEEP | UM_GC);
3931 sizeof (uintptr_t) * kmc->kmc_size);
3960 uintptr_t kmo_stack[KMEM_STACK_DEPTH]; /* Stack trace */
3964 uintptr_t kmu_addr; /* address of interest */
4055 kmause1(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu)
4068 kmause2(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu)
4077 else if (kmu->kmu_addr < (uintptr_t)bufctl.bc_addr ||
4078 kmu->kmu_addr >= (uintptr_t)bufctl.bc_addr +
4113 kmausers(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
4175 uintptr_t cp = kmc.kmc_caches[i];
4296 uintptr_t wt_target;
4301 whatthread_walk_thread(uintptr_t addr, const kthread_t *t, whatthread_t *w)
4303 uintptr_t current, data;
4321 for (current = (uintptr_t)t->t_stkbase; current < (uintptr_t)t->t_stk;
4322 current += sizeof (uintptr_t)) {
4344 whatthread(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
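
The listing above repeats one pattern throughout: dcmds and walker callbacks receive the target-side address as a uintptr_t, mdb_vread() copies the kernel structure into the debugger's own address space, and any pointers embedded in that local copy are cast back to uintptr_t before being used as addresses for further reads. The fragment below is a minimal sketch of that pattern using the standard MDB module API; the dcmd name example_cache is hypothetical and not part of the module indexed above, while kmem_cache_t and its cache_name/cache_hash_table members are the same structures the listing references.

#include <mdb/mdb_modapi.h>
#include <sys/kmem_impl.h>

/*
 * Hypothetical dcmd illustrating the uintptr_t usage shown in the listing:
 * 'addr' arrives as a uintptr_t, mdb_vread() pulls the kernel kmem_cache_t
 * into a local copy, and a kernel pointer stored in that copy is cast back
 * to uintptr_t when it is treated as an address.
 */
static int
example_cache(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kmem_cache_t c;

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	/* copy the kernel structure at 'addr' into debugger memory */
	if (mdb_vread(&c, sizeof (c), addr) == -1) {
		mdb_warn("failed to read kmem_cache at %p", addr);
		return (DCMD_ERR);
	}

	/* a kernel pointer inside the copy becomes a uintptr_t address */
	mdb_printf("%-25s %?p\n", c.cache_name,
	    (uintptr_t)c.cache_hash_table);

	return (DCMD_OK);
}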