Lines matching defs:ta_p
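
These matches are evidently the ta_p definition and use sites in the libc_db thread-agent implementation (thread_db.c in Solaris/illumos): ta_p is the td_thragent_t that represents a debugger's connection to one target process, and nearly every exported td_ta_*/td_thr_* entry point threads it through the helper routines matched below.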

86 td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
116 td_read_hash_size(td_thragent_t *ta_p)
121 switch (ta_p->initialized) {
127 return (ta_p->hash_size);
130 if (ta_p->model == PR_MODEL_NATIVE) {
131 addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
134 addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
139 if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
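
The td_read_hash_size() matches above show the pattern that recurs throughout these results: compute the remote address from the uberdata_t or uberdata32_t layout according to the target's data model, then ps_pdread() the field out of the debuggee. A minimal sketch of that shape, assuming this file's private td_thragent_t layout and the proc_service ps_pdread() callback (so it illustrates the pattern rather than compiling standalone):

    /* Model-dependent remote read, as in td_read_hash_size(). */
    static td_err_e
    read_remote_hash_size(td_thragent_t *ta_p, uint_t *sizep)
    {
            psaddr_t addr;

            if (ta_p->model == PR_MODEL_NATIVE)     /* target matches debugger */
                    addr = ta_p->uberdata_addr +
                        offsetof(uberdata_t, hash_size);
            else                                    /* 32-bit target process */
                    addr = ta_p->uberdata_addr +
                        offsetof(uberdata32_t, hash_size);

            if (ps_pdread(ta_p->ph_p, addr, sizep, sizeof (*sizep)) != PS_OK)
                    return (TD_DBERR);              /* imported read failed */
            return (TD_OK);
    }
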
146 td_read_uberdata(td_thragent_t *ta_p)
148 struct ps_prochandle *ph_p = ta_p->ph_p;
150 if (ta_p->model == PR_MODEL_NATIVE) {
153 if (ps_pdread(ph_p, ta_p->uberdata_addr,
156 ta_p->primary_map = uberdata.primary_map;
157 ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
159 ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
161 ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
162 ta_p->hash_size = uberdata.hash_size;
164 ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
173 if (ps_pdread(ph_p, ta_p->uberdata_addr,
176 ta_p->primary_map = uberdata.primary_map;
177 ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
179 ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
181 ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
182 ta_p->hash_size = uberdata.hash_size;
187 ta_p->tdb_events[i] = tdb_events[i];
192 if (ta_p->hash_size != 1) { /* multi-threaded */
193 ta_p->initialized = 2;
194 ta_p->single_lwpid = 0;
195 ta_p->single_ulwp_addr = NULL;
197 ta_p->initialized = 1;
202 if (ta_p->model == PR_MODEL_NATIVE) {
206 if (ps_pdread(ph_p, ta_p->hash_table_addr,
210 ta_p->initialized = 0;
215 ta_p->single_lwpid = lwpid;
216 ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
222 if (ps_pdread(ph_p, ta_p->hash_table_addr,
226 ta_p->initialized = 0;
231 ta_p->single_lwpid = lwpid;
232 ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
238 if (!ta_p->primary_map)
239 ta_p->initialized = 0;
244 td_read_bootstrap_data(td_thragent_t *ta_p)
246 struct ps_prochandle *ph_p = ta_p->ph_p;
253 switch (ta_p->initialized) {
257 if (td_read_hash_size(ta_p) == 1)
259 return (td_read_uberdata(ta_p));
264 * We set ta_p->initialized to -1 to cut off recursive calls
268 ta_p->initialized = -1;
291 if (ta_p->model == PR_MODEL_NATIVE) {
296 if ((ta_p->bootstrap_addr = psaddr) == NULL)
303 ta_p->bootstrap_addr = NULL;
306 ta_p->uberdata_addr = psaddr;
313 if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == NULL)
320 ta_p->bootstrap_addr = NULL;
323 ta_p->uberdata_addr = (psaddr_t)psaddr;
329 if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
331 if (ta_p->bootstrap_addr == NULL)
332 ta_p->initialized = 0;
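
The initialized field drives most of the control flow above; as far as these fragments reveal it (an inference from the matches, not a documented contract):

    /*
     * ta_p->initialized states, inferred from the matches above:
     *  -1  td_read_bootstrap_data() in progress; blocks recursion and
     *      makes ph_lock_ta() fail (line 433)
     *   0  not (or no longer) initialized
     *   1  single-threaded target; single_lwpid/single_ulwp_addr
     *      cache the lone thread
     *   2  fully multi-threaded; the remote hash table is live
     *
     * ph_lock_ta() re-runs td_read_bootstrap_data() whenever the
     * state is anything other than 2 (lines 440-441).
     */
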
346 td_thragent_t *ta_p;
368 if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
377 (void) memset(ta_p, 0, sizeof (*ta_p));
378 ta_p->ph_p = ph_p;
379 (void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
380 ta_p->model = model;
381 return_val = td_read_bootstrap_data(ta_p);
392 psaddr_t psaddr = ta_p->tdb_register_sync_addr;
409 ta_p->sync_tracking = 1;
414 *ta_pp = ta_p;
416 free(ta_p);
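
This block is __td_ta_new(), which together with __td_ta_delete() further down backs the public td_ta_new()/td_ta_delete() pair. A hedged sketch of the client side, assuming a struct ps_prochandle grabbed elsewhere (for example via libproc):

    #include <proc_service.h>
    #include <thread_db.h>

    /* Create a thread agent for an already-grabbed target process. */
    td_err_e
    attach_thread_agent(struct ps_prochandle *ph, td_thragent_t **ta)
    {
            td_err_e err;

            if ((err = td_init()) != TD_OK)  /* one-time library setup */
                    return (err);
            return (td_ta_new(ph, ta));      /* drives the bootstrap reads above */
    }

    /* Teardown later is (void) td_ta_delete(ta); */
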
428 ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
433 if (ta_p == NULL || ta_p->initialized == -1) {
435 } else if (rw_rdlock(&ta_p->rwlock) != 0) { /* can't happen? */
437 } else if ((ph_p = ta_p->ph_p) == NULL) {
438 (void) rw_unlock(&ta_p->rwlock);
440 } else if (ta_p->initialized != 2 &&
441 (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
442 (void) rw_unlock(&ta_p->rwlock);
486 ph_unlock(td_thragent_t *ta_p)
488 (void) rw_unlock(&ta_p->rwlock);
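
ph_lock_ta()/ph_unlock() encode the calling convention for everything that follows: take the agent's rwlock, validate the handle, lazily re-run td_read_bootstrap_data() if initialized is not yet 2, and hand back the ps_prochandle. Every exported operation in the rest of this listing therefore has roughly this shape (a sketch using the internal names above):

    td_err_e
    example_ta_op(td_thragent_t *ta_p)
    {
            struct ps_prochandle *ph_p;
            td_err_e return_val = TD_OK;

            if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
                    return (return_val);    /* bad agent or bootstrap failure */

            /* ... ps_pdread()/ps_pdwrite() against ph_p ... */

            ph_unlock(ta_p);                /* drops ta_p->rwlock */
            return (return_val);
    }
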
507 __td_ta_delete(td_thragent_t *ta_p)
515 if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
517 if ((ph_p = ta_p->ph_p) == NULL) {
518 (void) rw_unlock(&ta_p->rwlock);
526 if (ta_p->sync_tracking == 0 &&
530 (void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
533 ta_p->ph_p = NULL;
534 (void) rw_unlock(&ta_p->rwlock);
544 __td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
552 if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
554 ph_unlock(ta_p);
566 __td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
568 if (ta_p == NULL)
570 if (ta_p->ph_p == NULL)
580 __td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
589 if (ta_p->model == PR_MODEL_NATIVE) {
590 nthreads_addr = ta_p->uberdata_addr +
592 nzombies_addr = ta_p->uberdata_addr +
596 nthreads_addr = ta_p->uberdata_addr +
598 nzombies_addr = ta_p->uberdata_addr +
608 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
614 ph_unlock(ta_p);
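
__td_ta_get_nthreads() reads two uberdata counters, nthreads and nzombies, which suggests the reported total includes undetached zombies. Typical use of the public wrapper:

    /* How many threads (zombies included) does the target have? */
    int nthreads;

    if (td_ta_get_nthreads(ta_p, &nthreads) == TD_OK)
            (void) printf("%d threads\n", nthreads);    /* assumes <stdio.h> */
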
650 __td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
657 ta_p != NULL &&
658 ta_p->initialized == 1 &&
659 (td_read_hash_size(ta_p) == 1 ||
660 td_read_uberdata(ta_p) == TD_OK) &&
661 ta_p->initialized == 1 &&
662 ta_p->single_lwpid == tid) {
663 th_p->th_ta_p = ta_p;
664 if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
674 if (ta_p == NULL)
683 return_val = __td_ta_thr_iter(ta_p,
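
The long conjunction at lines 657-662 is a fast path: when the agent believes the target is single-threaded (initialized == 1) and the cached single_lwpid matches, the handle comes straight from single_ulwp_addr; only otherwise does the code fall back to a full __td_ta_thr_iter() walk. Calling the public wrapper (fragment; ta_p and tid come from context):

    td_thrhandle_t th;

    if (td_ta_map_id2thr(ta_p, tid, &th) != TD_OK)
            return;                 /* no such thread in the target */
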
702 __td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
712 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
720 ph_unlock(ta_p);
723 ph_unlock(ta_p);
730 sh_p->sh_ta_p = (td_thragent_t *)ta_p;
743 __td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
755 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
758 ph_unlock(ta_p);
762 if (ta_p->model == PR_MODEL_NATIVE) {
766 ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
781 ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
798 ph_unlock(ta_p);
804 else if (ta_p->model == PR_MODEL_NATIVE) {
839 ph_unlock(ta_p);
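
__td_ta_tsd_iter() walks the tsd_metadata in the target's uberdata and applies the callback once per allocated key. A hedged callback sketch, assuming the Solaris td_key_iter_f signature of (key, destructor, cbdata):

    static int
    print_key(thread_key_t key, void (*destructor)(void *), void *cbdata)
    {
            (void) destructor;
            (void) cbdata;
            (void) printf("TSD key %d allocated\n", (int)key);
            return (0);             /* nonzero stops the iteration */
    }

    /* (void) td_ta_tsd_iter(ta_p, print_key, NULL); */
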
864 * *ta_p - thread agent
888 __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
910 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
913 ph_unlock(ta_p);
925 if (ta_p->model == PR_MODEL_NATIVE) {
927 ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
930 ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
937 ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
941 ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
960 ph_unlock(ta_p);
965 ph_unlock(ta_p);
984 if (ta_p->model == PR_MODEL_NATIVE) {
1060 th.th_ta_p = (td_thragent_t *)ta_p;
1080 ph_unlock(ta_p);
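
__td_ta_thr_iter() (declared at 86, defined at 888) walks both the all_lwps and all_zombies lists, builds a td_thrhandle_t for each remote ulwp_t, and applies the callback. The public wrapper adds filter arguments; the permissive constants below disable all filtering:

    /* Count every thread in the target, zombies included. */
    static int
    count_thread(const td_thrhandle_t *th_p, void *cbdata)
    {
            (void) th_p;
            (*(int *)cbdata)++;
            return (0);             /* keep iterating */
    }

    int
    count_threads(td_thragent_t *ta_p)
    {
            int nseen = 0;

            (void) td_ta_thr_iter(ta_p, count_thread, &nseen,
                TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
                TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
            return (nseen);
    }
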
1090 __td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
1096 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1107 if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
1113 ta_p->sync_tracking = 1;
1114 ph_unlock(ta_p);
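
Both __td_ta_sync_tracking_enable() and the teardown path in __td_ta_delete() write the word at tdb_register_sync_addr; that is how the agent switches the target's registration of synchronization objects on and off, and the guard at line 526 suggests sync_tracking records an explicit debugger request so td_ta_delete() leaves tracking enabled. Turning it on is one call:

    /* Ask the target to start registering mutexes, cvs, and friends. */
    (void) td_ta_sync_tracking_enable(ta_p, 1);
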
1131 __td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
1146 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1149 ph_unlock(ta_p);
1152 if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
1172 if (ta_p->model == PR_MODEL_NATIVE) {
1173 if (ps_pdread(ph_p, ta_p->uberdata_addr +
1183 if (ps_pdread(ph_p, ta_p->uberdata_addr +
1220 synchandle.sh_ta_p = ta_p;
1232 ph_unlock(ta_p);
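
__td_ta_sync_iter() then walks that registration hash table and builds a td_synchandle_t (stamped with sh_ta_p at line 1220) for each registered object. A minimal callback sketch, assuming the td_sync_iter_f signature of (handle, cbdata):

    static int
    count_sync(const td_synchandle_t *sh_p, void *cbdata)
    {
            (void) sh_p;
            (*(int *)cbdata)++;
            return (0);
    }

    /* int nsync = 0; (void) td_ta_sync_iter(ta_p, count_sync, &nsync); */
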
1242 __td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
1253 __td_ta_reset_stats(const td_thragent_t *ta_p)
1264 __td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
1274 td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
1282 ti_p->ti_ta_p = ta_p;
1318 td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
1326 ti_p->ti_ta_p = ta_p;
1369 td_thragent_t *ta_p;
1379 ta_p = th_p->th_ta_p;
1381 ph_unlock(ta_p);
1390 if (ta_p->model == PR_MODEL_NATIVE) {
1398 td_thr2to(ta_p, psaddr, &ulwp, ti_p);
1409 td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
1416 ph_unlock(ta_p);
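
td_thr2to()/td_thr2to32() convert a remote ulwp_t image into the public td_thrinfo_t, stamping ti_ta_p so later per-thread calls can find the agent again. Reading a thread's info from a handle (fragment; th_p comes from td_ta_map_id2thr() or the iterator, and printf assumes <stdio.h>):

    td_thrinfo_t ti;

    if (td_thr_get_info(th_p, &ti) == TD_OK)
            (void) printf("tid %d, state %d\n", (int)ti.ti_tid, (int)ti.ti_state);
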
1427 __td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
1429 if (ta_p == NULL)
1437 notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];
1566 td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
1572 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1575 ph_unlock(ta_p);
1578 if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
1586 if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
1591 ph_unlock(ta_p);
1600 __td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
1602 return (td_ta_mod_event(ta_p, events, 1));
1623 __td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
1625 return (td_ta_mod_event(ta_p, events, 0));
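
td_ta_mod_event() is the shared worker: it reads the process-wide event mask at tdb_eventmask_addr, ORs the requested events in or masks them out according to onoff, and writes the mask back, so __td_ta_set_event() and __td_ta_clear_event() are one-line wrappers. On the client side the mask is built with the td_event_* macros:

    td_thr_events_t events;

    td_event_emptyset(&events);
    td_event_addset(&events, TD_CREATE);    /* report thread creation */
    td_event_addset(&events, TD_DEATH);     /* ... and thread exit */
    (void) td_ta_set_event(ta_p, &events);
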
1738 __td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
1742 if (ta_p == NULL)
1744 if (ta_p->ph_p == NULL)
1749 if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
2118 td_thragent_t *ta_p;
2129 ta_p = th_p->th_ta_p;
2131 ph_unlock(ta_p);
2135 if (ta_p->model == PR_MODEL_NATIVE) {
2141 ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
2165 ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
2194 ph_unlock(ta_p);
2201 if (ta_p->model == PR_MODEL_NATIVE) {
2222 ph_unlock(ta_p);
2235 td_thragent_t *ta_p;
2243 ta_p = th_p->th_ta_p;
2245 ph_unlock(ta_p);
2249 if (ta_p->model == PR_MODEL_NATIVE) {
2256 ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
2291 ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
2324 ph_unlock(ta_p);
2386 td_thragent_t *ta_p;
2395 ta_p = th_p->th_ta_p;
2396 ph_unlock(ta_p);
2401 return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
2463 __td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
2466 return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
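
The two-line __td_ta_map_lwp2thr() above simply forwards to __td_ta_map_id2thr(): in the unified process model a thread ID and an LWP ID are the same number, so no separate mapping is needed.
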
2626 read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
2636 if (ta_p->model == PR_MODEL_LP64)
2644 if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
2653 if (ps_pdread(ta_p->ph_p, next_desc,
2673 td_thragent_t *ta_p;
2685 ta_p = sh_p->sh_ta_p;
2687 ph_unlock(ta_p);
2702 if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
2713 if (ta_p->model == PR_MODEL_NATIVE) {
2714 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2724 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2740 return_val = read_sync_stats(ta_p, hashaddr,
2768 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2769 (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2770 < ta_p->hash_size * sizeof (thr_hash_table_t))
2795 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2796 (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2797 < ta_p->hash_size * sizeof (thr_hash_table_t))
2855 ph_unlock(ta_p);
2971 td_thragent_t *ta_p = th_p->th_ta_p;
2972 struct ps_prochandle *ph_p = ta_p->ph_p;
2976 if (ta_p->model == PR_MODEL_NATIVE) {