Lines Matching defs:hal

51 static cmd1394_cmd_t *s1394_pending_q_remove(s1394_hal_t *hal);
53 static boolean_t s1394_process_pending_q(s1394_hal_t *hal);
55 static boolean_t s1394_pending_q_helper(s1394_hal_t *hal, cmd1394_cmd_t *cmd);
68 s1394_alloc_cmd(s1394_hal_t *hal, uint_t flags, cmd1394_cmd_t **cmdp)
100 *cmdp = kmem_cache_alloc(hal->hal_kmem_cachep, alloc_sleep);
107 sizeof (s1394_cmd_priv_t) + hal->halinfo.hal_overhead;
131 hal->hal_kstats->cmd_alloc++;
143 s1394_free_cmd(s1394_hal_t *hal, cmd1394_cmd_t **cmdp)
164 kmem_cache_free(hal->hal_kmem_cachep, *cmdp);
170 hal->hal_kstats->cmd_free++;
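
The s1394_alloc_cmd()/s1394_free_cmd() lines above show commands coming from a per-HAL kmem cache, with the sleep behavior chosen from the caller's flags. A minimal sketch of that pattern, assuming a driver-private cache pointer and a T1394_ALLOC_CMD_NOSLEEP-style flag; the hal structure below is a hypothetical stand-in, and the real routines also initialize the command's private area and bump the cmd_alloc/cmd_free kstats shown above.

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

/* Hypothetical stand-in for the private s1394_hal_t. */
typedef struct sketch_hal {
        kmem_cache_t    *hal_kmem_cachep;       /* per-HAL command cache */
} sketch_hal_t;

#define SKETCH_CMD_NOSLEEP      0x01    /* models T1394_ALLOC_CMD_NOSLEEP */

static int
sketch_alloc_cmd(sketch_hal_t *hal, uint_t flags, void **cmdp)
{
        int alloc_sleep = (flags & SKETCH_CMD_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;

        *cmdp = kmem_cache_alloc(hal->hal_kmem_cachep, alloc_sleep);
        if (*cmdp == NULL)
                return (DDI_FAILURE);   /* only possible with KM_NOSLEEP */

        return (DDI_SUCCESS);
}

static void
sketch_free_cmd(sketch_hal_t *hal, void **cmdp)
{
        kmem_cache_free(hal->hal_kmem_cachep, *cmdp);
        *cmdp = NULL;   /* clear the caller's handle to prevent reuse */
}
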
184 s1394_xfer_asynch_command(s1394_hal_t *hal, cmd1394_cmd_t *cmd, int *err)
196 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
198 mutex_enter(&hal->topology_tree_mutex);
199 state = hal->hal_state;
201 (hal->disable_requests_bit == 1)) {
202 *err = s1394_HAL_asynch_error(hal, cmd, state);
203 mutex_exit(&hal->topology_tree_mutex);
208 mutex_exit(&hal->topology_tree_mutex);
219 hal->hal_kstats->atreq_quad_rd++;
223 hal->hal_kstats->atreq_blk_rd++;
227 hal->hal_kstats->atreq_quad_wr++;
231 hal->hal_kstats->atreq_blk_wr++;
232 hal->hal_kstats->atreq_blk_wr_size += h_priv->mblk.length;
236 hal->hal_kstats->atreq_lock32++;
240 hal->hal_kstats->atreq_lock64++;
247 ret = HAL_CALL(hal).read(hal->halinfo.hal_private,
255 ret = HAL_CALL(hal).write(hal->halinfo.hal_private,
263 ret = HAL_CALL(hal).lock(hal->halinfo.hal_private,
301 dip = hal->halinfo.dip;
308 s1394_hal_shutdown(hal, B_TRUE);
314 dip = hal->halinfo.dip;
321 s1394_hal_shutdown(hal, B_TRUE);
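
s1394_xfer_asynch_command() counts each request in the per-HAL kstats and then hands it to the entry point the HAL registered for that operation, reached through the HAL_CALL(hal) vector with hal->halinfo.hal_private as the first argument. A standalone model of that dispatch, with illustrative type and field names; the real entry points also receive the command's HAL-private part and return detailed status.

/* Illustrative model of the per-HAL entry points reached via HAL_CALL(hal). */
typedef struct model_events {
        int (*read)(void *hal_private, void *cmd);
        int (*write)(void *hal_private, void *cmd);
        int (*lock)(void *hal_private, void *cmd);
} model_events_t;

typedef enum { MODEL_QUAD_RD, MODEL_QUAD_WR, MODEL_LOCK } model_cmd_type_t;

typedef struct model_hal {
        model_events_t  ops;            /* filled in when the HAL attaches */
        void            *hal_private;   /* the HAL's own soft state */
        unsigned long   quad_rd, quad_wr, lock32;       /* request kstats */
} model_hal_t;

static int
model_xfer(model_hal_t *hal, model_cmd_type_t type, void *cmd)
{
        switch (type) {
        case MODEL_QUAD_RD:
                hal->quad_rd++;         /* count before handing off */
                return (hal->ops.read(hal->hal_private, cmd));
        case MODEL_QUAD_WR:
                hal->quad_wr++;
                return (hal->ops.write(hal->hal_private, cmd));
        case MODEL_LOCK:
                hal->lock32++;
                return (hal->ops.lock(hal->hal_private, cmd));
        }
        return (-1);    /* unreachable for the types modeled here */
}
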
351 s1394_setup_asynch_command(s1394_hal_t *hal, s1394_target_t *target,
367 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
399 /* Record who sent the command and on which HAL */
401 s_priv->sent_on_hal = (s1394_hal_t *)hal;
411 from_node = IEEE1394_NODE_NUM(hal->node_id);
414 if (cmd->bus_generation != hal->generation_count) {
423 cmd->bus_generation = hal->generation_count;
428 rw_enter(&hal->target_list_rwlock, RW_READER);
432 rw_exit(&hal->target_list_rwlock);
441 rw_exit(&hal->target_list_rwlock);
450 from_node = IEEE1394_NODE_NUM(hal->node_id);
477 s_priv->hal_cmd_private.speed = (int)s1394_speed_map_get(hal,
508 if (hal->topology_tree[to_node].cfgrom) {
510 hal->topology_tree[to_node].cfgrom[
530 rw_enter(&hal->target_list_rwlock, RW_READER);
532 rw_exit(&hal->target_list_rwlock);
564 s1394_insert_q_asynch_cmd(hal, cmd);
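
s1394_setup_asynch_command() resolves the source node from the local node ID and compares the command's bus_generation against hal->generation_count, since a bus reset can renumber nodes and invalidate the destination; depending on the path, a mismatched command is either rejected as stale or stamped with the current generation (both cases are visible above). A small sketch of the reject path only, using hypothetical field names; the real routine does this under the topology tree mutex and also looks up the target, speed map, and config ROM.

#include <stdint.h>

/* A 16-bit 1394 node ID: 10-bit bus number high, 6-bit node number low. */
#define NODE_NUM(nodeid)        ((uint16_t)((nodeid) & 0x3F))

typedef struct gen_cmd {
        uint32_t        bus_generation;         /* generation the caller saw */
} gen_cmd_t;

typedef struct gen_hal {
        uint32_t        generation_count;       /* bumped on every bus reset */
        uint16_t        node_id;                /* our own bus/node ID */
} gen_hal_t;

static int
gen_check_and_source(gen_hal_t *hal, gen_cmd_t *cmd, uint16_t *from_node)
{
        if (cmd->bus_generation != hal->generation_count)
                return (-1);    /* stale: a reset happened since the caller looked */

        *from_node = NODE_NUM(hal->node_id);
        return (0);
}
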
577 s1394_insert_q_asynch_cmd(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
586 mutex_enter(&hal->outstanding_q_mutex);
592 if ((hal->outstanding_q_head == NULL) &&
593 (hal->outstanding_q_tail == NULL)) {
595 hal->outstanding_q_head = (cmd1394_cmd_t *)cmd;
596 hal->outstanding_q_tail = (cmd1394_cmd_t *)cmd;
601 s_priv->cmd_priv_next = hal->outstanding_q_head;
604 temp_cmd = (cmd1394_cmd_t *)hal->outstanding_q_head;
609 hal->outstanding_q_head = (cmd1394_cmd_t *)cmd;
612 mutex_exit(&hal->outstanding_q_mutex);
624 s1394_remove_q_asynch_cmd(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
634 mutex_enter(&hal->outstanding_q_mutex);
651 if (hal->outstanding_q_head == (cmd1394_cmd_t *)cmd)
652 hal->outstanding_q_head = (cmd1394_cmd_t *)next_cmd;
661 if (hal->outstanding_q_tail == (cmd1394_cmd_t *)cmd)
662 hal->outstanding_q_tail = (cmd1394_cmd_t *)prev_cmd;
665 mutex_exit(&hal->outstanding_q_mutex);
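
s1394_insert_q_asynch_cmd() and s1394_remove_q_asynch_cmd() keep the HAL's outstanding-command list as a doubly linked queue guarded by outstanding_q_mutex: new commands go on at the head, and completed commands are unlinked from wherever they sit, with the head and tail pointers patched up. A standalone userland model of that discipline (the driver actually keeps the links in each command's s1394-private area):

#include <pthread.h>
#include <stddef.h>

/* Illustrative queue node; the driver links commands via their private area. */
typedef struct q_cmd {
        struct q_cmd    *next;
        struct q_cmd    *prev;
} q_cmd_t;

typedef struct q_hal {
        pthread_mutex_t q_mutex;        /* models outstanding_q_mutex */
        q_cmd_t         *q_head;
        q_cmd_t         *q_tail;
} q_hal_t;

/* Insert a newly issued command at the head of the outstanding queue. */
static void
q_insert(q_hal_t *hal, q_cmd_t *cmd)
{
        (void) pthread_mutex_lock(&hal->q_mutex);
        cmd->prev = NULL;
        cmd->next = hal->q_head;
        if (hal->q_head == NULL && hal->q_tail == NULL) {
                hal->q_head = hal->q_tail = cmd;        /* first entry */
        } else {
                hal->q_head->prev = cmd;
                hal->q_head = cmd;
        }
        (void) pthread_mutex_unlock(&hal->q_mutex);
}

/* Unlink a completed command from wherever it sits in the queue. */
static void
q_remove(q_hal_t *hal, q_cmd_t *cmd)
{
        (void) pthread_mutex_lock(&hal->q_mutex);
        if (cmd->prev != NULL)
                cmd->prev->next = cmd->next;
        if (cmd->next != NULL)
                cmd->next->prev = cmd->prev;
        if (hal->q_head == cmd)
                hal->q_head = cmd->next;
        if (hal->q_tail == cmd)
                hal->q_tail = cmd->prev;
        cmd->next = cmd->prev = NULL;
        (void) pthread_mutex_unlock(&hal->q_mutex);
}
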
680 s1394_atreq_cmd_complete(s1394_hal_t *hal, cmd1394_cmd_t *req, int status)
700 hal->hal_kstats->atresp_quad_rd_fail++;
704 hal->hal_kstats->atresp_blk_rd_fail++;
708 hal->hal_kstats->atresp_quad_wr_fail++;
712 hal->hal_kstats->atresp_blk_wr_fail++;
716 hal->hal_kstats->atresp_lock32_fail++;
720 hal->hal_kstats->atresp_lock64_fail++;
739 s1394_remove_q_asynch_cmd(hal, req);
740 s1394_pending_q_insert(hal, req, S1394_PENDING_Q_REAR);
791 dip = hal->halinfo.dip;
798 s1394_hal_shutdown(hal, B_TRUE);
810 s1394_remove_q_asynch_cmd(hal, req);
857 hal->hal_kstats->atreq_blk_rd_size +=
884 ret = s1394_xfer_asynch_command(hal, req, &err);
893 s1394_remove_q_asynch_cmd(hal, req);
894 s1394_pending_q_insert(hal, req,
905 s1394_remove_q_asynch_cmd(hal, req);
951 s1394_atresp_cmd_complete(s1394_hal_t *hal, cmd1394_cmd_t *resp, int status)
1021 dip = hal->halinfo.dip;
1028 s1394_hal_shutdown(hal, B_TRUE);
1065 dip = hal->halinfo.dip;
1072 s1394_hal_shutdown(hal, B_TRUE);
1087 HAL_CALL(hal).response_complete(hal->halinfo.hal_private, resp, h_priv);
1106 s1394_send_response(s1394_hal_t *hal, cmd1394_cmd_t *resp)
1135 HAL_CALL(hal).response_complete(hal->halinfo.hal_private,
1150 hal->hal_kstats->arresp_quad_rd_fail++;
1154 hal->hal_kstats->arresp_blk_rd_fail++;
1158 hal->hal_kstats->arresp_quad_wr_fail++;
1162 hal->hal_kstats->arresp_blk_wr_fail++;
1166 hal->hal_kstats->arresp_lock32_fail++;
1170 hal->hal_kstats->arresp_lock64_fail++;
1175 hal->hal_kstats->arreq_blk_rd_size +=
1187 ret = HAL_CALL(hal).read_response(hal->halinfo.hal_private,
1192 ret = HAL_CALL(hal).write_response(hal->halinfo.hal_private,
1197 ret = HAL_CALL(hal).lock_response(hal->halinfo.hal_private,
1202 dip = hal->halinfo.dip;
1209 s1394_hal_shutdown(hal, B_TRUE);
1238 HAL_CALL(hal).response_complete(hal->halinfo.hal_private,
1258 s1394_compare_swap(s1394_hal_t *hal, s1394_target_t *target, cmd1394_cmd_t *cmd)
1268 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1271 mutex_enter(&hal->topology_tree_mutex);
1273 ret = s1394_setup_asynch_command(hal, target, cmd, S1394_CMD_LOCK,
1277 mutex_exit(&hal->topology_tree_mutex);
1294 mutex_enter(&hal->topology_tree_mutex);
1295 state = hal->hal_state;
1300 s1394_remove_q_asynch_cmd(hal, cmd);
1303 if (s1394_on_br_thread(hal) == B_TRUE) {
1306 mutex_exit(&hal->topology_tree_mutex);
1321 s1394_pending_q_insert(hal, cmd, S1394_PENDING_Q_FRONT);
1322 mutex_exit(&hal->topology_tree_mutex);
1331 mutex_exit(&hal->topology_tree_mutex);
1334 ret = s1394_xfer_asynch_command(hal, cmd, &err);
1339 s1394_remove_q_asynch_cmd(hal, cmd);
1340 s1394_pending_q_insert(hal, cmd, S1394_PENDING_Q_FRONT);
1351 s1394_remove_q_asynch_cmd(hal, cmd);
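
s1394_compare_swap() builds the command as a lock request and, when a bus reset intervenes, pulls it back off the outstanding queue and parks it at the front of the pending queue (S1394_PENDING_Q_FRONT) for a retry once the reset settles; the s1394_on_br_thread() check covers lock requests issued from the bus-reset thread itself. The semantics the remote node applies to a 1394 compare-swap are worth keeping in mind; a standalone illustration follows, with hypothetical names rather than the cmd1394 interface.

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustration of 1394 compare-swap semantics: the requester supplies the
 * value it expects (arg) and the value it wants stored (data); the target
 * returns the old value, and the swap took effect only if old == arg.
 */
typedef struct csr32 {
        uint32_t value;
} csr32_t;

/* Stand-in for the target node's atomic handling of the lock request. */
static uint32_t
remote_compare_swap(csr32_t *reg, uint32_t arg, uint32_t data)
{
        uint32_t old = reg->value;

        if (old == arg)
                reg->value = data;
        return (old);
}

/* Typical caller pattern: read, compute, compare-swap, retry if we lost. */
static bool
update_register(csr32_t *reg, uint32_t (*compute_new)(uint32_t))
{
        for (int tries = 0; tries < 4; tries++) {
                uint32_t expected = reg->value;         /* read current value */
                uint32_t wanted = compute_new(expected);

                if (remote_compare_swap(reg, expected, wanted) == expected)
                        return (true);                  /* our swap won */
        }
        return (false); /* kept losing the race; give up */
}
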
1382 s1394_split_lock_req(s1394_hal_t *hal, s1394_target_t *target,
1392 if (s1394_alloc_cmd(hal, T1394_ALLOC_CMD_NOSLEEP, &tmp_cmd) !=
1438 if (s1394_compare_swap(hal, target, tmp_cmd) != DDI_SUCCESS) {
1442 if (s1394_free_cmd(hal, &tmp_cmd) != DDI_SUCCESS)
1584 s1394_pending_q_insert(s1394_hal_t *hal, cmd1394_cmd_t *cmd, uint_t flags)
1593 mutex_enter(&hal->pending_q_mutex);
1599 if ((hal->pending_q_head == NULL) && (hal->pending_q_tail == NULL)) {
1601 hal->pending_q_head = (cmd1394_cmd_t *)cmd;
1602 hal->pending_q_tail = (cmd1394_cmd_t *)cmd;
1607 s_priv->cmd_priv_next = hal->pending_q_head;
1610 temp_cmd = (cmd1394_cmd_t *)hal->pending_q_head;
1615 hal->pending_q_head = (cmd1394_cmd_t *)cmd;
1618 s_priv->cmd_priv_prev = hal->pending_q_tail;
1621 temp_cmd = (cmd1394_cmd_t *)hal->pending_q_tail;
1626 hal->pending_q_tail = (cmd1394_cmd_t *)cmd;
1629 mutex_exit(&hal->pending_q_mutex);
1632 hal->hal_kstats->pending_q_insert++;
1644 s1394_pending_q_remove(s1394_hal_t *hal)
1654 mutex_enter(&hal->pending_q_mutex);
1656 cmd = (cmd1394_cmd_t *)hal->pending_q_tail;
1658 mutex_exit(&hal->pending_q_mutex);
1678 hal->pending_q_head = (cmd1394_cmd_t *)NULL;
1680 hal->pending_q_tail = (cmd1394_cmd_t *)prev_cmd;
1682 mutex_exit(&hal->pending_q_mutex);
1695 s1394_resend_pending_cmds(s1394_hal_t *hal)
1702 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1705 done = s1394_process_pending_q(hal);
1708 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1724 s1394_process_pending_q(s1394_hal_t *hal)
1739 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1742 cmd = s1394_pending_q_remove(hal);
1758 if (h_priv->bus_generation == hal->generation_count) {
1759 ret = s1394_pending_q_helper(hal, cmd);
1790 if (h_priv->bus_generation == hal->generation_count) {
1791 ret = s1394_pending_q_helper(hal, cmd);
1795 if (s1394_lock_tree(hal) != DDI_SUCCESS)
1799 cmd->bus_generation = hal->generation_count;
1809 rw_enter(&hal->target_list_rwlock, RW_READER);
1814 rw_exit(&hal->target_list_rwlock);
1816 rw_exit(&hal->target_list_rwlock);
1824 s1394_unlock_tree(hal);
1841 s1394_unlock_tree(hal);
1848 s1394_unlock_tree(hal);
1865 from_node = IEEE1394_NODE_NUM(hal->node_id);
1879 (int)s1394_speed_map_get(hal, from_node,
1915 s1394_unlock_tree(hal);
1916 ret = s1394_pending_q_helper(hal, cmd);
1930 s1394_pending_q_helper(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
1939 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1945 s1394_insert_q_asynch_cmd(hal, cmd);
1948 ret = s1394_xfer_asynch_command(hal, cmd, &err);
1953 s1394_remove_q_asynch_cmd(hal, cmd);
1954 s1394_pending_q_insert(hal, cmd, S1394_PENDING_Q_FRONT);
1961 s1394_remove_q_asynch_cmd(hal, cmd);
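
The last three routines form the post-reset resend path: s1394_resend_pending_cmds() loops on s1394_process_pending_q(), which pops commands off the tail of the pending queue, refreshes the generation and target addressing for any command built against an older bus generation, and uses s1394_pending_q_helper() to put each command back on the outstanding queue and re-transfer it; a command that collides with yet another reset goes back on the front of the pending queue. A much-simplified standalone model of that loop, with illustrative names and without the locking and target re-resolution the real code performs:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative types: a command remembers the generation it was built for. */
typedef struct m_cmd {
        struct m_cmd    *next;
        unsigned int    bus_generation;
} m_cmd_t;

typedef struct m_hal {
        m_cmd_t         *pending_head;          /* commands awaiting resend */
        unsigned int    generation_count;       /* current bus generation */
} m_hal_t;

/* Pop the next command to resend (NULL when the queue is empty). */
static m_cmd_t *
pending_pop(m_hal_t *hal)
{
        m_cmd_t *cmd = hal->pending_head;

        if (cmd != NULL)
                hal->pending_head = cmd->next;
        return (cmd);
}

/* Push a command back so it is retried first next time around. */
static void
pending_push_front(m_hal_t *hal, m_cmd_t *cmd)
{
        cmd->next = hal->pending_head;
        hal->pending_head = cmd;
}

/* Stand-in for re-issuing the transfer; false means a reset intervened. */
static bool
try_xfer(m_hal_t *hal, m_cmd_t *cmd)
{
        (void) hal;
        (void) cmd;
        return (true);
}

/* Returns true when there is nothing more to do right now. */
static bool
process_pending_q(m_hal_t *hal)
{
        m_cmd_t *cmd = pending_pop(hal);

        if (cmd == NULL)
                return (true);          /* queue drained */

        /* Commands built for an older bus generation get a fresh one. */
        if (cmd->bus_generation != hal->generation_count)
                cmd->bus_generation = hal->generation_count;

        if (!try_xfer(hal, cmd)) {
                pending_push_front(hal, cmd);   /* another reset: stop for now */
                return (true);
        }
        return (false);                 /* sent one; keep draining */
}

void
resend_pending_cmds(m_hal_t *hal)
{
        bool done = false;

        while (!done)
                done = process_pending_q(hal);
}
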