Lines matching refs: hi — a cross-reference listing of every source line that references the struct cs_hsi_iface pointer hi. The code appears to be the Linux kernel CMT speech HSI client driver (drivers/hsi/clients/cmt_speech.c); the number leading each entry is that line's position in the file.

39 	struct cs_hsi_iface	*hi;
123 static void cs_hsi_read_on_control(struct cs_hsi_iface *hi);
124 static void cs_hsi_read_on_data(struct cs_hsi_iface *hi);
211 struct cs_hsi_iface *hi = msg->context;
213 list_add_tail(&msg->link, &hi->cmdqueue);
218 struct cs_hsi_iface *hi = msg->context;
220 spin_lock(&hi->lock);
224 if (hi->iface_state != CS_STATE_CLOSED)
225 dev_err(&hi->cl->device, "Cmd flushed while driver active\n");
228 hi->control_state &=
231 hi->control_state & SSI_CHANNEL_STATE_WRITING)
232 hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;
236 spin_unlock(&hi->lock);
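
The destructor above (lines 218-236, evidently the command-message destructor) hands a flushed command back to the free list that line 213 appends to. The matching claim helper is only called, never defined, in this match set (lines 499 and 543); a minimal sketch of what it plausibly looks like, assuming cmdqueue is a free list of pre-allocated command messages and that callers already hold hi->lock:

static struct hsi_msg *cs_claim_cmd(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;

	/* Caller holds hi->lock; the list was filled by cs_alloc_cmds(). */
	BUG_ON(list_empty(&hi->cmdqueue));

	msg = list_first_entry(&hi->cmdqueue, struct hsi_msg, link);
	list_del(&msg->link);
	/* Re-arm the destructor so a flush returns the message to the list. */
	msg->destructor = cs_cmd_destructor;	/* destructor name assumed */

	return msg;
}
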
264 static int cs_alloc_cmds(struct cs_hsi_iface *hi)
270 INIT_LIST_HEAD(&hi->cmdqueue);
283 msg->context = hi;
284 list_add_tail(&msg->link, &hi->cmdqueue);
290 cs_free_cmds(hi);
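
cs_alloc_cmds() (lines 264-290) evidently fills that free list up front so the completion-context paths never allocate. A sketch of the loop between lines 270 and 284, assuming a pool-size constant (here called CS_MAX_CMDS, not visible in these matches) and one u32 command word per message:

static int cs_alloc_cmds(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	u32 *buf;
	unsigned int i;

	INIT_LIST_HEAD(&hi->cmdqueue);

	for (i = 0; i < CS_MAX_CMDS; i++) {	/* pool size assumed */
		msg = hsi_alloc_msg(1, GFP_KERNEL);
		if (!msg)
			goto out;
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			hsi_free_msg(msg);
			goto out;
		}
		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
		msg->context = hi;				/* line 283 */
		list_add_tail(&msg->link, &hi->cmdqueue);	/* line 284 */
	}
	return 0;
out:
	cs_free_cmds(hi);	/* line 290: unwind a partial pool */
	return -ENOMEM;
}
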
296 struct cs_hsi_iface *hi = msg->context;
301 spin_lock(&hi->lock);
302 if (hi->iface_state != CS_STATE_CLOSED)
306 hi->data_state &=
309 hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;
312 if (unlikely(waitqueue_active(&hi->datawait)))
313 wake_up_interruptible(&hi->datawait);
315 spin_unlock(&hi->lock);
318 static int cs_hsi_alloc_data(struct cs_hsi_iface *hi)
330 rxmsg->context = hi;
339 txmsg->context = hi;
341 hi->data_rx_msg = rxmsg;
342 hi->data_tx_msg = txmsg;
359 static void cs_hsi_free_data(struct cs_hsi_iface *hi)
361 cs_hsi_free_data_msg(hi->data_rx_msg);
362 cs_hsi_free_data_msg(hi->data_tx_msg);
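
Lines 330-342 wire two long-lived data messages (one RX, one TX) to the interface; their payload pointers are re-aimed at an mmap slot before every transfer (lines 622-624 and 717-719), so allocation itself is tiny. A plausible shape for the allocator, its name inferred from the cs_hsi_free_data_msg() calls at lines 361-362:

static struct hsi_msg *cs_hsi_alloc_data_msg(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;

	msg = hsi_alloc_msg(1, GFP_KERNEL);	/* one scatterlist entry */
	if (!msg)
		return NULL;

	/* No buffer yet: sg_init_one() aims it at a slot per transfer. */
	msg->destructor = cs_hsi_data_destructor;	/* name assumed */
	msg->context = hi;

	return msg;
}
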
365 static inline void __cs_hsi_error_pre(struct cs_hsi_iface *hi,
369 spin_lock(&hi->lock);
370 dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n",
374 static inline void __cs_hsi_error_post(struct cs_hsi_iface *hi)
376 spin_unlock(&hi->lock);
391 static void cs_hsi_control_read_error(struct cs_hsi_iface *hi,
394 __cs_hsi_error_pre(hi, msg, "control read", &hi->control_state);
396 __cs_hsi_error_read_bits(&hi->control_state);
397 __cs_hsi_error_post(hi);
400 static void cs_hsi_control_write_error(struct cs_hsi_iface *hi,
403 __cs_hsi_error_pre(hi, msg, "control write", &hi->control_state);
405 __cs_hsi_error_write_bits(&hi->control_state);
406 __cs_hsi_error_post(hi);
410 static void cs_hsi_data_read_error(struct cs_hsi_iface *hi, struct hsi_msg *msg)
412 __cs_hsi_error_pre(hi, msg, "data read", &hi->data_state);
413 __cs_hsi_error_read_bits(&hi->data_state);
414 __cs_hsi_error_post(hi);
417 static void cs_hsi_data_write_error(struct cs_hsi_iface *hi,
420 __cs_hsi_error_pre(hi, msg, "data write", &hi->data_state);
421 __cs_hsi_error_write_bits(&hi->data_state);
422 __cs_hsi_error_post(hi);
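
All four error paths share the __cs_hsi_error_pre()/__cs_hsi_error_post() lock-and-log bracket and differ only in which bits they poison. Assuming the SSI_CHANNEL_STATE_* values form a plain bitmask (as the &= / |= usage throughout suggests), the two bit helpers plausibly reduce to:

static inline void __cs_hsi_error_read_bits(unsigned int *state)
{
	*state |= SSI_CHANNEL_STATE_ERROR;
	*state &= ~(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL);
}

static inline void __cs_hsi_error_write_bits(unsigned int *state)
{
	*state |= SSI_CHANNEL_STATE_ERROR;
	*state &= ~SSI_CHANNEL_STATE_WRITING;
}
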
428 struct cs_hsi_iface *hi = msg->context;
430 spin_lock(&hi->lock);
431 hi->control_state &= ~SSI_CHANNEL_STATE_READING;
433 dev_err(&hi->cl->device, "Control RX error detected\n");
434 spin_unlock(&hi->lock);
435 cs_hsi_control_read_error(hi, msg);
438 dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
440 if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) {
443 &hi->mmap_cfg->tstamp_rx_ctrl;
450 spin_unlock(&hi->lock);
455 cs_hsi_read_on_control(hi);
460 struct cs_hsi_iface *hi = msg->context;
464 dev_err(&hi->cl->device, "Control peek RX error detected\n");
465 cs_hsi_control_read_error(hi, msg);
469 WARN_ON(!(hi->control_state & SSI_CHANNEL_STATE_READING));
471 dev_dbg(&hi->cl->device, "Peek on control complete, reading\n");
474 ret = hsi_async_read(hi->cl, msg);
476 cs_hsi_control_read_error(hi, msg);
479 static void cs_hsi_read_on_control(struct cs_hsi_iface *hi)
484 spin_lock(&hi->lock);
485 if (hi->control_state & SSI_CHANNEL_STATE_READING) {
486 dev_err(&hi->cl->device, "Control read already pending (%u)\n",
487 hi->control_state);
488 spin_unlock(&hi->lock);
491 if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
492 dev_err(&hi->cl->device, "Control read error (%u)\n",
493 hi->control_state);
494 spin_unlock(&hi->lock);
497 hi->control_state |= SSI_CHANNEL_STATE_READING;
498 dev_dbg(&hi->cl->device, "Issuing RX on control\n");
499 msg = cs_claim_cmd(hi);
500 spin_unlock(&hi->lock);
504 ret = hsi_async_read(hi->cl, msg);
506 cs_hsi_control_read_error(hi, msg);
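
Control reads are two-phase: line 499 claims a command message under the lock, the lock is dropped, and only then is the transfer armed. The gap between lines 500 and 504 presumably configures a zero-descriptor "peek" whose completion handler is the function seen at lines 460-476, which restores a real descriptor and issues the actual read. A sketch of that gap (field values and handler name are assumptions):

	msg->sgt.nents = 0;	/* zero descriptors: a pure peek */
	msg->complete = cs_hsi_peek_on_control_complete;	/* name assumed */
	/* ...then line 504 issues hsi_async_read(hi->cl, msg). */
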
511 struct cs_hsi_iface *hi = msg->context;
513 spin_lock(&hi->lock);
514 hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;
516 spin_unlock(&hi->lock);
518 cs_hsi_control_write_error(hi, msg);
520 dev_err(&hi->cl->device,
526 static int cs_hsi_write_on_control(struct cs_hsi_iface *hi, u32 message)
531 spin_lock(&hi->lock);
532 if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
533 spin_unlock(&hi->lock);
536 if (hi->control_state & SSI_CHANNEL_STATE_WRITING) {
537 dev_err(&hi->cl->device,
539 spin_unlock(&hi->lock);
542 hi->control_state |= SSI_CHANNEL_STATE_WRITING;
543 msg = cs_claim_cmd(hi);
544 spin_unlock(&hi->lock);
549 dev_dbg(&hi->cl->device,
551 ret = hsi_async_write(hi->cl, msg);
553 dev_err(&hi->cl->device,
555 cs_hsi_control_write_error(hi, msg);
565 if (!(hi->control_state & SSI_CHANNEL_STATE_READING)) {
566 dev_err(&hi->cl->device, "Restarting control reads\n");
567 cs_hsi_read_on_control(hi);
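
The write path mirrors it: mark WRITING and claim a message under the lock (lines 542-544), then fill in the single command word and send outside the lock. Lines 565-567 additionally re-arm the control read after a successful write, matching the destructor's observation (line 225) that the controller can flush a pending read. The gap between lines 544 and 551, sketched (handler name and buffer access are assumptions):

	msg->sgt.nents = 1;
	msg->complete = cs_hsi_write_on_control_complete;	/* name assumed */
	buf = sg_virt(msg->sgt.sgl);
	*buf = message;		/* the u32 command from line 526 */
	/* ...then line 551 issues hsi_async_write(hi->cl, msg). */
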
575 struct cs_hsi_iface *hi = msg->context;
579 cs_hsi_data_read_error(hi, msg);
583 spin_lock(&hi->lock);
584 WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_READING));
585 hi->data_state &= ~SSI_CHANNEL_STATE_READING;
587 payload |= hi->rx_slot;
588 hi->rx_slot++;
589 hi->rx_slot %= hi->rx_ptr_boundary;
591 hi->mmap_cfg->rx_ptr = hi->rx_slot;
592 if (unlikely(waitqueue_active(&hi->datawait)))
593 wake_up_interruptible(&hi->datawait);
594 spin_unlock(&hi->lock);
596 cs_notify_data(payload, hi->rx_bufs);
597 cs_hsi_read_on_data(hi);
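
Between lines 585 and 587 the notification word is evidently seeded with an RX-data event code, so that line 587 can OR the received slot into its parameter bits before the handler republishes the rolling pointer (line 591) and chains the next read (line 597). A sketch with the constant name assumed:

	payload = CS_RX_DATA_RECEIVED;	/* event code, name assumed */
	payload |= hi->rx_slot;		/* line 587: slot in the param bits */
	/* rx_slot then advances modulo rx_ptr_boundary (lines 588-589),
	 * not modulo rx_bufs, so userspace can spot dropped buffers. */
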
602 struct cs_hsi_iface *hi = msg->context;
607 cs_hsi_data_read_error(hi, msg);
610 if (unlikely(hi->iface_state != CS_STATE_CONFIGURED)) {
611 dev_err(&hi->cl->device, "Data received in invalid state\n");
612 cs_hsi_data_read_error(hi, msg);
616 spin_lock(&hi->lock);
617 WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_POLL));
618 hi->data_state &= ~SSI_CHANNEL_STATE_POLL;
619 hi->data_state |= SSI_CHANNEL_STATE_READING;
620 spin_unlock(&hi->lock);
622 address = (u32 *)(hi->mmap_base +
623 hi->rx_offsets[hi->rx_slot % hi->rx_bufs]);
624 sg_init_one(msg->sgt.sgl, address, hi->buf_size);
627 ret = hsi_async_read(hi->cl, msg);
629 cs_hsi_data_read_error(hi, msg);
650 static void cs_hsi_read_on_data(struct cs_hsi_iface *hi)
655 spin_lock(&hi->lock);
656 if (hi->data_state &
658 dev_dbg(&hi->cl->device, "Data read already pending (%u)\n",
659 hi->data_state);
660 spin_unlock(&hi->lock);
663 hi->data_state |= SSI_CHANNEL_STATE_POLL;
664 spin_unlock(&hi->lock);
666 rxmsg = hi->data_rx_msg;
667 sg_init_one(rxmsg->sgt.sgl, (void *)hi->mmap_base, 0);
671 ret = hsi_async_read(hi->cl, rxmsg);
673 cs_hsi_data_read_error(hi, rxmsg);
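
Data reads use the same peek trick as control reads: line 667 points the RX message at the start of the mmap area with length zero, so the transfer completes as soon as the modem has data, copying nothing. The completion handler (lines 602-629) flips POLL to READING, re-aims the same message at the proper slot, and issues the real read. The gap after line 667, sketched (handler name assumed):

	rxmsg->sgt.nents = 0;	/* zero descriptors: wait, copy nothing */
	rxmsg->complete = cs_hsi_peek_on_data_complete;	/* name assumed */
	/* ...then line 671 issues hsi_async_read(hi->cl, rxmsg). */
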
678 struct cs_hsi_iface *hi = msg->context;
681 spin_lock(&hi->lock);
682 hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;
683 if (unlikely(waitqueue_active(&hi->datawait)))
684 wake_up_interruptible(&hi->datawait);
685 spin_unlock(&hi->lock);
687 cs_hsi_data_write_error(hi, msg);
691 static int cs_hsi_write_on_data(struct cs_hsi_iface *hi, unsigned int slot)
697 spin_lock(&hi->lock);
698 if (hi->iface_state != CS_STATE_CONFIGURED) {
699 dev_err(&hi->cl->device, "Not configured, aborting\n");
703 if (hi->data_state & SSI_CHANNEL_STATE_ERROR) {
704 dev_err(&hi->cl->device, "HSI error, aborting\n");
708 if (hi->data_state & SSI_CHANNEL_STATE_WRITING) {
709 dev_err(&hi->cl->device, "Write pending on data channel.\n");
713 hi->data_state |= SSI_CHANNEL_STATE_WRITING;
714 spin_unlock(&hi->lock);
716 hi->tx_slot = slot;
717 address = (u32 *)(hi->mmap_base + hi->tx_offsets[hi->tx_slot]);
718 txmsg = hi->data_tx_msg;
719 sg_init_one(txmsg->sgt.sgl, address, hi->buf_size);
721 ret = hsi_async_write(hi->cl, txmsg);
723 cs_hsi_data_write_error(hi, txmsg);
728 spin_unlock(&hi->lock);
730 cs_hsi_data_write_error(hi, hi->data_tx_msg);
735 static unsigned int cs_hsi_get_state(struct cs_hsi_iface *hi)
737 return hi->iface_state;
740 static int cs_hsi_command(struct cs_hsi_iface *hi, u32 cmd)
747 ret = cs_hsi_write_on_control(hi, cmd);
751 ret = cs_hsi_write_on_data(hi, cmd & CS_PARAM_MASK);
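
cs_hsi_command() fans a 32-bit user command out to one of the two channels: line 747 forwards control messages toward the modem, line 751 handles the local "TX data ready" case whose parameter bits carry the slot number. A sketch of the dispatch, assuming target/command masks in the style of the CS_PARAM_MASK visible at line 751 (all mask and command names here are assumptions):

static int cs_hsi_command(struct cs_hsi_iface *hi, u32 cmd)
{
	int ret = 0;

	local_bh_disable();	/* serialise against the BH-context users */
	switch (cmd & TARGET_MASK) {
	case TARGET_REMOTE:	/* goes out on the control channel */
		ret = cs_hsi_write_on_control(hi, cmd);
		break;
	case TARGET_LOCAL:	/* consumed locally: queue a TX buffer */
		if ((cmd & CS_CMD_MASK) == CS_TX_DATA_READY)
			ret = cs_hsi_write_on_data(hi, cmd & CS_PARAM_MASK);
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	local_bh_enable();

	return ret;
}
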
764 static void cs_hsi_set_wakeline(struct cs_hsi_iface *hi, bool new_state)
768 spin_lock_bh(&hi->lock);
769 if (hi->wakeline_state != new_state) {
770 hi->wakeline_state = new_state;
772 dev_dbg(&hi->cl->device, "setting wake line to %d (%p)\n",
773 new_state, hi->cl);
775 spin_unlock_bh(&hi->lock);
779 ssip_slave_start_tx(hi->master);
781 ssip_slave_stop_tx(hi->master);
784 dev_dbg(&hi->cl->device, "wake line set to %d (%p)\n",
785 new_state, hi->cl);
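
Note the two-step shape of the wakeline update: the state change is recorded under the spinlock (lines 768-775), but ssip_slave_start_tx()/ssip_slave_stop_tx() run only after the lock is dropped (lines 779-781). A change flag carries the decision across the unlock; a minimal sketch of that connective tissue:

	bool change = false;

	spin_lock_bh(&hi->lock);
	if (hi->wakeline_state != new_state) {
		hi->wakeline_state = new_state;
		change = true;
	}
	spin_unlock_bh(&hi->lock);

	/* Touch the master only outside the spinlock. */
	if (change) {
		if (new_state)
			ssip_slave_start_tx(hi->master);
		else
			ssip_slave_stop_tx(hi->master);
	}
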
788 static void set_buffer_sizes(struct cs_hsi_iface *hi, int rx_bufs, int tx_bufs)
790 hi->rx_bufs = rx_bufs;
791 hi->tx_bufs = tx_bufs;
792 hi->mmap_cfg->rx_bufs = rx_bufs;
793 hi->mmap_cfg->tx_bufs = tx_bufs;
795 if (hi->flags & CS_FEAT_ROLLING_RX_COUNTER) {
803 hi->rx_ptr_boundary = (rx_bufs << RX_PTR_BOUNDARY_SHIFT);
804 hi->mmap_cfg->rx_ptr_boundary = hi->rx_ptr_boundary;
806 hi->rx_ptr_boundary = hi->rx_bufs;
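
When the rolling counter is negotiated, line 803 widens the wrap point from rx_bufs to rx_bufs << RX_PTR_BOUNDARY_SHIFT; otherwise line 806 keeps the plain modulo-rx_bufs pointer. Assuming a shift of 8 and rx_bufs = 4, the published pointer counts 0..1023 while the buffer index stays rx_ptr % 4, so a jump from 5 to 9 tells the reader that pointer values 6, 7 and 8 (buffer indices 2, 3 and 0) went unread. A consumer-side check, sketched with hypothetical names:

static unsigned int cs_missed_bufs(unsigned int prev_ptr, unsigned int rx_ptr,
				   unsigned int boundary)
{
	/* Distance the rolling pointer travelled since the last read;
	 * anything beyond one step means buffers were overwritten unread. */
	unsigned int delta = (rx_ptr + boundary - prev_ptr) % boundary;

	return delta ? delta - 1 : 0;
}
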
810 static int check_buf_params(struct cs_hsi_iface *hi,
815 size_t ctrl_size_aligned = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
821 } else if ((buf_size_aligned + ctrl_size_aligned) >= hi->mmap_size) {
822 dev_err(&hi->cl->device, "No space for the requested buffer "
833 static int cs_hsi_data_sync(struct cs_hsi_iface *hi)
837 spin_lock_bh(&hi->lock);
839 if (!cs_state_xfer_active(hi->data_state)) {
840 dev_dbg(&hi->cl->device, "hsi_data_sync break, idle\n");
847 if (!cs_state_xfer_active(hi->data_state))
854 * prepare_to_wait must be called with hi->lock held
857 prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE);
858 spin_unlock_bh(&hi->lock);
861 spin_lock_bh(&hi->lock);
862 finish_wait(&hi->datawait, &wait);
864 dev_dbg(&hi->cl->device,
873 spin_unlock_bh(&hi->lock);
874 dev_dbg(&hi->cl->device, "hsi_data_sync done with res %d\n", r);
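
cs_hsi_data_sync() sleeps until the data channel goes idle. The comment at line 854 pins down the locking rule: prepare_to_wait() must run while hi->lock is held, so the completion paths, which test waitqueue_active() under the same lock (lines 592-593 and 683-684), cannot observe an empty queue and skip a wakeup the sleeper is about to wait for. One loop iteration under that rule, sketched (the timeout constant is an assumption):

	for (;;) {
		long s;
		DEFINE_WAIT(wait);

		if (!cs_state_xfer_active(hi->data_state))
			break;				/* idle: done */
		if (signal_pending(current)) {
			r = -ERESTARTSYS;
			break;
		}
		/* Register on the waitqueue while still holding hi->lock. */
		prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock_bh(&hi->lock);		/* line 858 */
		s = schedule_timeout(
			msecs_to_jiffies(CS_HSI_TRANSFER_TIMEOUT_MS));
		spin_lock_bh(&hi->lock);		/* line 861 */
		finish_wait(&hi->datawait, &wait);	/* line 862 */
		if (!s) {
			r = -EIO;			/* timed out */
			break;
		}
	}
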
879 static void cs_hsi_data_enable(struct cs_hsi_iface *hi,
884 BUG_ON(hi->buf_size == 0);
886 set_buffer_sizes(hi, buf_cfg->rx_bufs, buf_cfg->tx_bufs);
888 hi->slot_size = L1_CACHE_ALIGN(hi->buf_size);
889 dev_dbg(&hi->cl->device,
891 hi->slot_size, hi->buf_size, L1_CACHE_BYTES);
893 data_start = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
894 dev_dbg(&hi->cl->device,
896 data_start, sizeof(*hi->mmap_cfg), L1_CACHE_BYTES);
898 for (i = 0; i < hi->mmap_cfg->rx_bufs; i++) {
899 hi->rx_offsets[i] = data_start + i * hi->slot_size;
900 hi->mmap_cfg->rx_offsets[i] = hi->rx_offsets[i];
901 dev_dbg(&hi->cl->device, "DL buf #%u at %u\n",
902 i, hi->rx_offsets[i]);
904 for (i = 0; i < hi->mmap_cfg->tx_bufs; i++) {
905 hi->tx_offsets[i] = data_start +
906 (i + hi->mmap_cfg->rx_bufs) * hi->slot_size;
907 hi->mmap_cfg->tx_offsets[i] = hi->tx_offsets[i];
908 dev_dbg(&hi->cl->device, "UL buf #%u at %u\n",
909 i, hi->tx_offsets[i]);
912 hi->iface_state = CS_STATE_CONFIGURED;
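
The mmap area is thus laid out as the shared config header, cache-line aligned (line 893), then rx_bufs downlink slots, then tx_bufs uplink slots, each slot_size = L1_CACHE_ALIGN(buf_size) bytes (line 888). A worked example of the offset arithmetic, assuming 64-byte cache lines, a 96-byte header, buf_size = 160 and two buffers each way:

static unsigned int cs_layout_example(void)
{
	unsigned int data_start = L1_CACHE_ALIGN(96);	/* -> 128 */
	unsigned int slot_size  = L1_CACHE_ALIGN(160);	/* -> 192 */

	/* DL slots at 128 and 320 (line 899);
	 * UL slots at 512 and 704 (lines 905-906). */
	return data_start + (1 + 2) * slot_size;	/* last UL offset: 704 */
}
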
915 static void cs_hsi_data_disable(struct cs_hsi_iface *hi, int old_state)
918 dev_dbg(&hi->cl->device,
920 hi->iface_state = CS_STATE_OPENED;
924 static int cs_hsi_buf_config(struct cs_hsi_iface *hi,
928 unsigned int old_state = hi->iface_state;
930 spin_lock_bh(&hi->lock);
933 hi->iface_state = CS_STATE_OPENED;
934 spin_unlock_bh(&hi->lock);
940 r = cs_hsi_data_sync(hi);
944 WARN_ON(cs_state_xfer_active(hi->data_state));
946 spin_lock_bh(&hi->lock);
947 r = check_buf_params(hi, buf_cfg);
951 hi->buf_size = buf_cfg->buf_size;
952 hi->mmap_cfg->buf_size = hi->buf_size;
953 hi->flags = buf_cfg->flags;
955 hi->rx_slot = 0;
956 hi->tx_slot = 0;
957 hi->slot_size = 0;
959 if (hi->buf_size)
960 cs_hsi_data_enable(hi, buf_cfg);
962 cs_hsi_data_disable(hi, old_state);
964 spin_unlock_bh(&hi->lock);
966 if (old_state != hi->iface_state) {
967 if (hi->iface_state == CS_STATE_CONFIGURED) {
968 cpu_latency_qos_add_request(&hi->pm_qos_req,
971 cs_hsi_read_on_data(hi);
974 cpu_latency_qos_remove_request(&hi->pm_qos_req);
980 spin_unlock_bh(&hi->lock);
984 static int cs_hsi_start(struct cs_hsi_iface **hi, struct hsi_client *cl,
1040 BUG_ON(!hi);
1041 *hi = hsi_if;
1059 static void cs_hsi_stop(struct cs_hsi_iface *hi)
1061 dev_dbg(&hi->cl->device, "cs_hsi_stop\n");
1062 cs_hsi_set_wakeline(hi, false);
1063 ssip_slave_put_master(hi->master);
1066 hi->iface_state = CS_STATE_CLOSED;
1067 hsi_release_port(hi->cl);
1074 WARN_ON(!cs_state_idle(hi->control_state));
1075 WARN_ON(!cs_state_idle(hi->data_state));
1077 if (cpu_latency_qos_request_active(&hi->pm_qos_req))
1078 cpu_latency_qos_remove_request(&hi->pm_qos_req);
1080 spin_lock_bh(&hi->lock);
1081 cs_hsi_free_data(hi);
1082 cs_free_cmds(hi);
1083 spin_unlock_bh(&hi->lock);
1084 kfree(hi);
1192 err = cs_hsi_command(csdata->hi, data);
1209 state = cs_hsi_get_state(csdata->hi);
1228 cs_hsi_set_wakeline(csdata->hi, !!state);
1247 r = cs_hsi_buf_config(csdata->hi, &buf_cfg);
1295 ret = cs_hsi_start(&cs_char_data.hi, cs_char_data.cl, p, CS_MMAP_SIZE);
1338 cs_hsi_stop(csdata->hi);
1340 csdata->hi = NULL;
1378 cs_char_data.hi = NULL;
1407 struct cs_hsi_iface *hi;
1412 hi = cs_char_data.hi;
1413 cs_char_data.hi = NULL;
1415 if (hi)
1416 cs_hsi_stop(hi);
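
The remove path snapshots and clears the shared pointer first (lines 1412-1413, presumably inside a cs_char_data lock) and only then stops the interface, so concurrent users see either the still-valid interface or NULL, never a freed one. Sketched with that assumed lock:

	struct cs_hsi_iface *hi;

	spin_lock_bh(&cs_char_data.lock);	/* lock field assumed */
	hi = cs_char_data.hi;
	cs_char_data.hi = NULL;
	spin_unlock_bh(&cs_char_data.lock);

	if (hi)
		cs_hsi_stop(hi);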