Lines matching refs: irq_ptr (a short usage sketch follows the listing)

125 		nr += q->irq_ptr->nr_input_qs;
127 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
138 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
143 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
149 q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
150 q->first_to_check, count, q->irq_ptr->int_parm);
176 nr += q->irq_ptr->nr_input_qs;
178 ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
188 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
195 q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
196 q->first_to_check, count, q->irq_ptr->int_parm);
271 static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
276 for_each_input_queue(irq_ptr, q, i)
279 for_each_output_queue(irq_ptr, q, i)
287 unsigned long schid = *((u32 *) &q->irq_ptr->schid);
291 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
295 schid = q->irq_ptr->sch_token;
326 unsigned long schid = *((u32 *) &q->irq_ptr->schid);
339 schid = q->irq_ptr->sch_token;
357 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
359 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
366 unsigned long schid = *((u32 *) &q->irq_ptr->schid);
370 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
374 schid = q->irq_ptr->sch_token;
387 if (qdio_need_siga_sync(q->irq_ptr))
418 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
454 if (qdio_need_siga_sync(q->irq_ptr))
463 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
469 if (q->irq_ptr->perf_stat_enabled)
473 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
481 if (q->irq_ptr->perf_stat_enabled)
485 if (q->irq_ptr->perf_stat_enabled)
487 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
494 dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
533 if (qdio_need_siga_sync(q->irq_ptr))
556 if (qdio_need_siga_sync(q->irq_ptr))
569 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
573 if (q->irq_ptr->perf_stat_enabled)
577 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out error:%1d %02x",
583 if (q->irq_ptr->perf_stat_enabled)
588 if (q->irq_ptr->perf_stat_enabled)
590 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
598 dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
636 if (!qdio_need_siga_out(q->irq_ptr))
639 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
656 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
673 static inline void qdio_set_state(struct qdio_irq *irq_ptr,
676 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);
678 irq_ptr->state = state;
682 static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
685 DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
692 static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
694 if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
697 qdio_deliver_irq(irq_ptr);
698 irq_ptr->last_data_irq_time = S390_lowcore.int_clock;
701 static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
707 DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
712 if (irq_ptr->nr_input_qs)
713 first_to_check = irq_ptr->input_qs[0]->first_to_check;
715 irq_ptr->error_handler(irq_ptr->cdev, QDIO_ERROR_ACTIVATE, 0,
716 first_to_check, 0, irq_ptr->int_parm);
717 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
725 static int qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
728 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
738 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
742 DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
744 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
752 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
756 if (!intparm || !irq_ptr) {
762 if (irq_ptr->perf_stat_enabled)
763 irq_ptr->perf_stat.qdio_int++;
766 DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
767 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
771 qdio_irq_check_sense(irq_ptr, irb);
777 switch (irq_ptr->state) {
779 rc = qdio_establish_handle_irq(irq_ptr, cstat, dstat, dcc);
782 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
787 qdio_int_handler_pci(irq_ptr);
791 qdio_handle_activate_check(irq_ptr, intparm, cstat,
803 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qint retry");
804 rc = ccw_device_start(cdev, irq_ptr->ccw, intparm, 0, 0);
807 DBF_ERROR("%4x RETRY ERR", irq_ptr->schid.sch_no);
809 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
874 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
878 if (!irq_ptr)
885 mutex_lock(&irq_ptr->setup_mutex);
890 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
891 mutex_unlock(&irq_ptr->setup_mutex);
898 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
900 qdio_shutdown_debug_entries(irq_ptr);
902 rc = qdio_cancel_ccw(irq_ptr, how);
903 qdio_shutdown_thinint(irq_ptr);
904 qdio_shutdown_irq(irq_ptr);
906 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
907 mutex_unlock(&irq_ptr->setup_mutex);
920 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
923 if (!irq_ptr)
928 DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
929 mutex_lock(&irq_ptr->setup_mutex);
931 irq_ptr->debug_area = NULL;
933 mutex_unlock(&irq_ptr->setup_mutex);
935 qdio_free_queues(irq_ptr);
936 free_page((unsigned long) irq_ptr->qdr);
937 free_page(irq_ptr->chsc_page);
938 kfree(irq_ptr->ccw);
939 free_page((unsigned long) irq_ptr);
954 struct qdio_irq *irq_ptr;
964 irq_ptr = (void *) get_zeroed_page(GFP_KERNEL);
965 if (!irq_ptr)
968 irq_ptr->ccw = kmalloc(sizeof(*irq_ptr->ccw), GFP_KERNEL | GFP_DMA);
969 if (!irq_ptr->ccw)
972 /* kmemleak doesn't scan the page-allocated irq_ptr: */
973 kmemleak_not_leak(irq_ptr->ccw);
975 irq_ptr->cdev = cdev;
976 mutex_init(&irq_ptr->setup_mutex);
977 if (qdio_allocate_dbf(irq_ptr))
980 DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
989 irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
990 if (!irq_ptr->chsc_page)
994 irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
995 if (!irq_ptr->qdr)
998 rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
1002 cdev->private->qdio_data = irq_ptr;
1003 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1007 free_page((unsigned long) irq_ptr->qdr);
1009 free_page(irq_ptr->chsc_page);
1012 kfree(irq_ptr->ccw);
1014 free_page((unsigned long) irq_ptr);
1043 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1052 if (!irq_ptr)
1055 if (init_data->no_input_qs > irq_ptr->max_input_qs ||
1056 init_data->no_output_qs > irq_ptr->max_output_qs)
1079 mutex_lock(&irq_ptr->setup_mutex);
1080 qdio_trace_init_data(irq_ptr, init_data);
1081 qdio_setup_irq(irq_ptr, init_data);
1083 rc = qdio_establish_thinint(irq_ptr);
1088 irq_ptr->ccw->cmd_code = ciw->cmd;
1089 irq_ptr->ccw->flags = CCW_FLAG_SLI;
1090 irq_ptr->ccw->count = ciw->count;
1091 irq_ptr->ccw->cda = virt_to_dma32(irq_ptr->qdr);
1096 rc = ccw_device_start(cdev, irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
1099 DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
1105 irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
1106 irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
1112 if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
1117 qdio_setup_ssqd_info(irq_ptr);
1120 qdio_init_buf_states(irq_ptr);
1122 mutex_unlock(&irq_ptr->setup_mutex);
1123 qdio_print_subchannel_info(irq_ptr);
1124 qdio_setup_debug_entries(irq_ptr);
1128 qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR);
1131 qdio_shutdown_thinint(irq_ptr);
1133 qdio_shutdown_irq(irq_ptr);
1134 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1135 mutex_unlock(&irq_ptr->setup_mutex);
1146 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1154 if (!irq_ptr)
1163 mutex_lock(&irq_ptr->setup_mutex);
1164 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
1169 irq_ptr->ccw->cmd_code = ciw->cmd;
1170 irq_ptr->ccw->flags = CCW_FLAG_SLI;
1171 irq_ptr->ccw->count = ciw->count;
1172 irq_ptr->ccw->cda = 0;
1177 rc = ccw_device_start(cdev, irq_ptr->ccw, QDIO_DOING_ACTIVATE,
1181 DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
1189 switch (irq_ptr->state) {
1195 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
1199 mutex_unlock(&irq_ptr->setup_mutex);
1227 if (qdio_need_siga_in(q->irq_ptr))
1243 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1248 if (!irq_ptr)
1251 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "addi b:%02x c:%02x", bufnr, count);
1253 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1258 return handle_inbound(irq_ptr->input_qs[q_nr], bufnr, count);
1288 } else if (qdio_need_siga_sync(q->irq_ptr)) {
1314 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1319 if (!irq_ptr)
1322 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "addo b:%02x c:%02x", bufnr, count);
1324 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1329 return handle_outbound(irq_ptr->output_qs[q_nr], bufnr, count, aob);
1344 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1347 if (!irq_ptr)
1350 for_each_input_queue(irq_ptr, q, i)
1353 clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
1359 if (test_nonshared_ind(irq_ptr))
1362 for_each_input_queue(irq_ptr, q, i) {
1370 if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
1388 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1390 if (!irq_ptr)
1393 if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
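The matches above repeat one entry-point pattern throughout the file: recover the per-subchannel struct qdio_irq from cdev->private->qdio_data, bail out if it is missing, and refuse work unless the interface is in QDIO_IRQ_STATE_ACTIVE (see, for example, the matches around lines 1243-1258 and 1314-1329). Below is a minimal sketch of that pattern, not code taken from the file: the function name and the exact return codes are illustrative assumptions, while the fields and helpers it uses (cdev->private->qdio_data, irq_ptr->state, DBF_DEV_EVENT(), handle_inbound(), irq_ptr->input_qs[]) are the ones visible in the listing and assume the driver-internal qdio definitions are in scope.

/*
 * Hypothetical helper showing the irq_ptr access pattern from the listing.
 * Assumes the driver-internal definitions (struct qdio_irq, DBF_DEV_EVENT,
 * handle_inbound, QDIO_IRQ_STATE_ACTIVE) are available, as they are inside
 * the file the listing was taken from.
 */
static int example_queue_inbound(struct ccw_device *cdev, unsigned int q_nr,
				 unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	/* No qdio_data: qdio_allocate() has not set up this device yet. */
	if (!irq_ptr)
		return -ENODEV;		/* return code is an assumption */

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "addi b:%02x c:%02x", bufnr, count);

	/* Only an ACTIVE interface may accept new buffers. */
	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;		/* return code is an assumption */

	return handle_inbound(irq_ptr->input_qs[q_nr], bufnr, count);
}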