Lines Matching refs:ch

188  * ch	:	The channel the error belongs to.
191 void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
195 CTCM_FUNTAIL, ch->id, msg, rc);
199 ch->id);
200 fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
204 ch->id);
205 fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
210 fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
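
The three fsm_event() calls above imply a simple rc dispatch. A minimal
sketch of that control flow (the CTCM_DBF logging the full handler does
around each case is trimmed out):

void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
{
	switch (rc) {
	case -EBUSY:
		fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
		break;
	case -ENODEV:
		fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
		break;
	default:
		/* anything else is reported as unknown I/O trouble */
		fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
	}
}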
248 struct channel *ch = arg;
249 struct net_device *dev = ch->netdev;
257 CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
259 duration = done_stamp - ch->prof.send_stamp;
260 if (duration > ch->prof.tx_time)
261 ch->prof.tx_time = duration;
263 if (ch->irb->scsw.cmd.count != 0)
266 CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
267 fsm_deltimer(&ch->timer);
268 while ((skb = skb_dequeue(&ch->io_queue))) {
278 spin_lock(&ch->collect_lock);
279 clear_normalized_cda(&ch->ccw[4]);
280 if (ch->collect_len > 0) {
283 if (ctcm_checkalloc_buffer(ch)) {
284 spin_unlock(&ch->collect_lock);
287 ch->trans_skb->data = ch->trans_skb_data;
288 skb_reset_tail_pointer(ch->trans_skb);
289 ch->trans_skb->len = 0;
290 if (ch->prof.maxmulti < (ch->collect_len + 2))
291 ch->prof.maxmulti = ch->collect_len + 2;
292 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
293 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
294 *((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
296 while ((skb = skb_dequeue(&ch->collect_queue))) {
298 skb_put(ch->trans_skb, skb->len), skb->len);
305 ch->collect_len = 0;
306 spin_unlock(&ch->collect_lock);
307 ch->ccw[1].count = ch->trans_skb->len;
308 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
309 ch->prof.send_stamp = jiffies;
310 rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
311 ch->prof.doios_multi++;
315 fsm_deltimer(&ch->timer);
316 ctcm_ccw_check_rc(ch, rc, "chained TX");
319 spin_unlock(&ch->collect_lock);
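
The TX-done path above chains all queued packets into one transfer:
under collect_lock it prefixes a two-byte length word (total block
length, length word included) and copies each collected skb behind it.
A condensed sketch; the stats bookkeeping of the real handler is
dropped and the exact skb-free call is an assumption:

	spin_lock(&ch->collect_lock);
	if (ch->collect_len > 0) {
		/* length word counts the payload plus its own two bytes */
		*((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
		while ((skb = skb_dequeue(&ch->collect_queue))) {
			skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
			dev_kfree_skb_irq(skb);	/* assumption: irq-safe free */
		}
		ch->collect_len = 0;
	}
	spin_unlock(&ch->collect_lock);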
336 struct channel *ch = arg;
337 struct net_device *dev = ch->netdev;
340 CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
342 fsm_deltimer(&ch->timer);
344 fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
357 struct channel *ch = arg;
358 struct net_device *dev = ch->netdev;
360 int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
361 struct sk_buff *skb = ch->trans_skb;
366 fsm_deltimer(&ch->timer);
375 if (len > ch->max_bufsize) {
378 CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
387 switch (ch->protocol) {
410 ctcm_unpack_skb(ch, skb);
413 skb->data = ch->trans_skb_data;
416 if (ctcm_checkalloc_buffer(ch))
418 ch->ccw[1].count = ch->max_bufsize;
419 rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
421 ctcm_ccw_check_rc(ch, rc, "normal RX");
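
The len computation above is residual-count arithmetic: the subchannel
status word reports how many bytes of the programmed CCW count were
NOT transferred, so the received length is buffer size minus residual.
A worked example, assuming a 32 KiB read buffer:

	/* read started with count = ch->max_bufsize = 32768; a 1500-byte
	 * block leaves scsw.cmd.count = 32768 - 1500 = 31268, hence: */
	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;	/* = 1500 */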
434 struct channel *ch = arg;
439 CTCM_FUNTAIL, ch->id, fsmstate);
441 ch->sense_rc = 0; /* reset unit check report control */
445 CTCM_FUNTAIL, ch->id);
446 fsm_deltimer(&ch->timer);
447 if (ctcm_checkalloc_buffer(ch))
450 (ch->protocol == CTCM_PROTO_OS390)) {
452 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
453 *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
454 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
455 CTC_EVENT_TIMER, ch);
458 struct net_device *dev = ch->netdev;
470 if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
471 (ch->protocol != CTCM_PROTO_S390))
472 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
474 *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
475 ch->ccw[1].count = 2; /* Transfer only length */
477 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
479 rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
481 fsm_deltimer(&ch->timer);
483 ctcm_ccw_check_rc(ch, rc, "init IO");
492 if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
493 (ch->protocol == CTCM_PROTO_S390)) {
494 struct net_device *dev = ch->netdev;
511 struct channel *ch = arg;
512 struct net_device *dev = ch->netdev;
517 fsm_deltimer(&ch->timer);
518 buflen = *((__u16 *)ch->trans_skb->data);
523 if (ctcm_checkalloc_buffer(ch))
525 ch->ccw[1].count = ch->max_bufsize;
527 rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
530 ctcm_ccw_check_rc(ch, rc, "initial RX");
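
Read together, the firstio and rxidle fragments describe the non-MPC
startup handshake: the writer transfers only a two-byte length word set
to CTCM_INITIAL_BLOCKLEN, and the reader picks that word out of its
transfer buffer before re-arming a full-sized read. A condensed sketch
(state transitions and error paths omitted):

	/* writer side: send just the length word */
	*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
	ch->ccw[1].count = 2;		/* transfer only the length */
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);

	/* reader side: inspect the word, then re-arm a full read */
	buflen = *((__u16 *)ch->trans_skb->data);
	ch->ccw[1].count = ch->max_bufsize;
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);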
550 struct channel *ch = arg;
555 fsm_deltimer(&ch->timer);
556 if (IS_MPC(ch)) {
558 CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
559 __func__, smp_processor_id(), ch, ch->id);
561 fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
563 CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);
566 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
570 rc = ccw_device_start(ch->cdev, &ch->ccw[6], 0, 0xff, 0);
572 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
574 fsm_deltimer(&ch->timer);
576 ctcm_ccw_check_rc(ch, rc, "set Mode");
578 ch->retry = 0;
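
The setmode fragments show the start-I/O pattern this file repeats:
ccw_device_start() runs under the ccw device lock, and a nonzero rc
cancels the watchdog timer and goes through ctcm_ccw_check_rc().
Sketched in isolation, with the handler's conditional locking
simplified to the unconditional case:

	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	rc = ccw_device_start(ch->cdev, &ch->ccw[6], 0, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "set Mode");
	}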
590 struct channel *ch = arg;
595 CTCM_FUNTAIL, ch->id,
596 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
598 if (ch->trans_skb != NULL) {
599 clear_normalized_cda(&ch->ccw[1]);
600 dev_kfree_skb(ch->trans_skb);
601 ch->trans_skb = NULL;
603 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
604 ch->ccw[1].cmd_code = CCW_CMD_READ;
605 ch->ccw[1].flags = CCW_FLAG_SLI;
606 ch->ccw[1].count = 0;
608 ch->ccw[1].cmd_code = CCW_CMD_WRITE;
609 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
610 ch->ccw[1].count = 0;
612 if (ctcm_checkalloc_buffer(ch)) {
616 CTCM_FUNTAIL, ch->id,
617 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
620 ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
621 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
622 ch->ccw[0].count = 0;
623 ch->ccw[0].cda = 0;
624 ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* joint CE + DE */
625 ch->ccw[2].flags = CCW_FLAG_SLI;
626 ch->ccw[2].count = 0;
627 ch->ccw[2].cda = 0;
628 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
629 ch->ccw[4].cda = 0;
630 ch->ccw[4].flags &= ~CCW_FLAG_IDA;
633 fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
634 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
635 rc = ccw_device_halt(ch->cdev, 0);
636 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
639 fsm_deltimer(&ch->timer);
640 ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
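
The assignments above build the channel's static CCW program. A reading
of the layout as the fragments suggest, not authoritative beyond what
is shown:

	/*
	 * ccw[0]  PREPARE        SLI|CC  count 0, cda 0
	 * ccw[1]  READ or WRITE  SLI     count/cda set per transfer
	 *                                (WRITE additionally chains with CC)
	 * ccw[2]  NOOP           SLI     ends with joint CE + DE
	 * ccw[3..5]  memcpy of ccw[0..2]; ccw[4] later gets its own cda
	 *            so a second chain can address per-skb data directly
	 */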
653 struct channel *ch = arg;
658 fsm_deltimer(&ch->timer);
659 if (IS_MPC(ch))
660 fsm_deltimer(&ch->sweep_timer);
662 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
665 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
670 rc = ccw_device_halt(ch->cdev, 0);
673 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
677 fsm_deltimer(&ch->timer);
680 ctcm_ccw_check_rc(ch, rc, (char *)__func__);
691  * ch	:	The channel to operate on.
694 struct channel *ch)
696 struct net_device *dev = ch->netdev;
701 CTCM_FUNTAIL, dev->name, ch->id, state);
703 fsm_deltimer(&ch->timer);
704 if (IS_MPC(ch))
705 fsm_deltimer(&ch->sweep_timer);
708 if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
709 clear_normalized_cda(&ch->ccw[1]);
710 dev_kfree_skb_any(ch->trans_skb);
711 ch->trans_skb = NULL;
714 ch->th_seg = 0x00;
715 ch->th_seq_num = 0x00;
716 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
717 skb_queue_purge(&ch->io_queue);
720 ctcm_purge_skb_queue(&ch->io_queue);
721 if (IS_MPC(ch))
722 ctcm_purge_skb_queue(&ch->sweep_queue);
723 spin_lock(&ch->collect_lock);
724 ctcm_purge_skb_queue(&ch->collect_queue);
725 ch->collect_len = 0;
726 spin_unlock(&ch->collect_lock);
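
The cleanup fragments imply a fixed teardown order: stop the timers,
release the DMA-mapped transfer skb, then purge the queues (the write
side under collect_lock). A condensed sketch; the helper name is made
up for illustration:

static void ctcm_channel_teardown_sketch(struct channel *ch)
{
	fsm_deltimer(&ch->timer);
	if (ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);	/* unmap first */
		dev_kfree_skb_any(ch->trans_skb);
		ch->trans_skb = NULL;
	}
	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
		skb_queue_purge(&ch->io_queue);
	} else {
		ctcm_purge_skb_queue(&ch->io_queue);
		spin_lock(&ch->collect_lock);
		ctcm_purge_skb_queue(&ch->collect_queue);
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
	}
}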
780 struct channel *ch = arg;
781 struct net_device *dev = ch->netdev;
793 fsm_deltimer(&ch->timer);
794 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
795 if (!IS_MPC(ch) &&
796 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
797 int rc = ccw_device_halt(ch->cdev, 0);
799 ctcm_ccw_check_rc(ch, rc,
808 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
811 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
829 struct channel *ch = arg;
830 struct net_device *dev = ch->netdev;
837 CTCM_FUNTAIL, ch->id, event, dev->name);
839 fsm_deltimer(&ch->timer);
841 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
845 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
849 rc = ccw_device_halt(ch->cdev, 0);
851 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
854 fsm_deltimer(&ch->timer);
857 ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
871 struct channel *ch = arg;
872 struct net_device *dev = ch->netdev;
878 fsm_deltimer(&ch->timer);
879 if (ch->retry++ < 3)
887 "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
906 struct channel *ch = arg;
907 struct net_device *dev = ch->netdev;
912 CTCM_FUNTAIL, dev->name, ch->id);
926 struct channel *ch = arg;
928 struct net_device *dev = ch->netdev;
934 fsm_deltimer(&ch->timer);
945 ccw_device_halt(ch->cdev, 0);
958 struct channel *ch = arg;
959 struct net_device *dev = ch->netdev;
963 fsm_deltimer(&ch->timer);
964 if (ch->retry++ < 3)
972 "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
990 struct channel *ch = arg;
991 struct net_device *dev = ch->netdev;
995 CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
996 __func__, smp_processor_id(), ch, ch->id);
998 fsm_deltimer(&ch->timer);
999 if (ch->retry++ > 3) {
1003 CTCM_FUNTAIL, ch->id);
1014 CTCM_FUNTAIL, ch->id, ch->retry);
1015 skb = skb_peek(&ch->io_queue);
1019 clear_normalized_cda(&ch->ccw[4]);
1020 ch->ccw[4].count = skb->len;
1021 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1024 CTCM_FUNTAIL, ch->id);
1029 fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
1031 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1036 ctcmpc_dumpit((char *)&ch->ccw[3],
1039 rc = ccw_device_start(ch->cdev, &ch->ccw[3], 0, 0xff, 0);
1041 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1044 fsm_deltimer(&ch->timer);
1045 ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
1046 ctcm_purge_skb_queue(&ch->io_queue);
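
The retry handler gives up after three attempts and otherwise re-points
ccw[4] at the packet still sitting at the head of the I/O queue before
restarting the write chain at ccw[3]. The mapping step, sketched with
the error paths reduced to comments:

	skb = skb_peek(&ch->io_queue);		/* retransmit, don't dequeue */
	clear_normalized_cda(&ch->ccw[4]);	/* drop the stale mapping */
	ch->ccw[4].count = skb->len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/* no IDAL could be set up: treated as fatal above */
	}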
1062 struct channel *ch = arg;
1063 struct net_device *dev = ch->netdev;
1065 int rd = CHANNEL_DIRECTION(ch->flags);
1067 fsm_deltimer(&ch->timer);
1070 CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
1072 if (IS_MPC(ch)) {
1212 struct channel *ch = arg;
1213 struct net_device *dev = ch->netdev;
1230 duration = done_stamp - ch->prof.send_stamp;
1231 if (duration > ch->prof.tx_time)
1232 ch->prof.tx_time = duration;
1234 if (ch->irb->scsw.cmd.count != 0)
1237 CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
1238 fsm_deltimer(&ch->timer);
1239 while ((skb = skb_dequeue(&ch->io_queue))) {
1249 spin_lock(&ch->collect_lock);
1250 clear_normalized_cda(&ch->ccw[4]);
1251 if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
1252 spin_unlock(&ch->collect_lock);
1257 if (ctcm_checkalloc_buffer(ch)) {
1258 spin_unlock(&ch->collect_lock);
1261 ch->trans_skb->data = ch->trans_skb_data;
1262 skb_reset_tail_pointer(ch->trans_skb);
1263 ch->trans_skb->len = 0;
1264 if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
1265 ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
1266 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
1267 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
1276 while ((skb = skb_dequeue(&ch->collect_queue))) {
1277 skb_put_data(ch->trans_skb, skb->data, skb->len);
1279 (skb_tail_pointer(ch->trans_skb) - skb->len);
1287 __func__, ch->trans_skb->len);
1292 ch->collect_len -= skb->len;
1298 peekskb = skb_peek(&ch->collect_queue);
1307 header = skb_push(ch->trans_skb, TH_HEADER_LENGTH);
1311 ch->th_seq_num++;
1312 header->th_seq_num = ch->th_seq_num;
1315 __func__, ch->th_seq_num);
1318 __func__, ch->trans_skb->len);
1321 CTCM_D3_DUMP((char *)ch->trans_skb->data,
1322 min_t(int, ch->trans_skb->len, 50));
1324 spin_unlock(&ch->collect_lock);
1325 clear_normalized_cda(&ch->ccw[1]);
1328 (void *)(u64)dma32_to_u32(ch->ccw[1].cda),
1329 ch->trans_skb->data);
1330 ch->ccw[1].count = ch->max_bufsize;
1332 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
1333 dev_kfree_skb_any(ch->trans_skb);
1334 ch->trans_skb = NULL;
1337 CTCM_FUNTAIL, ch->id);
1343 (void *)(u64)dma32_to_u32(ch->ccw[1].cda),
1344 ch->trans_skb->data);
1346 ch->ccw[1].count = ch->trans_skb->len;
1347 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
1348 ch->prof.send_stamp = jiffies;
1350 ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
1351 rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
1352 ch->prof.doios_multi++;
1356 fsm_deltimer(&ch->timer);
1357 ctcm_ccw_check_rc(ch, rc, "chained TX");
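
The MPC TX-done path frames the collected data with a transport header
instead of the plain length word: TH_HEADER_LENGTH bytes are pushed in
front and carry a per-channel send sequence number. A minimal sketch;
zeroing the header first is an assumption, as is the layout of any
header field beyond th_seq_num:

	header = skb_push(ch->trans_skb, TH_HEADER_LENGTH);
	memset(header, 0, TH_HEADER_LENGTH);	/* assumption: zeroed first */
	ch->th_seq_num++;			/* one per outbound block */
	header->th_seq_num = ch->th_seq_num;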
1374 struct channel *ch = arg;
1375 struct net_device *dev = ch->netdev;
1378 struct sk_buff *skb = ch->trans_skb;
1381 int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
1385 ch->id, ch->max_bufsize, len);
1386 fsm_deltimer(&ch->timer);
1405 new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);
1422 skb_queue_tail(&ch->io_queue, new_skb);
1423 tasklet_schedule(&ch->ch_tasklet);
1427 skb_queue_tail(&ch->io_queue, new_skb);
1428 tasklet_hi_schedule(&ch->ch_tasklet);
1438 if (ctcm_checkalloc_buffer(ch))
1440 ch->trans_skb->data = ch->trans_skb_data;
1441 skb_reset_tail_pointer(ch->trans_skb);
1442 ch->trans_skb->len = 0;
1443 ch->ccw[1].count = ch->max_bufsize;
1445 ctcmpc_dumpit((char *)&ch->ccw[0],
1450 get_ccwdev_lock(ch->cdev), saveflags);
1451 rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
1454 get_ccwdev_lock(ch->cdev), saveflags);
1456 ctcm_ccw_check_rc(ch, rc, "normal RX");
1462 CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
1463 __func__, dev->name, ch, ch->id);
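
Unlike the base driver's chx_rx, the MPC receive path above does not
unpack in the IRQ handler: the filled skb goes onto io_queue for a
tasklet, and the read is immediately re-armed with a clean transfer
buffer. Condensed from the fragments:

	/* hand the block to the bottom half */
	skb_queue_tail(&ch->io_queue, new_skb);
	tasklet_schedule(&ch->ch_tasklet);

	/* re-arm the read with a reset transfer buffer */
	ch->trans_skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(ch->trans_skb);
	ch->trans_skb->len = 0;
	ch->ccw[1].count = ch->max_bufsize;
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);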
1476 struct channel *ch = arg;
1477 struct net_device *dev = ch->netdev;
1481 CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
1482 __func__, ch->id, ch);
1486 CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
1487 fsm_getstate(gptr->fsm), ch->protocol);
1492 fsm_deltimer(&ch->timer);
1493 if (ctcm_checkalloc_buffer(ch))
1499 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
1510 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
1514 CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
1515 __func__, ch->id, ch);
1530 struct channel *ch = arg;
1531 struct net_device *dev = ch->netdev;
1537 fsm_deltimer(&ch->timer);
1539 __func__, ch->id, dev->name, smp_processor_id(),
1548 if (ctcm_checkalloc_buffer(ch))
1550 ch->trans_skb->data = ch->trans_skb_data;
1551 skb_reset_tail_pointer(ch->trans_skb);
1552 ch->trans_skb->len = 0;
1553 ch->ccw[1].count = ch->max_bufsize;
1554 CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
1557 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1558 rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
1561 get_ccwdev_lock(ch->cdev), saveflags);
1564 ctcm_ccw_check_rc(ch, rc, "initial RX");
1584 struct channel *ch = arg;
1585 struct net_device *dev = ch->netdev;
1589 CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
1590 __func__, dev->name, ch->id, ch, smp_processor_id(),
1591 fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
1596 if (!ch->in_mpcgroup)
1598 if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) {
1603 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1605 } else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
1607 fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1612 /* attn rcvd before xid0 processed on ch
1614 if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
1615 fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1621 switch (fsm_getstate(ch->fsm)) {
1623 fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1626 fsm_newstate(ch->fsm, CH_XID7_PENDING3);
1643 struct channel *ch = arg;
1644 struct net_device *dev = ch->netdev;
1649 __func__, dev->name, ch->id,
1650 fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
1652 fsm_deltimer(&ch->timer);
1677 if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
1678 fsm_newstate(ch->fsm, CH_XID0_PENDING);
1711 if (ch->in_mpcgroup)
1712 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1716 CTCM_FUNTAIL, dev->name, ch->id);
1729 struct channel *ch = arg;
1730 struct net_device *dev = ch->netdev;
1734 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1756 CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
2066 struct channel *ch = priv->channel[direction];
2067 fsm_event(ch->fsm, CTC_EVENT_START, ch);
2088 struct channel *ch = priv->channel[direction];
2089 fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
2090 ch->th_seq_num = 0x00;
2092 __func__, ch->th_seq_num);
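
The closing fragments show the device-level entry points: start and
stop just forward an event to each channel's statemachine, and stop
additionally resets the MPC send sequence so a restarted group begins
counting from zero. Sketched under the assumption that direction
iterates the read and write channels:

	struct channel *ch = priv->channel[direction];
	fsm_event(ch->fsm, CTC_EVENT_START, ch);	/* on start */

	fsm_event(ch->fsm, CTC_EVENT_STOP, ch);		/* on stop */
	ch->th_seq_num = 0x00;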