Lines matching defs:cqr — definitions and uses of the struct dasd_ccw_req pointer `cqr` in the Linux s390 DASD driver (drivers/s390/block/dasd.c). Each fragment is prefixed with its line number in that file.

652  * Add profiling information for cqr before execution.
655 struct dasd_ccw_req *cqr,
690 device = cqr->startdev;
709 * Add profiling information for cqr after execution.
767 struct dasd_ccw_req *cqr,
777 device = cqr->startdev;
784 if (!cqr->buildclk || !cqr->startclk ||
785 !cqr->stopclk || !cqr->endclk ||
789 strtime = ((cqr->startclk - cqr->buildclk) >> 12);
790 irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
791 endtime = ((cqr->endclk - cqr->stopclk) >> 12);
792 tottime = ((cqr->endclk - cqr->buildclk) >> 12);
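
Note: the four timestamps compared above (buildclk, startclk, stopclk, endclk) are s390 TOD clock values; bit 51 of the TOD clock ticks once per microsecond, so shifting a difference right by 12 converts it to microseconds. A minimal standalone sketch with made-up stamp values:

    #include <stdio.h>
    #include <stdint.h>

    /* On s390, bit 51 of the 64-bit TOD clock ticks once per microsecond,
     * so (tod_delta >> 12) yields microseconds. Values are illustrative. */
    int main(void)
    {
        uint64_t buildclk = 0;                          /* hypothetical stamps */
        uint64_t startclk = 250ULL << 12;               /* 250 us after build  */
        uint64_t stopclk  = startclk + (1200ULL << 12); /* 1200 us of I/O      */

        printf("queue wait: %llu us\n",
               (unsigned long long)((startclk - buildclk) >> 12));
        printf("I/O time:   %llu us\n",
               (unsigned long long)((stopclk - startclk) >> 12));
        return 0;
    }
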
811 cqr->startdev != block->base,
812 cqr->cpmode == 1,
829 cqr->startdev != block->base,
830 cqr->cpmode == 1,
847 cqr->startdev != block->base,
848 cqr->cpmode == 1,
1108 #define dasd_profile_start(block, cqr, req) do {} while (0)
1109 #define dasd_profile_end(block, cqr, req) do {} while (0)
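
Note: when profiling is compiled out, both hooks collapse to these empty statement-shaped macros. The `do {} while (0)` form matters: it keeps the no-op a single statement, so an unbraced `if (cond) dasd_profile_start(...); else ...` at a call site still parses. A self-contained sketch of the idiom (macro names hypothetical):

    #include <stdio.h>

    #define WITH_PROFILE 0

    #if WITH_PROFILE
    #define profile_start(x) printf("start %d\n", (x))
    #else
    /* do {} while (0) makes the no-op behave like one statement, so
     * `if (cond) profile_start(x); else ...` still parses correctly. */
    #define profile_start(x) do {} while (0)
    #endif

    int main(void)
    {
        if (1)
            profile_start(42);  /* compiles to nothing here */
        else
            puts("never");
        return 0;
    }
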
1180 struct dasd_ccw_req *cqr)
1190 if (!cqr)
1191 size += (sizeof(*cqr) + 7L) & -8L;
1198 if (!cqr) {
1199 cqr = (void *) data;
1200 data += (sizeof(*cqr) + 7L) & -8L;
1202 memset(cqr, 0, sizeof(*cqr));
1203 cqr->mem_chunk = chunk;
1205 cqr->cpaddr = data;
1207 memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
1210 cqr->data = data;
1211 memset(cqr->data, 0, datasize);
1213 cqr->magic = magic;
1214 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1216 return cqr;
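
Note: both request allocators round sizes with `(n + 7L) & -8L`, the standard align-up-to-8 idiom, and then carve the request struct, the CCW program (cpaddr) and the payload (data) out of one chunk, each on an 8-byte boundary. A quick standalone check of the arithmetic:

    #include <stdio.h>
    #include <stddef.h>

    /* Round n up to the next multiple of 8: adding 7 then masking with
     * -8L (binary ...11111000) clears the low three bits. */
    static size_t align8(size_t n)
    {
        return (n + 7L) & -8L;
    }

    int main(void)
    {
        printf("%zu -> %zu\n", (size_t)121, align8(121)); /* 121 -> 128 */
        printf("%zu -> %zu\n", (size_t)128, align8(128)); /* 128 -> 128 */
        return 0;
    }
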
1224 struct dasd_ccw_req *cqr;
1229 cqr_size = (sizeof(*cqr) + 7L) & -8L;
1237 cqr = dasd_alloc_chunk(&device->ese_chunks, size);
1239 if (!cqr)
1241 memset(cqr, 0, sizeof(*cqr));
1242 data = (char *)cqr + cqr_size;
1243 cqr->cpaddr = NULL;
1245 cqr->cpaddr = data;
1247 memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
1249 cqr->data = NULL;
1251 cqr->data = data;
1252 memset(cqr->data, 0, datasize);
1255 cqr->magic = magic;
1256 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1259 return cqr;
1263 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
1268 dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
1274 void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
1279 dasd_free_chunk(&device->ese_chunks, cqr);
1286 * Check discipline magic in cqr.
1288 static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
1292 if (cqr == NULL)
1294 device = cqr->startdev;
1295 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
1299 cqr->magic,
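
Note: dasd_check_cqr verifies that the 4-byte magic stored in the request matches the owning discipline's name (held in EBCDIC in the kernel) before the request is started or terminated, catching requests handed to the wrong discipline. A sketch of the pattern, using ASCII names purely for illustration:

    #include <stdio.h>
    #include <string.h>

    /* A 4-byte magic in the request must match the 4-byte name of the
     * code that built it; the comparison is strncmp over exactly 4 bytes,
     * as in the listing above. Names here are illustrative. */
    struct req { unsigned int magic; };

    static int check_magic(struct req *r, const char *owner_name)
    {
        return strncmp((char *)&r->magic, owner_name, 4) == 0;
    }

    int main(void)
    {
        struct req r;
        memcpy(&r.magic, "ECKD", 4);
        printf("match: %d\n", check_magic(&r, "ECKD")); /* match: 1 */
        printf("match: %d\n", check_magic(&r, "FBA ")); /* match: 0 */
        return 0;
    }
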
1312 int dasd_term_IO(struct dasd_ccw_req *cqr)
1317 /* Check the cqr */
1318 rc = dasd_check_cqr(cqr);
1322 device = (struct dasd_device *) cqr->startdev;
1323 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
1324 rc = ccw_device_clear(device->cdev, (long) cqr);
1327 cqr->status = DASD_CQR_CLEAR_PENDING;
1328 cqr->stopclk = get_tod_clock();
1329 cqr->starttime = 0;
1331 "terminate cqr %p successful",
1332 cqr);
1343 cqr->status = DASD_CQR_CLEARED;
1344 cqr->stopclk = get_tod_clock();
1345 cqr->starttime = 0;
1347 cqr->retries = -1;
1370 int dasd_start_IO(struct dasd_ccw_req *cqr)
1375 /* Check the cqr */
1376 rc = dasd_check_cqr(cqr);
1378 cqr->intrc = rc;
1381 device = (struct dasd_device *) cqr->startdev;
1382 if (((cqr->block &&
1383 test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
1385 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
1387 "because of stolen lock", cqr);
1388 cqr->status = DASD_CQR_ERROR;
1389 cqr->intrc = -EPERM;
1392 if (cqr->retries < 0) {
1395 cqr->status = DASD_CQR_ERROR;
1398 cqr->startclk = get_tod_clock();
1399 cqr->starttime = jiffies;
1400 cqr->retries--;
1401 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1402 cqr->lpm &= dasd_path_get_opm(device);
1403 if (!cqr->lpm)
1404 cqr->lpm = dasd_path_get_opm(device);
1410 if (cqr->block)
1411 cqr->trkcount = atomic_read(&cqr->block->trkcount);
1413 if (cqr->cpmode == 1) {
1414 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
1415 (long) cqr, cqr->lpm);
1417 rc = ccw_device_start(device->cdev, cqr->cpaddr,
1418 (long) cqr, cqr->lpm, 0);
1422 cqr->status = DASD_CQR_IN_IO;
1436 if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1439 cqr->lpm);
1440 } else if (cqr->lpm != dasd_path_get_opm(device)) {
1441 cqr->lpm = dasd_path_get_opm(device);
1460 dasd_handle_autoquiesce(device, cqr, DASD_EER_STARTIO);
1477 cqr->intrc = rc;
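
Note: dasd_start_IO first narrows the request's logical path mask (lpm) to the currently operational paths (opm), falling back to the full operational mask if nothing is left, and then starts the channel program in transport mode (`ccw_device_tm_start`, when `cpmode == 1`) or command mode (`ccw_device_start`). The mask logic in isolation (mask values illustrative):

    #include <stdio.h>

    /* lpm is a bitmask of logical paths the request may use; it is first
     * restricted to the operational paths (opm), and if nothing remains
     * it falls back to all operational paths. */
    int main(void)
    {
        unsigned char opm = 0xC0;   /* two operational paths            */
        unsigned char lpm = 0x30;   /* request prefers two broken paths */

        lpm &= opm;                 /* restrict to operational paths    */
        if (!lpm)
            lpm = opm;              /* none left: retry on any good one */

        printf("effective path mask: 0x%02X\n", lpm); /* 0xC0 */
        return 0;
    }
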
1527 struct dasd_ccw_req *cqr;
1532 cqr = (struct dasd_ccw_req *) intparm;
1533 if (cqr->status != DASD_CQR_IN_IO) {
1536 "%02x", cqr->status);
1547 if (!cqr->startdev ||
1548 device != cqr->startdev ||
1549 strncmp(cqr->startdev->discipline->ebcname,
1550 (char *) &cqr->magic, 4)) {
1558 cqr->status = DASD_CQR_QUEUED;
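
Note: the `(long) cqr` argument passed to ccw_device_start/ccw_device_tm_start earlier returns in the interrupt handler as `intparm`, where it is cast back to the request pointer (`cqr = (struct dasd_ccw_req *) intparm`). A plain userspace illustration of that round trip:

    #include <stdio.h>

    /* The driver hands the request pointer to the channel subsystem as an
     * opaque integer token and recovers it in the interrupt handler by
     * casting back. Struct and function names are illustrative. */
    struct request { int id; };

    static unsigned long submit(struct request *r)
    {
        return (unsigned long)r;      /* pointer travels as a token */
    }

    static void on_interrupt(unsigned long intparm)
    {
        struct request *r = (struct request *)intparm;
        printf("completed request %d\n", r->id);
    }

    int main(void)
    {
        struct request r = { .id = 7 };
        on_interrupt(submit(&r));
        return 0;
    }
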
1623 struct dasd_ccw_req *cqr, *next, *fcqr;
1632 cqr = (struct dasd_ccw_req *) intparm;
1636 if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
1637 device = cqr->startdev;
1638 cqr->status = DASD_CQR_CLEARED;
1660 if (!cqr ||
1663 if (cqr)
1664 memcpy(&cqr->irb, irb, sizeof(*irb));
1683 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
1685 test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
1692 dasd_generic_space_exhaust(device, cqr);
1693 device->discipline->ext_pool_exhaust(device, cqr);
1702 device->discipline->dump_sense(device, cqr, irb);
1703 device->discipline->check_for_device_change(device, cqr, irb);
1717 if (!cqr)
1720 device = (struct dasd_device *) cqr->startdev;
1722 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1728 if (dasd_ese_needs_format(cqr->block, irb)) {
1729 req = dasd_get_callback_data(cqr);
1731 cqr->status = DASD_CQR_ERROR;
1735 device->discipline->ese_read(cqr, irb);
1736 cqr->status = DASD_CQR_SUCCESS;
1737 cqr->stopclk = now;
1742 fcqr = device->discipline->ese_format(device, cqr, irb);
1745 cqr->status = DASD_CQR_ERROR;
1752 cqr->status = DASD_CQR_QUEUED;
1757 cqr->status = DASD_CQR_QUEUED;
1765 if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1767 cqr->status = DASD_CQR_CLEARED;
1775 if (cqr->status != DASD_CQR_IN_IO) {
1777 "status %02x", dev_name(&cdev->dev), cqr->status);
1786 cqr->status = DASD_CQR_SUCCESS;
1787 cqr->stopclk = now;
1789 if (cqr->devlist.next != &device->ccw_queue) {
1790 next = list_entry(cqr->devlist.next,
1798 if (cqr->cpmode && dasd_check_hpf_error(irb) &&
1805 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
1806 cqr->retries > 0) {
1807 if (cqr->lpm == dasd_path_get_opm(device))
1811 cqr->retries);
1812 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
1813 cqr->lpm = dasd_path_get_opm(device);
1814 cqr->status = DASD_CQR_QUEUED;
1815 next = cqr;
1817 cqr->status = DASD_CQR_ERROR;
1863 struct dasd_ccw_req *cqr;
1872 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1873 if (cqr->status == DASD_CQR_QUEUED &&
1874 ref_cqr->block == cqr->block) {
1875 cqr->status = DASD_CQR_CLEARED;
1888 struct dasd_ccw_req *cqr;
1892 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1895 if (cqr->status == DASD_CQR_QUEUED ||
1896 cqr->status == DASD_CQR_IN_IO ||
1897 cqr->status == DASD_CQR_CLEAR_PENDING)
1899 if (cqr->status == DASD_CQR_ERROR) {
1900 __dasd_device_recovery(device, cqr);
1903 list_move_tail(&cqr->devlist, final_queue);
1908 struct dasd_ccw_req *cqr)
1910 switch (cqr->status) {
1912 cqr->status = DASD_CQR_DONE;
1915 cqr->status = DASD_CQR_NEED_ERP;
1918 cqr->status = DASD_CQR_TERMINATED;
1922 "Unexpected CQR status %02x", cqr->status);
1925 if (cqr->callback)
1926 cqr->callback(cqr, cqr->callback_data);
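
Note: the case labels are filtered out of this listing (they do not contain the lowercase identifier `cqr`), but the mapping in __dasd_process_cqr is SUCCESS to DONE, ERROR to NEED_ERP and CLEARED to TERMINATED, after which the completion callback runs. A sketch of the mapping (enum values illustrative, not the kernel's):

    #include <stdio.h>

    /* Final-state mapping applied when a request leaves the device queue. */
    enum cqr_status { CQR_SUCCESS, CQR_ERROR, CQR_CLEARED,
                      CQR_DONE, CQR_NEED_ERP, CQR_TERMINATED };

    static enum cqr_status finalize(enum cqr_status s)
    {
        switch (s) {
        case CQR_SUCCESS: return CQR_DONE;
        case CQR_ERROR:   return CQR_NEED_ERP;
        case CQR_CLEARED: return CQR_TERMINATED;
        default:          return s;   /* unexpected: left unchanged */
        }
    }

    int main(void)
    {
        printf("%d %d %d\n", finalize(CQR_SUCCESS),
               finalize(CQR_ERROR), finalize(CQR_CLEARED));
        return 0;
    }
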
1937 struct dasd_ccw_req *cqr;
1941 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1942 list_del_init(&cqr->devlist);
1943 block = cqr->block;
1945 __dasd_process_cqr(device, cqr);
1948 __dasd_process_cqr(device, cqr);
1958 struct dasd_ccw_req *cqr)
1960 if ((device->default_retries - cqr->retries) >= device->aq_timeouts)
1961 dasd_handle_autoquiesce(device, cqr, DASD_EER_TIMEOUTS);
1970 struct dasd_ccw_req *cqr;
1974 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1975 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
1976 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1982 cqr->retries++;
1984 if (device->discipline->term_IO(cqr) != 0) {
1988 (cqr->expires / HZ));
1989 cqr->expires += 5*HZ;
1994 (cqr->expires / HZ), cqr->retries);
1996 __dasd_device_check_autoquiesce_timeout(device, cqr);
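
Note: the timeout test above is jiffies arithmetic: a request expires once `jiffies` reaches `cqr->expires + cqr->starttime`; if termination fails, the deadline is pushed out by five seconds (`cqr->expires += 5*HZ`). `time_after_eq` tolerates counter wraparound by comparing via signed subtraction:

    #include <stdio.h>

    #define HZ 100 /* illustrative tick rate */

    /* time_after_eq(a, b): true if a is at or past b, robust against
     * wraparound because the difference is evaluated as signed. */
    static int time_after_eq(unsigned long a, unsigned long b)
    {
        return (long)(a - b) >= 0;
    }

    int main(void)
    {
        unsigned long starttime = 1000;      /* jiffies at start_IO */
        unsigned long expires   = 30 * HZ;   /* 30 s timeout        */
        unsigned long jiffies   = 1000 + 30 * HZ;

        if (time_after_eq(jiffies, expires + starttime))
            puts("request timed out: terminate and retry");
        return 0;
    }
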
2004 struct dasd_ccw_req *cqr)
2021 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
2037 struct dasd_ccw_req *cqr;
2042 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2043 if (cqr->status != DASD_CQR_QUEUED)
2046 if (__dasd_device_is_unusable(device, cqr)) {
2047 cqr->intrc = -EAGAIN;
2048 cqr->status = DASD_CQR_CLEARED;
2053 rc = device->discipline->start_IO(cqr);
2055 dasd_device_set_timer(device, cqr->expires);
2100 struct dasd_ccw_req *cqr, *n;
2107 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
2109 switch (cqr->status) {
2111 rc = device->discipline->term_IO(cqr);
2121 cqr->stopclk = get_tod_clock();
2122 cqr->status = DASD_CQR_CLEARED;
2127 list_move_tail(&cqr->devlist, &flush_queue);
2136 list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
2138 (cqr->status != DASD_CQR_CLEAR_PENDING));
2207 void dasd_add_request_head(struct dasd_ccw_req *cqr)
2212 device = cqr->startdev;
2214 cqr->status = DASD_CQR_QUEUED;
2215 list_add(&cqr->devlist, &device->ccw_queue);
2226 void dasd_add_request_tail(struct dasd_ccw_req *cqr)
2231 device = cqr->startdev;
2233 cqr->status = DASD_CQR_QUEUED;
2234 list_add_tail(&cqr->devlist, &device->ccw_queue);
2244 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
2246 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2247 cqr->callback_data = DASD_SLEEPON_END_TAG;
2248 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2253 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
2258 device = cqr->startdev;
2260 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
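
Note: the sleep-on machinery uses `callback_data` as a tag: DASD_SLEEPON_START_TAG when the request is queued, DASD_SLEEPON_END_TAG once dasd_wakeup_cb runs under the ccw device lock, and `_wait_for_wakeup` is the wait_event condition that checks for the end tag. A single-threaded illustration of the tag check (tag values assumed, not necessarily the kernel's):

    #include <stdio.h>

    #define SLEEPON_START_TAG ((void *)1)
    #define SLEEPON_END_TAG   ((void *)2)

    struct req { void *callback_data; };

    /* The condition the waiter sleeps on until the completion callback
     * flips the tag. */
    static int wait_condition(struct req *r)
    {
        return r->callback_data == SLEEPON_END_TAG;
    }

    int main(void)
    {
        struct req r = { .callback_data = SLEEPON_START_TAG };
        printf("done? %d\n", wait_condition(&r)); /* 0: still in flight  */
        r.callback_data = SLEEPON_END_TAG;        /* completion callback */
        printf("done? %d\n", wait_condition(&r)); /* 1: waiter wakes up  */
        return 0;
    }
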
2268 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
2273 if (cqr->status == DASD_CQR_FILLED)
2275 device = cqr->startdev;
2276 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
2277 if (cqr->status == DASD_CQR_TERMINATED) {
2278 device->discipline->handle_terminated_request(cqr);
2281 if (cqr->status == DASD_CQR_NEED_ERP) {
2282 erp_fn = device->discipline->erp_action(cqr);
2283 erp_fn(cqr);
2286 if (cqr->status == DASD_CQR_FAILED)
2287 dasd_log_sense(cqr, &cqr->irb);
2288 if (cqr->refers) {
2289 __dasd_process_erp(device, cqr);
2296 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
2298 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
2299 if (cqr->refers) /* erp is not done yet */
2301 return ((cqr->status != DASD_CQR_DONE) &&
2302 (cqr->status != DASD_CQR_FAILED));
2304 return (cqr->status == DASD_CQR_FILLED);
2312 struct dasd_ccw_req *cqr;
2318 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
2319 cqr = list_first_entry(&ccw_queue,
2322 if (__dasd_sleep_on_erp(cqr))
2324 if (cqr->status != DASD_CQR_FILLED) /* could be failed */
2327 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2328 cqr->status = DASD_CQR_FAILED;
2329 cqr->intrc = -EPERM;
2334 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2336 cqr->status = DASD_CQR_FAILED;
2337 cqr->intrc = -ENOLINK;
2345 cqr->status = DASD_CQR_FAILED;
2346 cqr->intrc = -ENODEV;
2353 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
2358 cqr->status = DASD_CQR_FAILED;
2365 if (!cqr->callback)
2366 cqr->callback = dasd_wakeup_cb;
2368 cqr->callback_data = DASD_SLEEPON_START_TAG;
2369 dasd_add_request_tail(cqr);
2372 generic_waitq, _wait_for_wakeup(cqr));
2374 dasd_cancel_req(cqr);
2377 _wait_for_wakeup(cqr));
2378 cqr->status = DASD_CQR_FAILED;
2383 wait_event(generic_waitq, _wait_for_wakeup(cqr));
2401 struct dasd_ccw_req *cqr;
2403 list_for_each_entry(cqr, ccw_queue, blocklist) {
2404 if (cqr->callback_data != DASD_SLEEPON_END_TAG)
2414 struct dasd_ccw_req *cqr, *n;
2419 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2420 device = cqr->startdev;
2421 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/
2425 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2426 cqr->status = DASD_CQR_FAILED;
2427 cqr->intrc = -EPERM;
2432 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2434 cqr->status = DASD_CQR_FAILED;
2435 cqr->intrc = -EAGAIN;
2444 cqr->status = DASD_CQR_FAILED;
2445 cqr->intrc = rc;
2451 if (!cqr->callback)
2452 cqr->callback = dasd_wakeup_cb;
2453 cqr->callback_data = DASD_SLEEPON_START_TAG;
2454 dasd_add_request_tail(cqr);
2460 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2467 sense = dasd_get_sense(&cqr->irb);
2469 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
2471 if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
2472 test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
2480 if (cqr->startdev != cqr->basedev && !cqr->refers &&
2481 (cqr->status == DASD_CQR_TERMINATED ||
2482 cqr->status == DASD_CQR_NEED_ERP))
2486 if (__dasd_sleep_on_erp(cqr))
2498 int dasd_sleep_on(struct dasd_ccw_req *cqr)
2500 return _dasd_sleep_on(cqr, 0);
2526 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
2528 return _dasd_sleep_on(cqr, 1);
2540 struct dasd_ccw_req *cqr;
2545 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2546 rc = device->discipline->term_IO(cqr);
2553 cqr->retries++;
2557 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
2562 device = cqr->startdev;
2564 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2565 cqr->status = DASD_CQR_FAILED;
2566 cqr->intrc = -EPERM;
2575 cqr->callback = dasd_wakeup_cb;
2576 cqr->callback_data = DASD_SLEEPON_START_TAG;
2577 cqr->status = DASD_CQR_QUEUED;
2580 * first the terminated cqr needs to be finished
2582 list_add(&cqr->devlist, device->ccw_queue.next);
2589 wait_event(generic_waitq, _wait_for_wakeup(cqr));
2591 if (cqr->status == DASD_CQR_DONE)
2593 else if (cqr->intrc)
2594 rc = cqr->intrc;
2616 static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
2618 struct dasd_device *device = cqr->startdev;
2621 switch (cqr->status) {
2624 cqr->status = DASD_CQR_CLEARED;
2628 rc = device->discipline->term_IO(cqr);
2633 cqr->stopclk = get_tod_clock();
2643 int dasd_cancel_req(struct dasd_ccw_req *cqr)
2645 struct dasd_device *device = cqr->startdev;
2650 rc = __dasd_cancel_req(cqr);
2703 struct dasd_ccw_req *cqr)
2707 if (cqr->status == DASD_CQR_DONE)
2711 erp_fn = device->discipline->erp_postaction(cqr);
2712 erp_fn(cqr);
2715 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
2722 req = (struct request *) cqr->callback_data;
2723 dasd_profile_end(cqr->block, cqr, req);
2725 proc_bytes = cqr->proc_bytes;
2726 status = cqr->block->base->discipline->free_cp(cqr, req);
2730 switch (cqr->intrc) {
2782 struct dasd_ccw_req *cqr;
2790 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2791 if (cqr->status != DASD_CQR_DONE &&
2792 cqr->status != DASD_CQR_FAILED &&
2793 cqr->status != DASD_CQR_NEED_ERP &&
2794 cqr->status != DASD_CQR_TERMINATED)
2797 if (cqr->status == DASD_CQR_TERMINATED) {
2798 base->discipline->handle_terminated_request(cqr);
2803 if (cqr->status == DASD_CQR_NEED_ERP) {
2804 erp_fn = base->discipline->erp_action(cqr);
2805 if (IS_ERR(erp_fn(cqr)))
2811 if (cqr->status == DASD_CQR_FAILED) {
2812 dasd_log_sense(cqr, &cqr->irb);
2819 if (cqr->status == DASD_CQR_FAILED &&
2820 dasd_handle_autoquiesce(base, cqr, DASD_EER_FATALERROR)) {
2821 cqr->status = DASD_CQR_FILLED;
2822 cqr->retries = 255;
2829 if (cqr->refers) {
2830 __dasd_process_erp(base, cqr);
2835 cqr->endclk = get_tod_clock();
2836 list_move_tail(&cqr->blocklist, final_queue);
2840 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
2842 dasd_schedule_block_bh(cqr->block);
2847 struct dasd_ccw_req *cqr;
2855 list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
2856 if (cqr->status != DASD_CQR_FILLED)
2859 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2860 cqr->status = DASD_CQR_FAILED;
2861 cqr->intrc = -EPERM;
2867 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2869 cqr->status = DASD_CQR_FAILED;
2870 cqr->intrc = -ENOLINK;
2879 if (!cqr->startdev)
2880 cqr->startdev = block->base;
2883 cqr->callback = dasd_return_cqr_cb;
2885 dasd_add_request_tail(cqr);
2899 struct dasd_ccw_req *cqr;
2911 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2912 dq = cqr->dq;
2914 list_del_init(&cqr->blocklist);
2915 __dasd_cleanup_cqr(cqr);
2929 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
2938 static void _dasd_requeue_request(struct dasd_ccw_req *cqr)
2946 if (cqr->refers)
2948 spin_lock_irq(&cqr->dq->lock);
2949 req = (struct request *) cqr->callback_data;
2951 spin_unlock_irq(&cqr->dq->lock);
2959 struct dasd_ccw_req *cqr, *n;
2966 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
2968 if (cqr->status >= DASD_CQR_QUEUED)
2969 rc = dasd_cancel_req(cqr);
2977 cqr->callback = _dasd_wake_block_flush_cb;
2978 for (i = 0; cqr; cqr = cqr->refers, i++)
2979 list_move_tail(&cqr->blocklist, flush_queue);
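
Note: `refers` links an error-recovery (ERP) request back to the request it was built for; flushing therefore walks the whole chain, as in the `for (i = 0; cqr; cqr = cqr->refers, i++)` loop above. A standalone walk of such a chain:

    #include <stdio.h>
    #include <stddef.h>

    /* ERP requests point back to the request they recover via `refers`,
     * forming a chain from the newest ERP request to the original. */
    struct req { const char *name; struct req *refers; };

    int main(void)
    {
        struct req original = { "original", NULL };
        struct req erp1 = { "erp-1", &original };
        struct req erp2 = { "erp-2", &erp1 };

        for (struct req *r = &erp2; r; r = r->refers)
            printf("flush %s\n", r->name);
        return 0;
    }
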
2996 struct dasd_ccw_req *cqr, *n;
3006 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
3007 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
3009 if (cqr->refers) {
3011 __dasd_process_erp(block->base, cqr);
3018 spin_lock_irqsave(&cqr->dq->lock, flags);
3019 cqr->endclk = get_tod_clock();
3020 list_del_init(&cqr->blocklist);
3021 __dasd_cleanup_cqr(cqr);
3022 spin_unlock_irqrestore(&cqr->dq->lock, flags);
3057 struct dasd_ccw_req *cqr;
3099 cqr = basedev->discipline->build_cp(basedev, block, req);
3100 if (IS_ERR(cqr)) {
3101 if (PTR_ERR(cqr) == -EBUSY ||
3102 PTR_ERR(cqr) == -ENOMEM ||
3103 PTR_ERR(cqr) == -EAGAIN) {
3109 PTR_ERR(cqr), req);
3117 cqr->callback_data = req;
3118 cqr->status = DASD_CQR_FILLED;
3119 cqr->dq = dq;
3123 list_add_tail(&cqr->blocklist, &block->ccw_queue);
3124 INIT_LIST_HEAD(&cqr->devlist);
3125 dasd_profile_start(block, cqr, req);
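
Note: build_cp reports failure kernel-style by encoding a negative errno in the returned pointer, which the caller tests with IS_ERR() and decodes with PTR_ERR() (the busy, out-of-memory and try-again cases above are requeued rather than failed). A minimal userspace re-implementation of the error-pointer idiom:

    #include <stdio.h>
    #include <errno.h>

    /* Error pointers: small negative errnos live in the top page of the
     * address space, so a pointer value >= -MAX_ERRNO is an error code. */
    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err)      { return (void *)err; }
    static long  PTR_ERR(const void *p) { return (long)p; }
    static int   IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static int real_object;

    static void *build_request(int fail)
    {
        if (fail)
            return ERR_PTR(-ENOMEM);
        return &real_object;   /* stands in for a real allocation */
    }

    int main(void)
    {
        void *cqr = build_request(1);
        if (IS_ERR(cqr))
            printf("build failed: %ld\n", PTR_ERR(cqr)); /* -12 */
        return 0;
    }
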
3146 struct dasd_ccw_req *cqr;
3150 cqr = blk_mq_rq_to_pdu(req);
3151 if (!cqr)
3154 spin_lock_irqsave(&cqr->dq->lock, flags);
3155 device = cqr->startdev ? cqr->startdev : block->base;
3157 spin_unlock_irqrestore(&cqr->dq->lock, flags);
3161 " dasd_times_out cqr %p status %x",
3162 cqr, cqr->status);
3166 cqr->retries = -1;
3167 cqr->intrc = -ETIMEDOUT;
3168 if (cqr->status >= DASD_CQR_QUEUED) {
3169 rc = __dasd_cancel_req(cqr);
3170 } else if (cqr->status == DASD_CQR_FILLED ||
3171 cqr->status == DASD_CQR_NEED_ERP) {
3172 cqr->status = DASD_CQR_TERMINATED;
3173 } else if (cqr->status == DASD_CQR_IN_ERP) {
3181 if (tmpcqr != cqr)
3183 /* searchcqr is an ERP request for cqr */
3205 spin_unlock_irqrestore(&cqr->dq->lock, flags);
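
Note: `blk_mq_rq_to_pdu(req)` works because blk-mq allocates the driver's per-request payload (here the cqr itself) directly behind each struct request; the helper is simple pointer arithmetic. A simplified illustration (struct names hypothetical):

    #include <stdio.h>

    struct request { int tag; };
    struct my_pdu  { int status; };

    /* The PDU lives directly behind the request in one allocation. */
    static void *rq_to_pdu(struct request *rq)
    {
        return rq + 1;
    }

    int main(void)
    {
        /* One slot holding request + PDU, as blk-mq allocates per tag. */
        struct { struct request rq; struct my_pdu pdu; } slot = {
            .rq = { .tag = 3 }, .pdu = { .status = 0 },
        };
        struct my_pdu *pdu = rq_to_pdu(&slot.rq);
        printf("pdu status for tag %d: %d\n", slot.rq.tag, pdu->status);
        return 0;
    }
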
3680 struct dasd_ccw_req *cqr;
3691 list_for_each_entry(cqr, &device->ccw_queue, devlist)
3692 if ((cqr->status == DASD_CQR_IN_IO) ||
3693 (cqr->status == DASD_CQR_CLEAR_PENDING)) {
3694 cqr->status = DASD_CQR_QUEUED;
3695 cqr->retries++;
3839 struct dasd_ccw_req *cqr)
3847 if (cqr->status == DASD_CQR_IN_IO ||
3848 cqr->status == DASD_CQR_CLEAR_PENDING) {
3849 cqr->status = DASD_CQR_QUEUED;
3850 cqr->retries++;
3883 struct dasd_ccw_req *cqr, *n;
3894 list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
3895 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
3897 if (cqr->refers) {
3899 __dasd_process_erp(block->base, cqr);
3906 _dasd_requeue_request(cqr);
3907 list_del_init(&cqr->blocklist);
3908 cqr->block->base->discipline->free_cp(
3909 cqr, (struct request *) cqr->callback_data);
3937 struct dasd_ccw_req *cqr,
3942 dasd_eer_write(device, cqr, reason);
3965 struct dasd_ccw_req *cqr;
3968 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
3971 if (IS_ERR(cqr)) {
3974 return cqr;
3977 ccw = cqr->cpaddr;
3979 ccw->cda = virt_to_dma32(cqr->data);
3982 cqr->startdev = device;
3983 cqr->memdev = device;
3984 cqr->expires = 10*HZ;
3985 cqr->retries = 256;
3986 cqr->buildclk = get_tod_clock();
3987 cqr->status = DASD_CQR_FILLED;
3988 return cqr;
3996 struct dasd_ccw_req *cqr;
3998 cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
3999 if (IS_ERR(cqr))
4000 return PTR_ERR(cqr);
4002 ret = dasd_sleep_on(cqr);
4004 memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
4005 dasd_sfree_request(cqr, cqr->memdev);
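
Note: the RDC builder above fills a single channel command word whose data address points at the request's data area; `virt_to_dma32` is needed because a format-1 CCW holds only a 31-bit address. A sketch of the CCW layout, with an illustrative command code (0x64 is, to the best of my knowledge, Read Device Characteristics on ECKD devices):

    #include <stdio.h>
    #include <stdint.h>

    /* s390 format-1 CCW: command code, flags, byte count, and a 31-bit
     * channel data address (hence virt_to_dma32 in the listing). */
    struct ccw1 {
        uint8_t  cmd_code;
        uint8_t  flags;
        uint16_t count;
        uint32_t cda;
    };

    int main(void)
    {
        static uint8_t rdc_buffer[64];
        struct ccw1 ccw = {
            .cmd_code = 0x64,               /* Read Device Characteristics */
            .flags    = 0,
            .count    = sizeof(rdc_buffer),
            /* Demo only: real code must use memory addressable in 31 bits,
             * which is what virt_to_dma32 guarantees in the kernel. */
            .cda      = (uint32_t)(uintptr_t)rdc_buffer,
        };
        printf("ccw: cmd=%#x count=%u\n", ccw.cmd_code, ccw.count);
        return 0;
    }
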