Lines matching refs:WRITE in the Linux block-layer throttling code (blk-throttle). Each entry below is the source line number followed by the matching line.

66 	/* Total Number of queued bios on READ and WRITE lists */
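All of the entries below index per-direction state with READ and WRITE (0 and 1), usually alongside a LIMIT_LOW/LIMIT_MAX second index. A minimal standalone sketch of that layout and of the for (rw = READ; rw <= WRITE; rw++) idiom; struct tg_limits and init_defaults are hypothetical stand-ins, not the kernel's struct throtl_grp:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's READ/WRITE direction indexes (0 and 1). */
enum { READ = 0, WRITE = 1 };
enum { LIMIT_LOW = 0, LIMIT_MAX = 1, LIMIT_CNT = 2 };

/* Hypothetical, simplified stand-in for the per-group state; the real
 * struct throtl_grp carries much more. */
struct tg_limits {
	uint64_t bps[2][LIMIT_CNT];      /* bytes/sec, per direction */
	unsigned int iops[2][LIMIT_CNT]; /* IOs/sec, per direction */
	unsigned int nr_queued[2];       /* queued bios, per direction */
};

static void init_defaults(struct tg_limits *tg)
{
	int rw;

	/* Same idiom as the matched lines: walk both directions. */
	for (rw = READ; rw <= WRITE; rw++) {
		/* "unlimited" defaults; the kernel uses U64_MAX/UINT_MAX */
		tg->bps[rw][LIMIT_MAX] = UINT64_MAX;
		tg->iops[rw][LIMIT_MAX] = UINT32_MAX;
		tg->bps[rw][LIMIT_LOW] = 0;   /* low limit disabled */
		tg->iops[rw][LIMIT_LOW] = 0;
		tg->nr_queued[rw] = 0;
	}
}

int main(void)
{
	struct tg_limits tg;

	init_defaults(&tg);
	printf("write max bps: %llu\n",
	       (unsigned long long)tg.bps[WRITE][LIMIT_MAX]);
	return 0;
}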
333 INIT_LIST_HEAD(&sq->queued[WRITE]);
356 for (rw = READ; rw <= WRITE; rw++) {
363 tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
365 tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
367 tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
369 tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
424 for (rw = READ; rw <= WRITE; rw++) {
457 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
458 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
479 tg->bps[WRITE][LIMIT_LOW] = 0;
481 tg->iops[WRITE][LIMIT_LOW] = 0;
822 if (tg->service_queue.nr_queued[WRITE])
823 __tg_update_carryover(tg, WRITE);
827 tg->carryover_bytes[READ], tg->carryover_bytes[WRITE],
828 tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
1019 bio = throtl_peek_queued(&sq->queued[WRITE]);
1108 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1111 tg_dispatch_one_bio(tg, WRITE);
1142 if (sq->nr_queued[READ] || sq->nr_queued[WRITE])
1201 sq->nr_queued[READ] + sq->nr_queued[WRITE],
1202 sq->nr_queued[READ], sq->nr_queued[WRITE]);
1263 for (rw = READ; rw <= WRITE; rw++)
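The dispatch entries above (throtl_peek_queued, tg_dispatch_one_bio) follow a peek-check-dispatch shape: look at the head bio of the per-direction queue, dispatch it only while the current slice still allows it, and bound how many are dispatched per round. A standalone userspace model of that shape; fake_sq, dispatch and the plain byte budget are made-up simplifications of the kernel's bio lists and jiffies-based slices:

#include <stddef.h>
#include <stdio.h>

enum { READ = 0, WRITE = 1 };

/* Hypothetical stand-in: a FIFO of pending I/O sizes plus the bytes the
 * current throttling slice still allows, per direction. */
struct fake_sq {
	size_t pending[WRITE + 1][8];
	int head[WRITE + 1], tail[WRITE + 1];
	size_t budget[WRITE + 1];      /* remaining bytes this slice */
};

/* Same shape as the matched loop: peek the head request, dispatch it only
 * while it fits in the remaining budget, bounded by a per-round cap so one
 * direction cannot monopolize a dispatch round. */
static int dispatch(struct fake_sq *sq, int rw, int max_nr)
{
	int nr = 0;

	while (nr < max_nr && sq->head[rw] != sq->tail[rw] &&
	       sq->pending[rw][sq->head[rw]] <= sq->budget[rw]) {
		sq->budget[rw] -= sq->pending[rw][sq->head[rw]++];
		nr++;
	}
	return nr;
}

int main(void)
{
	struct fake_sq sq = { .pending[WRITE] = { 4096, 8192, 65536 },
			      .tail[WRITE] = 3, .budget[WRITE] = 16384 };

	/* Dispatches the 4 KiB and 8 KiB writes; the 64 KiB one must wait. */
	printf("dispatched %d writes\n", dispatch(&sq, WRITE, 4));
	return 0;
}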
1320 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1321 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1362 throtl_start_new_slice(tg, WRITE, false);
1453 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1465 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1515 tg->bps_conf[WRITE][off] == bps_dft &&
1517 tg->iops_conf[WRITE][off] == iops_dft &&
1526 if (tg->bps_conf[WRITE][off] != U64_MAX)
1528 tg->bps_conf[WRITE][off]);
1532 if (tg->iops_conf[WRITE][off] != UINT_MAX)
1534 tg->iops_conf[WRITE][off]);
1584 v[1] = tg->bps_conf[WRITE][index];
1586 v[3] = tg->iops_conf[WRITE][index];
1630 tg->bps_conf[WRITE][index] = v[1];
1632 tg->iops_conf[WRITE][index] = v[3];
1636 tg->bps[WRITE][index] = v[1];
1638 tg->iops[WRITE][index] = v[3];
1642 tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1643 tg->bps_conf[WRITE][LIMIT_MAX]);
1646 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1647 tg->iops_conf[WRITE][LIMIT_MAX]);
1653 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1657 tg->bps[WRITE][LIMIT_LOW] = 0;
1659 tg->iops[WRITE][LIMIT_LOW] = 0;
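The stores above write the user's values into bps_conf/iops_conf and then derive the effective LIMIT_LOW values by clamping them against LIMIT_MAX, so a low (soft) limit can never exceed the hard limit. A tiny standalone sketch of that clamp with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

/* Standalone illustration of the clamp in the matched lines: the effective
 * low limit is min(configured low, configured max). */
static uint64_t effective_low(uint64_t conf_low, uint64_t conf_max)
{
	return conf_low < conf_max ? conf_low : conf_max;
}

int main(void)
{
	/* e.g. a write low limit configured to 10 MiB/s but a max of only
	 * 5 MiB/s: the group is treated as having a 5 MiB/s low limit. */
	printf("%llu\n", (unsigned long long)
	       effective_low(10ull << 20, 5ull << 20));
	return 0;
}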
1771 if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1772 wtime = tg->last_low_overflow_time[WRITE];
1794 !parent->bps[WRITE][LIMIT_LOW] &&
1795 !parent->iops[WRITE][LIMIT_LOW])
1846 * cgroup reaches low limit when low limit of READ and WRITE are
1851 throtl_low_limit_reached(tg, WRITE))
2023 if (tg->bps[WRITE][LIMIT_LOW]) {
2024 bps = tg->last_bytes_disp[WRITE] * HZ;
2026 if (bps >= tg->bps[WRITE][LIMIT_LOW])
2027 tg->last_low_overflow_time[WRITE] = now;
2036 if (tg->iops[WRITE][LIMIT_LOW]) {
2037 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2038 if (iops >= tg->iops[WRITE][LIMIT_LOW])
2039 tg->last_low_overflow_time[WRITE] = now;
2050 tg->last_bytes_disp[WRITE] = 0;
2052 tg->last_io_disp[WRITE] = 0;
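The checks above convert the bytes and IOs dispatched since the last check into rates (value * HZ / elapsed jiffies), compare them with the LIMIT_LOW settings, record last_low_overflow_time when the low limit is met, and then zero last_bytes_disp/last_io_disp for the next window. A standalone model of the rate arithmetic, with FAKE_HZ standing in for the kernel's HZ:

#include <stdint.h>
#include <stdio.h>

#define FAKE_HZ 250u  /* stand-in for the kernel's HZ (jiffies per second) */

/* rate = bytes dispatched * HZ / elapsed jiffies, as in the matched lines. */
static uint64_t bytes_per_sec(uint64_t bytes, unsigned long elapsed_jiffies)
{
	return bytes * FAKE_HZ / elapsed_jiffies;
}

int main(void)
{
	/* e.g. 2 MiB dispatched over 125 jiffies (0.5 s at HZ=250) = 4 MiB/s */
	printf("%llu bytes/s\n",
	       (unsigned long long)bytes_per_sec(2ull << 20, 125));
	return 0;
}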
2086 for (rw = READ; rw <= WRITE; rw++) {
2117 for (rw = READ; rw <= WRITE; rw++) {
2145 td->avg_buckets[WRITE][i].latency,
2146 td->avg_buckets[WRITE][i].valid);
2253 sq->nr_queued[READ], sq->nr_queued[WRITE]);
2383 td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
2385 if (!td->latency_buckets[WRITE]) {
2406 free_percpu(td->latency_buckets[WRITE]);
2421 free_percpu(q->td->latency_buckets[WRITE]);
2442 td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
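The final entries allocate one per-CPU array of latency buckets per direction with __alloc_percpu(), free the READ array again if the WRITE allocation fails, and seed avg_buckets[WRITE][i].latency with a baseline value. A kernel-style sketch (not runnable on its own) of that allocate-and-roll-back pattern; struct sample_bucket, NR_BUCKETS and the lat_state_* helpers are simplified stand-ins, not the kernel's definitions:

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/errno.h>

#define NR_BUCKETS 9   /* hypothetical bucket count */

struct sample_bucket {          /* stand-in for the per-CPU sample slot */
	unsigned long total_latency;
	int samples;
};

struct lat_state {
	struct sample_bucket __percpu *buckets[2];   /* [READ], [WRITE] */
};

static int lat_state_init(struct lat_state *st)
{
	st->buckets[READ] = __alloc_percpu(sizeof(struct sample_bucket) * NR_BUCKETS,
					   __alignof__(struct sample_bucket));
	if (!st->buckets[READ])
		return -ENOMEM;

	st->buckets[WRITE] = __alloc_percpu(sizeof(struct sample_bucket) * NR_BUCKETS,
					    __alignof__(struct sample_bucket));
	if (!st->buckets[WRITE]) {
		/* roll back the READ allocation, as the matched code does */
		free_percpu(st->buckets[READ]);
		return -ENOMEM;
	}
	return 0;
}

static void lat_state_exit(struct lat_state *st)
{
	free_percpu(st->buckets[READ]);
	free_percpu(st->buckets[WRITE]);
}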