Lines matching defs:job (each entry gives the source line number followed by the matching line)

76  * received by the AIO job so far.
82 static void t4_aio_cancel_active(struct kaiocb *job);
83 static void t4_aio_cancel_queued(struct kaiocb *job);
164 ddp_complete_one(struct kaiocb *job, int error)
169 * If this job had copied data out of the socket buffer before
173 copied = job->aio_received;
175 aio_complete(job, copied, 0);
177 aio_complete(job, -1, error);
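
The ddp_complete_one() fragments above (lines 164-177) encode the completion rule used throughout this file: a job that already moved data out of the socket buffer finishes as a short read, and only a job with no progress reports the error. The branch condition itself is not among the matched lines, so the sketch below is a minimal userspace model assuming the usual "progress wins" check; struct fake_job, fake_aio_complete(), and model_ddp_complete_one() are hypothetical stand-ins for the kernel's struct kaiocb, aio_complete(), and ddp_complete_one().

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical stand-in for the kernel's struct kaiocb. */
    struct fake_job {
    	size_t aio_received;		/* bytes already copied to userland */
    };

    /* Hypothetical stand-in for aio_complete(). */
    static void
    fake_aio_complete(struct fake_job *job, long status, int error)
    {
    	(void)job;
    	printf("complete: status=%ld error=%d\n", status, error);
    }

    /* Model of ddp_complete_one(): prior progress turns an error into a
     * short read; only a job with zero progress reports the error. */
    static void
    model_ddp_complete_one(struct fake_job *job, int error)
    {
    	long copied = (long)job->aio_received;

    	if (copied != 0 || error == 0)
    		fake_aio_complete(job, copied, 0);	/* short read */
    	else
    		fake_aio_complete(job, -1, error);	/* hard failure */
    }

    int
    main(void)
    {
    	struct fake_job progressed = { .aio_received = 128 };
    	struct fake_job untouched = { .aio_received = 0 };

    	model_ddp_complete_one(&progressed, 54);	/* reports 128 bytes */
    	model_ddp_complete_one(&untouched, 54);		/* reports error 54 */
    	return (0);
    }
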
275 if (db->job) {
282 if (!aio_clear_cancel_function(db->job))
283 ddp_complete_one(db->job, 0);
285 db->job = NULL;
356 MPASS(toep->ddp.db[i].job == NULL);
381 KASSERT(toep->ddp.db[db_idx ^ 1].job == NULL,
401 db->job = NULL;
467 struct kaiocb *job;
505 job = db->job;
506 copied = job->aio_received;
508 if (placed > job->uaiocb.aio_nbytes - copied)
509 placed = job->uaiocb.aio_nbytes - copied;
511 job->msgrcv = 1;
515 if (!aio_clear_cancel_function(job)) {
521 job->aio_received += placed;
525 __func__, job, copied, placed);
527 aio_complete(job, copied + placed, 0);
528 } else if (aio_set_cancel_function(job, t4_aio_cancel_queued)) {
529 TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
532 aio_cancel(job);
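
Lines 505-532 (and the near-identical sequence at 1137-1157) show what happens when a DDP buffer with an attached job is invalidated: the byte count the hardware placed is clamped so the job never accounts for more than the bytes it requested, and the job is then completed, requeued, or cancelled depending on who wins the cancel race. A self-contained sketch of just the clamp, with hypothetical names:

    #include <assert.h>
    #include <stddef.h>

    /*
     * Model of the clamp at lines 506-509: 'placed' is what the hardware
     * wrote into the DDP buffer, but a job may account for at most
     * aio_nbytes - copied further bytes.
     */
    static size_t
    clamp_placed(size_t placed, size_t aio_nbytes, size_t copied)
    {
    	if (placed > aio_nbytes - copied)
    		placed = aio_nbytes - copied;
    	return (placed);
    }

    int
    main(void)
    {
    	/* Hardware filled a page, but only 100 bytes of the request remain. */
    	assert(clamp_placed(4096, 612, 512) == 100);
    	/* A placement within bounds passes through unchanged. */
    	assert(clamp_placed(256, 4096, 0) == 256);
    	return (0);
    }
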
648 struct kaiocb *job;
665 job = db->job;
674 if (aio_clear_cancel_function(job))
675 ddp_complete_one(job, ECONNRESET);
724 job->msgrcv = 1;
729 * Update the job's length but defer completion to the
732 job->aio_received += len;
734 } else if (!aio_clear_cancel_function(job)) {
739 job->aio_received += len;
741 copied = job->aio_received;
745 __func__, toep->tid, job, copied, len);
747 aio_complete(job, copied + len, 0);
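
The handle_ddp_data() fragments (lines 648-747) show three outcomes for newly indicated data: completion is deferred when a cancel is pending (the comment at line 729), the byte count is recorded for the canceller when the cancel function was already cleared (line 734), or the job completes immediately with everything received so far (line 747). A hedged userspace model of that three-way decision; the two boolean flags are hypothetical stand-ins for the kernel's cancel state:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical model of a job inside handle_ddp_data() (lines 724-747). */
    struct fake_job {
    	size_t aio_received;
    };

    static void
    model_handle_ddp_data(struct fake_job *job, bool cancel_pending,
        bool cancel_in_flight, size_t len)
    {
    	if (cancel_pending) {
    		/* Record the bytes; the cancel reply path completes the job. */
    		job->aio_received += len;
    		printf("deferred, received=%zu\n", job->aio_received);
    	} else if (cancel_in_flight) {
    		/*
    		 * Models !aio_clear_cancel_function(job): the canceller
    		 * owns the job and will complete it with this count.
    		 */
    		job->aio_received += len;
    		printf("left for canceller, received=%zu\n", job->aio_received);
    	} else {
    		/* No cancel in sight: complete with all bytes received. */
    		printf("complete: %zu bytes\n", job->aio_received + len);
    	}
    }

    int
    main(void)
    {
    	struct fake_job job = { .aio_received = 64 };

    	model_handle_ddp_data(&job, false, false, 256);	/* completes: 320 */
    	return (0);
    }
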
1023 struct kaiocb *job;
1046 * handle_ddp_data() should leave the job around until
1050 MPASS(db->job != NULL);
1063 job = db->job;
1064 copied = job->aio_received;
1066 CTR2(KTR_CXGBE, "%s: cancelling %p", __func__, job);
1067 aio_cancel(job);
1070 __func__, job, copied);
1071 aio_complete(job, copied, 0);
1095 struct kaiocb *job;
1137 job = db->job;
1138 copied = job->aio_received;
1140 if (placed > job->uaiocb.aio_nbytes - copied)
1141 placed = job->uaiocb.aio_nbytes - copied;
1143 job->msgrcv = 1;
1147 if (!aio_clear_cancel_function(job)) {
1153 job->aio_received += placed;
1157 aio_complete(job, copied + placed, 0);
2165 hold_aio(struct toepcb *toep, struct kaiocb *job, struct pageset **pps)
2180 vm = job->userproc->p_vmspace;
2182 start = (uintptr_t)job->uaiocb.aio_buf;
2184 end = round_page(start + job->uaiocb.aio_nbytes);
2199 __func__, toep->tid, (unsigned long)job->uaiocb.aio_nbytes,
2201 job->uaiocb.aio_nbytes = end - (start + pgoff);
2213 job->uaiocb.aio_nbytes) == 0) {
2256 ps->len = job->uaiocb.aio_nbytes;
2261 CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d",
2262 __func__, toep->tid, ps, job, ps->npages);
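
hold_aio() (lines 2165-2262) wires down the user buffer as whole pages: the start is truncated and the end rounded to page boundaries, and when the request exceeds the device's DDP buffer limit, aio_nbytes is trimmed to end - (start + pgoff) (line 2201). A sketch of that arithmetic, assuming 4 KiB pages; FAKE_MAX_BUF and the exact trim rule are assumptions, since only the page alignment and the nbytes recompute appear in the matched lines:

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Local stand-ins for the kernel's page macros; 4 KiB pages assumed. */
    #define PAGE_SIZE	4096UL
    #define PAGE_MASK	(PAGE_SIZE - 1)
    #define trunc_page(x)	((x) & ~PAGE_MASK)
    #define round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)

    /* Hypothetical cap standing in for the driver's DDP buffer limit. */
    #define FAKE_MAX_BUF	(256 * 1024UL)

    /*
     * Model of hold_aio()'s range computation: the held range covers whole
     * pages around the buffer, and an oversized request is trimmed so that
     * nbytes = end - (start + pgoff), turning it into a short read.
     */
    static uintptr_t
    model_hold_range(uintptr_t buf, size_t *nbytes)
    {
    	uintptr_t start, end, pgoff;

    	start = buf;
    	pgoff = start & PAGE_MASK;
    	end = round_page(start + *nbytes);
    	start = trunc_page(start);
    	if (end - start > FAKE_MAX_BUF) {
    		end = round_page(start + FAKE_MAX_BUF);
    		*nbytes = end - (start + pgoff);
    	}
    	return (start);
    }

    int
    main(void)
    {
    	size_t nbytes = 1024 * 1024;	/* 1 MiB read, 0x64 bytes into a page */
    	uintptr_t start = model_hold_range(0x10064, &nbytes);

    	assert(start == 0x10000);		/* held range is page-aligned */
    	assert(nbytes == FAKE_MAX_BUF - 0x64);	/* trimmed past the offset */
    	return (0);
    }
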
2270 struct kaiocb *job;
2275 job = TAILQ_FIRST(&toep->ddp.aiojobq);
2276 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
2278 if (aio_clear_cancel_function(job))
2279 ddp_complete_one(job, error);
2284 aio_ddp_cancel_one(struct kaiocb *job)
2289 * If this job had copied data out of the socket buffer before
2293 copied = job->aio_received;
2295 aio_complete(job, copied, 0);
2297 aio_cancel(job);
2301 * Called when the main loop wants to requeue a job to retry it later.
2302 * Deals with the race of the job being cancelled while it was being
2306 aio_ddp_requeue_one(struct toepcb *toep, struct kaiocb *job)
2311 aio_set_cancel_function(job, t4_aio_cancel_queued)) {
2312 TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
2315 aio_ddp_cancel_one(job);
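
Lines 2270-2315 cover the two small helpers the main loop leans on: aio_ddp_cancel_one() applies the same short-read-over-error rule as ddp_complete_one() but cancels a job with no progress outright, and aio_ddp_requeue_one() only puts a job back on the queue if its cancel function can be re-armed; otherwise the cancel path already owns it. A minimal model of that requeue race, with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stddef.h>

    struct fake_job {
    	size_t aio_received;
    	bool cancel_requested;	/* models aio_set_cancel_function() failing */
    };

    /* Model of aio_ddp_cancel_one() (lines 2284-2297): progress completes
     * as a short read; a job with no progress is cancelled. */
    static void
    model_cancel_one(struct fake_job *job)
    {
    	if (job->aio_received != 0)
    		printf("short read: %zu bytes\n", job->aio_received);
    	else
    		printf("cancelled\n");
    }

    /* Model of aio_ddp_requeue_one() (lines 2306-2315). */
    static void
    model_requeue_one(struct fake_job *job)
    {
    	if (!job->cancel_requested) {
    		/* Re-armed the queued canceller: back on ddp.aiojobq. */
    		printf("requeued at head of queue\n");
    		return;
    	}
    	/* Lost the race: the cancel path finishes the job. */
    	model_cancel_one(job);
    }

    int
    main(void)
    {
    	struct fake_job quiet = { 0, false };
    	struct fake_job raced = { 512, true };

    	model_requeue_one(&quiet);	/* requeued */
    	model_requeue_one(&raced);	/* short read of 512 */
    	return (0);
    }
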
2325 struct kaiocb *job;
2348 job = TAILQ_FIRST(&toep->ddp.aiojobq);
2349 so = job->fd_file->f_data;
2367 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
2368 if (!aio_clear_cancel_function(job)) {
2374 * If this job has previously copied some data, report
2378 copied = job->aio_received;
2381 aio_complete(job, copied, 0);
2387 aio_complete(job, -1, error);
2435 /* Take the next job to prep it for DDP. */
2437 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
2438 if (!aio_clear_cancel_function(job))
2440 toep->ddp.queueing = job;
2443 error = hold_aio(toep, job, &ps);
2445 ddp_complete_one(job, error);
2452 copied = job->aio_received;
2456 aio_complete(job, copied, 0);
2465 aio_complete(job, -1, error);
2479 aio_ddp_requeue_one(toep, job);
2483 ddp_complete_one(job, 0);
2502 offset = ps->offset + job->aio_received;
2503 MPASS(job->aio_received <= job->uaiocb.aio_nbytes);
2504 resid = job->uaiocb.aio_nbytes - job->aio_received;
2538 job->aio_received += copied;
2539 job->msgrcv = 1;
2540 copied = job->aio_received;
2545 * the AIO job should keep 'sb' and 'inp' stable.
2572 aio_complete(job, copied, 0);
2585 aio_ddp_requeue_one(toep, job);
2602 aio_ddp_requeue_one(toep, job);
2614 if (toep->ddp.db[0].job == NULL) {
2617 MPASS(toep->ddp.db[1].job == NULL);
2652 * end, the AIO job holds a reference on this end of the socket
2654 * after the job is completed.
2657 job->aio_received, ddp_flags, ddp_flags_mask);
2660 aio_ddp_requeue_one(toep, job);
2675 if (!aio_set_cancel_function(job, t4_aio_cancel_active)) {
2678 aio_ddp_cancel_one(job);
2686 toep->tid, job, db_idx, ddp_flags, ddp_flags_mask);
2692 db->job = job;
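
The tail of the main loop (lines 2614-2692) hands a prepared buffer to the hardware: a free slot is chosen from the two-entry ddp.db[] array, the job's cancel function is switched to t4_aio_cancel_active() before the slot is armed, and only then is db->job set. If the switch fails, cancellation already started and the buffer is never programmed. A sketch of that handshake; the types and the cancel_pending flag are hypothetical:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stddef.h>

    struct fake_db { void *job; };

    /*
     * Model of arming a DDP slot (lines 2614-2692): pick a free slot,
     * switch the job to the "active" canceller, and record ownership
     * last, so a racing cancel never sees a half-armed slot.
     */
    static bool
    model_arm_slot(struct fake_db db[2], void *job, bool cancel_pending)
    {
    	int idx;

    	/* Slot selection mirrors lines 2614-2617: prefer db[0]. */
    	idx = (db[0].job == NULL) ? 0 : 1;

    	/* Models aio_set_cancel_function(job, t4_aio_cancel_active). */
    	if (cancel_pending) {
    		printf("cancel won the race; slot %d left idle\n", idx);
    		return (false);
    	}
    	db[idx].job = job;	/* hardware owns this slot until invalidated */
    	printf("slot %d armed\n", idx);
    	return (true);
    }

    int
    main(void)
    {
    	struct fake_db db[2] = {{ NULL }, { NULL }};
    	int token;

    	model_arm_slot(db, &token, false);	/* arms slot 0 */
    	model_arm_slot(db, &token, false);	/* arms slot 1 */
    	return (0);
    }
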
2732 t4_aio_cancel_active(struct kaiocb *job)
2734 struct socket *so = job->fd_file->f_data;
2742 if (aio_cancel_cleared(job)) {
2744 aio_ddp_cancel_one(job);
2749 if (toep->ddp.db[i].job == job) {
2750 /* Should only ever get one cancel request for a job. */
2765 __func__, job);
2773 t4_aio_cancel_queued(struct kaiocb *job)
2775 struct socket *so = job->fd_file->f_data;
2780 if (!aio_cancel_cleared(job)) {
2781 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
2786 CTR2(KTR_CXGBE, "%s: request %p cancelled", __func__, job);
2789 aio_ddp_cancel_one(job);
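
The two cancel callbacks (lines 2732-2789) differ in what they must undo: t4_aio_cancel_active() looks the job up in the DDP slots and must deal with a buffer the card still owns, while t4_aio_cancel_queued() merely unlinks the job from ddp.aiojobq, using aio_cancel_cleared() to learn whether the main loop dequeued the job first. A model of the queued case, with a hypothetical flag standing in for queue membership:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical queue-membership flag; the kernel uses a TAILQ. */
    struct fake_job {
    	bool on_queue;
    };

    /*
     * Model of t4_aio_cancel_queued() (lines 2773-2789): the job is
     * unlinked only when aio_cancel_cleared() reports that the main loop
     * has not already taken it; either way the request is then finished
     * via the cancel-one path (short read or outright cancel).
     */
    static void
    model_cancel_queued(struct fake_job *job, bool cancel_cleared)
    {
    	if (!cancel_cleared) {
    		/* Still ours: models TAILQ_REMOVE(&ddp.aiojobq, job, list). */
    		job->on_queue = false;
    		printf("unlinked from queue\n");
    	}
    	printf("request cancelled\n");
    }

    int
    main(void)
    {
    	struct fake_job job = { .on_queue = true };

    	model_cancel_queued(&job, false);
    	return (0);
    }
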
2793 t4_aio_queue_ddp(struct socket *so, struct kaiocb *job)
2800 if (job->uaiocb.aio_lio_opcode != LIO_READ)
2830 CTR3(KTR_CXGBE, "%s: queueing %p for tid %u", __func__, job, toep->tid);
2832 if (!aio_set_cancel_function(job, t4_aio_cancel_queued))
2833 panic("new job was cancelled");
2834 TAILQ_INSERT_TAIL(&toep->ddp.aiojobq, job, list);
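
Finally, t4_aio_queue_ddp() (lines 2793-2834) is the entry point: only LIO_READ jobs are eligible for DDP (line 2800), a brand-new job must accept the queued cancel function (a failure means the job was somehow cancelled before it was ever queued, hence the panic at line 2833), and the job goes on the tail of ddp.aiojobq. A compact model of that admission path; the opcode enum and the numeric error value are placeholders:

    #include <stdio.h>

    enum fake_opcode { FAKE_LIO_READ, FAKE_LIO_WRITE };

    struct fake_job {
    	enum fake_opcode opcode;
    };

    /*
     * Model of t4_aio_queue_ddp() (lines 2793-2834): reject non-reads,
     * arm the queued canceller, then append to the tail of the queue.
     */
    static int
    model_queue_ddp(struct fake_job *job)
    {
    	if (job->opcode != FAKE_LIO_READ)
    		return (45);	/* placeholder error, "not supported" */

    	/* aio_set_cancel_function(job, t4_aio_cancel_queued) cannot fail
    	 * here: a fresh job has no pending cancel. */
    	printf("queued at tail of ddp.aiojobq\n");
    	return (0);
    }

    int
    main(void)
    {
    	struct fake_job rd = { FAKE_LIO_READ };
    	struct fake_job wr = { FAKE_LIO_WRITE };

    	model_queue_ddp(&rd);				/* queued */
    	printf("write -> %d\n", model_queue_ddp(&wr));	/* rejected */
    	return (0);
    }
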