Lines Matching refs:queue

14 * Device Driver Control Block (DDCB) queue support. Definition of
15 * interrupt handlers for queue support as well as triggering the
40 * Situation (1): Empty queue
82 static int queue_empty(struct ddcb_queue *queue)
84 return queue->ddcb_next == queue->ddcb_act;
87 static int queue_enqueued_ddcbs(struct ddcb_queue *queue)
89 if (queue->ddcb_next >= queue->ddcb_act)
90 return queue->ddcb_next - queue->ddcb_act;
92 return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next);
95 static int queue_free_ddcbs(struct ddcb_queue *queue)
97 int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1;
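The three helpers above implement the ring accounting: ddcb_act is the oldest DDCB still owned by the hardware, ddcb_next is the next slot software will fill, and queue_free_ddcbs() always keeps one slot in reserve, presumably so a completely full ring never looks identical to an empty one (ddcb_next == ddcb_act). A minimal userspace model of that arithmetic; the names and the ring size of 32 are illustrative, not taken from the driver:

#include <assert.h>

struct ring {
	unsigned int act;	/* oldest DDCB still owned by hardware */
	unsigned int next;	/* next free slot software will fill */
	unsigned int max;	/* number of slots in the ring */
};

static int ring_empty(const struct ring *r)
{
	return r->next == r->act;
}

static unsigned int ring_enqueued(const struct ring *r)
{
	if (r->next >= r->act)
		return r->next - r->act;
	return r->max - (r->act - r->next);	/* wrapped around */
}

static unsigned int ring_free(const struct ring *r)
{
	return r->max - ring_enqueued(r) - 1;	/* one slot kept unused */
}

int main(void)
{
	struct ring r = { .act = 30, .next = 2, .max = 32 };

	assert(!ring_empty(&r));
	assert(ring_enqueued(&r) == 4);		/* slots 30, 31, 0, 1 */
	assert(ring_free(&r) == 27);
	return 0;
}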
106 * Use of the PRIV field in the DDCB for queue debugging:
163 static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue)
174 cd->card_idx, queue->ddcb_act, queue->ddcb_next);
176 pddcb = queue->ddcb_vaddr;
177 for (i = 0; i < queue->ddcb_max; i++) {
180 i == queue->ddcb_act ? '>' : ' ',
237 * This function will also return true if the state of the queue is
252 * @queue: queue this operation should be done on
256 * Start execution of DDCB by tapping or append to queue via NEXT
263 * 2 if DDCB queue is tapped via register/simulation
265 static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
282 prev_no = (ddcb_no == 0) ? queue->ddcb_max - 1 : ddcb_no - 1;
283 prev_ddcb = &queue->ddcb_vaddr[prev_no];
304 return RET_DDCB_APPENDED; /* appended to queue */
312 __genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */
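enqueue_ddcb() has two ways of handing a freshly prepared DDCB to the hardware: append it behind the previous DDCB so the engine picks it up via the NEXT mechanism, or, if that is no longer possible, write the DDCB number to IO_QUEUE_OFFSET to (re)start the queue, which the kdoc above calls tapping and reports as return value 2. The sketch below only models the wrap-around computation of the previous slot and the two outcomes; prev_still_active(), tap_queue_register() and the RET_DDCB_TAPPED name are assumptions, and in the real code the decision is made by inspecting and updating the previous DDCB in place.

#include <stdbool.h>
#include <stdio.h>

enum { RET_DDCB_APPENDED = 1, RET_DDCB_TAPPED = 2 };	/* 2 per the kdoc; 1 assumed */

static bool prev_still_active(unsigned int prev_no)
{
	/* stand-in for checking whether the hardware will still honor a
	   NEXT hint placed in the previous DDCB */
	return prev_no % 2 == 0;		/* arbitrary, demo only */
}

static void tap_queue_register(unsigned int num)
{
	/* stand-in for __genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num) */
	printf("tap queue at DDCB %u\n", num);
}

static int model_enqueue(unsigned int ddcb_no, unsigned int ddcb_max)
{
	/* previous slot, wrapping at the start of the ring */
	unsigned int prev_no = (ddcb_no == 0) ? ddcb_max - 1 : ddcb_no - 1;

	if (prev_still_active(prev_no))
		return RET_DDCB_APPENDED;	/* appended to queue */

	tap_queue_register(ddcb_no);		/* start queue */
	return RET_DDCB_TAPPED;
}

int main(void)
{
	printf("%d\n", model_enqueue(0, 32));	/* prev_no = 31, gets tapped */
	printf("%d\n", model_enqueue(5, 32));	/* prev_no = 4, gets appended */
	return 0;
}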
332 struct ddcb_queue *queue = req->queue;
333 struct ddcb *pddcb = &queue->ddcb_vaddr[req->num];
348 queue->ddcb_max - 1 : ddcb_no - 1;
349 struct ddcb *prev_pddcb = &queue->ddcb_vaddr[prev_no];
359 * genwqe_check_ddcb_queue() - Checks DDCB queue for completed work requests.
361 * @queue: queue to be checked
366 struct ddcb_queue *queue)
372 spin_lock_irqsave(&queue->ddcb_lock, flags);
375 while (!queue_empty(queue) && (ddcbs_finished < queue->ddcb_max)) {
381 pddcb = &queue->ddcb_vaddr[queue->ddcb_act];
390 req = queue->ddcb_req[queue->ddcb_act];
401 * In case of seeing the queue in inconsistent state
402 * we read the errcnts and the queue status to provide
408 u64 ddcb_offs = (u64)pddcb - (u64)queue->ddcb_vaddr;
410 errcnts = __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS);
411 status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);
417 queue->ddcb_daddr + ddcb_offs);
420 copy_ddcb_results(req, queue->ddcb_act);
421 queue->ddcb_req[queue->ddcb_act] = NULL; /* take from queue */
442 queue->ddcbs_completed++;
443 queue->ddcbs_in_flight--;
446 processes on the busy queue */
447 wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]);
448 wake_up_interruptible(&queue->busy_waitq);
451 queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max;
456 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
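genwqe_check_ddcb_queue() walks the ring from ddcb_act under ddcb_lock: for every DDCB the hardware has finished it copies the results into the request, clears the ddcb_req[] slot, bumps ddcbs_completed, drops ddcbs_in_flight, wakes the per-slot and busy waiters, and advances ddcb_act with wrap-around; it stops at the first DDCB that is still running. A compressed userspace model of that loop, with completion flags standing in for the hardware status and locking/wakeups stubbed out (all names illustrative):

#include <stdio.h>
#include <string.h>

#define RING_MAX 8

struct model_queue {
	int done[RING_MAX];	/* 1 = hardware finished this DDCB */
	void *req[RING_MAX];	/* request attached to each slot */
	unsigned int act, next;
	unsigned int completed, in_flight;
};

static unsigned int scan_completions(struct model_queue *q)
{
	unsigned int finished = 0;

	while (q->next != q->act && finished < RING_MAX) {
		if (!q->done[q->act])
			break;			/* oldest DDCB still running */

		/* copy_ddcb_results() would run here */
		q->req[q->act] = NULL;		/* take it off the queue */
		q->completed++;
		q->in_flight--;
		/* wake_up_interruptible() on the slot and busy waitqs here */

		q->act = (q->act + 1) % RING_MAX;
		finished++;
	}
	return finished;
}

int main(void)
{
	struct model_queue q;
	unsigned int finished;
	int dummy;

	memset(&q, 0, sizeof(q));
	q.act = 6; q.next = 1; q.in_flight = 3;	/* slots 6, 7, 0 enqueued */
	q.req[6] = q.req[7] = q.req[0] = &dummy;
	q.done[6] = q.done[7] = 1;		/* slot 0 not done yet */

	finished = scan_completions(&q);
	printf("finished=%u act=%u in_flight=%u\n", finished, q.act, q.in_flight);
	return 0;
}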
475 * queue.
481 struct ddcb_queue *queue;
487 queue = req->queue;
488 if (queue == NULL)
492 if (ddcb_no >= queue->ddcb_max)
495 rc = wait_event_interruptible_timeout(queue->ddcb_waitqs[ddcb_no],
506 struct ddcb_queue *queue = req->queue;
514 genwqe_check_ddcb_queue(cd, req->queue);
524 __genwqe_readq(cd, queue->IO_QUEUE_STATUS));
526 pddcb = &queue->ddcb_vaddr[req->num];
529 print_ddcb_info(cd, req->queue);
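__genwqe_wait_ddcb() sleeps on the per-slot waitqueue ddcb_waitqs[ddcb_no] via wait_event_interruptible_timeout(); when the wait does not complete cleanly it re-checks the queue, reads IO_QUEUE_STATUS and dumps the offending DDCB through print_ddcb_info() for debugging. As a userspace stand-in for that kernel waitqueue pattern, the same shape can be expressed with a pthread condition variable and a timed wait; everything below is illustrative, not driver code:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t slot_waitq = PTHREAD_COND_INITIALIZER;
static int slot_done;	/* set by the "completion scan" side */

/* roughly: wait_event_interruptible_timeout(waitq, condition, timeout) */
static int wait_slot_done(unsigned int timeout_sec)
{
	struct timespec ts;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_sec;

	pthread_mutex_lock(&lock);
	while (!slot_done && rc != ETIMEDOUT)
		rc = pthread_cond_timedwait(&slot_waitq, &lock, &ts);
	pthread_mutex_unlock(&lock);

	if (!slot_done) {
		/* the driver would read IO_QUEUE_STATUS and call
		   print_ddcb_info() here before giving up */
		return -1;
	}
	return 0;
}

static void *completer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	slot_done = 1;			/* like copy_ddcb_results() + wake_up */
	pthread_cond_signal(&slot_waitq);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, completer, NULL);
	printf("wait rc=%d\n", wait_slot_done(2));
	pthread_join(t, NULL);
	return 0;
}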
559 * @queue: DDCB queue
568 struct ddcb_queue *queue,
574 if (queue_free_ddcbs(queue) == 0) /* queue is full */
578 pddcb = &queue->ddcb_vaddr[queue->ddcb_next];
585 *num = queue->ddcb_next; /* internal DDCB number */
586 queue->ddcb_next = (queue->ddcb_next + 1) % queue->ddcb_max;
601 pddcb->seqnum_16 = cpu_to_be16(queue->ddcb_seq++);
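get_next_ddcb() hands out the slot at ddcb_next after confirming under ddcb_lock that queue_free_ddcbs() is non-zero, then advances ddcb_next with wrap-around and stamps the DDCB with the next 16-bit sequence number (setup_ddcb_queue() below starts that sequence at 0x100). A tiny model of the slot and sequence bookkeeping; the names and the ring size of 32 are illustrative:

#include <stdint.h>
#include <stdio.h>

struct alloc_state {
	unsigned int next, act, max;
	uint16_t seq;
};

static int get_next_slot(struct alloc_state *s, unsigned int *num,
			 uint16_t *seqnum)
{
	unsigned int enqueued = (s->next + s->max - s->act) % s->max;

	if (s->max - enqueued - 1 == 0)
		return -1;			/* queue full, caller backs off */

	*num = s->next;				/* internal DDCB number */
	s->next = (s->next + 1) % s->max;	/* advance with wrap-around */
	*seqnum = s->seq++;			/* 16-bit sequence, wraps too */
	return 0;
}

int main(void)
{
	struct alloc_state s = { .next = 31, .act = 5, .max = 32, .seq = 0x100 };
	unsigned int num;
	uint16_t seq;

	while (get_next_slot(&s, &num, &seq) == 0)
		printf("slot %2u seq 0x%04x\n", num, seq);
	return 0;
}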
625 struct ddcb_queue *queue = req->queue;
638 pddcb = &queue->ddcb_vaddr[req->num];
642 spin_lock_irqsave(&queue->ddcb_lock, flags);
665 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
681 queue->ddcbs_in_flight--;
682 queue->ddcb_req[req->num] = NULL; /* delete from array */
689 * DDCB in the queue. To do that, we must update
697 (queue->ddcb_act == req->num)) {
698 queue->ddcb_act = ((queue->ddcb_act + 1) %
699 queue->ddcb_max);
702 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
707 * If the card is dead and the queue is forced to stop, we
708 * might see this in the queue status register.
710 queue_status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);
720 print_ddcb_info(cd, req->queue);
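__genwqe_purge_ddcb() takes a request back out of the ring when it will not complete normally: under ddcb_lock it drops ddcbs_in_flight, clears the ddcb_req[] slot and, when the purged entry is the one the completion scan is currently parked on, advances ddcb_act past it (in the driver that advance carries an extra condition on the DDCB state which the fragments above only hint at); afterwards IO_QUEUE_STATUS is read because a dead card that forced the queue to stop shows up there. A small model of just the bookkeeping, with illustrative names:

#include <stdio.h>

#define RING_MAX 8

struct purge_model {
	void *req[RING_MAX];
	unsigned int act, max;
	unsigned int in_flight;
};

static void purge_slot(struct purge_model *q, unsigned int num)
{
	q->in_flight--;
	q->req[num] = NULL;			/* delete from array */

	/* if we purged the DDCB the scan is waiting on, skip over it */
	if (q->act == num)
		q->act = (q->act + 1) % q->max;
}

int main(void)
{
	struct purge_model q = { .act = 3, .max = RING_MAX, .in_flight = 2 };
	int dummy;

	q.req[3] = q.req[4] = &dummy;
	purge_slot(&q, 3);
	printf("act=%u in_flight=%u\n", q.act, q.in_flight);	/* 4 and 1 */
	return 0;
}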
759 struct ddcb_queue *queue;
772 queue = req->queue = &cd->queue;
778 genwqe_check_ddcb_queue(cd, queue);
785 spin_lock_irqsave(&queue->ddcb_lock, flags);
787 pddcb = get_next_ddcb(cd, queue, &req->num); /* get ptr and num */
791 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
794 queue->return_on_busy++;
798 queue->wait_on_busy++;
799 rc = wait_event_interruptible(queue->busy_waitq,
800 queue_free_ddcbs(queue) != 0);
809 if (queue->ddcb_req[req->num] != NULL) {
810 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
818 queue->ddcb_req[req->num] = req;
826 * stop the queue in those cases for this command. XDIR = 1
892 enqueue_ddcb(cd, queue, pddcb, req->num);
893 queue->ddcbs_in_flight++;
895 if (queue->ddcbs_in_flight > queue->ddcbs_max_in_flight)
896 queue->ddcbs_max_in_flight = queue->ddcbs_in_flight;
899 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
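__genwqe_enqueue_ddcb() first reaps finished work with genwqe_check_ddcb_queue(), then reserves a slot via get_next_ddcb() under ddcb_lock. If the ring is full it either returns busy to the caller (return_on_busy++) or sleeps on busy_waitq until queue_free_ddcbs() becomes non-zero (wait_on_busy++) and retries; once a slot is held, the request is parked in ddcb_req[], the DDCB is handed to the hardware with enqueue_ddcb(), and ddcbs_in_flight / ddcbs_max_in_flight are updated. A condensed model of the full-ring back-off decision; how a caller selects the non-blocking versus waiting behaviour is an assumption here, since the fragments only show the two counters:

#include <stdbool.h>
#include <stdio.h>

struct busy_stats {
	unsigned long return_on_busy;
	unsigned long wait_on_busy;
};

static int reserve_slot(bool ring_full, bool nonblocking, struct busy_stats *st)
{
	if (!ring_full)
		return 0;		/* slot reserved, go fill in the DDCB */

	if (nonblocking) {
		st->return_on_busy++;
		return -1;		/* caller sees "busy" and retries later */
	}

	st->wait_on_busy++;
	/* here the driver sleeps on busy_waitq until queue_free_ddcbs() != 0,
	   then loops back and tries get_next_ddcb() again */
	return 1;
}

int main(void)
{
	struct busy_stats st = { 0, 0 };

	printf("%d %d %d\n",
	       reserve_slot(false, false, &st),
	       reserve_slot(true, true, &st),
	       reserve_slot(true, false, &st));
	printf("return_on_busy=%lu wait_on_busy=%lu\n",
	       st.return_on_busy, st.wait_on_busy);
	return 0;
}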
975 * We use this as condition for our wait-queue code.
981 struct ddcb_queue *queue = &cd->queue;
983 spin_lock_irqsave(&queue->ddcb_lock, flags);
985 if (queue_empty(queue)) { /* empty queue */
986 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
990 pddcb = &queue->ddcb_vaddr[queue->ddcb_act];
992 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
996 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
1005 * queue. This is needed for statistics as well as condition if we want
1012 struct ddcb_queue *queue = &cd->queue;
1014 spin_lock_irqsave(&queue->ddcb_lock, flags);
1015 ddcbs_in_flight += queue->ddcbs_in_flight;
1016 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
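genwqe_next_ddcb_ready() and genwqe_ddcbs_in_flight() are small snapshot helpers: both take ddcb_lock, the first returns false on an empty queue and otherwise peeks at the DDCB at ddcb_act so the result can serve as a wait condition, the second reads ddcbs_in_flight for statistics. The same snapshot-under-lock idiom in userspace, with a pthread mutex standing in for ddcb_lock and a plain flag standing in for the DDCB status (all names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_MAX 8

struct snap_queue {
	pthread_mutex_t lock;
	int done[RING_MAX];
	unsigned int act, next, in_flight;
};

static bool next_ddcb_ready(struct snap_queue *q)
{
	bool ready;

	pthread_mutex_lock(&q->lock);
	if (q->next == q->act) {		/* empty queue */
		pthread_mutex_unlock(&q->lock);
		return false;
	}
	ready = q->done[q->act] != 0;		/* peek at the oldest DDCB */
	pthread_mutex_unlock(&q->lock);
	return ready;
}

static unsigned int ddcbs_in_flight(struct snap_queue *q)
{
	unsigned int n;

	pthread_mutex_lock(&q->lock);
	n = q->in_flight;			/* consistent snapshot for stats */
	pthread_mutex_unlock(&q->lock);
	return n;
}

int main(void)
{
	struct snap_queue q = { .act = 2, .next = 4, .in_flight = 2 };

	pthread_mutex_init(&q.lock, NULL);
	q.done[2] = 1;
	printf("ready=%d in_flight=%u\n", next_ddcb_ready(&q),
	       ddcbs_in_flight(&q));
	return 0;
}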
1021 static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
1034 queue->ddcbs_in_flight = 0; /* statistics */
1035 queue->ddcbs_max_in_flight = 0;
1036 queue->ddcbs_completed = 0;
1037 queue->return_on_busy = 0;
1038 queue->wait_on_busy = 0;
1040 queue->ddcb_seq = 0x100; /* start sequence number */
1041 queue->ddcb_max = GENWQE_DDCB_MAX;
1042 queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size,
1043 &queue->ddcb_daddr);
1044 if (queue->ddcb_vaddr == NULL) {
1049 queue->ddcb_req = kcalloc(queue->ddcb_max, sizeof(struct ddcb_requ *),
1051 if (!queue->ddcb_req) {
1056 queue->ddcb_waitqs = kcalloc(queue->ddcb_max,
1059 if (!queue->ddcb_waitqs) {
1064 for (i = 0; i < queue->ddcb_max; i++) {
1065 pddcb = &queue->ddcb_vaddr[i]; /* DDCBs */
1069 queue->ddcb_req[i] = NULL; /* requests */
1070 init_waitqueue_head(&queue->ddcb_waitqs[i]); /* waitqueues */
1073 queue->ddcb_act = 0;
1074 queue->ddcb_next = 0; /* queue is empty */
1076 spin_lock_init(&queue->ddcb_lock);
1077 init_waitqueue_head(&queue->busy_waitq);
1079 val64 = ((u64)(queue->ddcb_max - 1) << 8); /* lastptr */
1080 __genwqe_writeq(cd, queue->IO_QUEUE_CONFIG, 0x07); /* iCRC/vCRC */
1081 __genwqe_writeq(cd, queue->IO_QUEUE_SEGMENT, queue->ddcb_daddr);
1082 __genwqe_writeq(cd, queue->IO_QUEUE_INITSQN, queue->ddcb_seq);
1083 __genwqe_writeq(cd, queue->IO_QUEUE_WRAP, val64);
1087 kfree(queue->ddcb_req);
1088 queue->ddcb_req = NULL;
1090 __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
1091 queue->ddcb_daddr);
1092 queue->ddcb_vaddr = NULL;
1093 queue->ddcb_daddr = 0ull;
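setup_ddcb_queue() zeroes the statistics, starts the sequence counter at 0x100, allocates the DMA-coherent DDCB array plus the ddcb_req[] and ddcb_waitqs[] side arrays with kcalloc(), initialises every slot and waitqueue, and then programs the queue registers: CONFIG (0x07, iCRC/vCRC per the comment), SEGMENT (the DMA base address), INITSQN (the start sequence) and WRAP, where the value (ddcb_max - 1) << 8 apparently places the last valid DDCB index at bits 8 and up; the error path unwinds in reverse, freeing ddcb_req and then the coherent buffer. A quick check of that WRAP value for an assumed ring size of 32 (GENWQE_DDCB_MAX itself is not shown in the fragments):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int ddcb_max = 32;			/* assumed for the example */
	uint64_t val64 = (uint64_t)(ddcb_max - 1) << 8;	/* lastptr */

	/* prints 0x1f00, i.e. index 31 shifted into bits 8..15 */
	printf("IO_QUEUE_WRAP lastptr = 0x%llx\n", (unsigned long long)val64);
	return 0;
}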
1098 static int ddcb_queue_initialized(struct ddcb_queue *queue)
1100 return queue->ddcb_vaddr != NULL;
1103 static void free_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
1107 queue_size = roundup(queue->ddcb_max * sizeof(struct ddcb), PAGE_SIZE);
1109 kfree(queue->ddcb_req);
1110 queue->ddcb_req = NULL;
1112 if (queue->ddcb_vaddr) {
1113 __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
1114 queue->ddcb_daddr);
1115 queue->ddcb_vaddr = NULL;
1116 queue->ddcb_daddr = 0ull;
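ddcb_queue_initialized() simply keys off ddcb_vaddr, and free_ddcb_queue() is shaped so it is safe on a partially set-up queue: kfree(NULL) is a no-op, the coherent buffer is only released when ddcb_vaddr is still set, and both pointers are cleared afterwards so a second call does nothing. The same defensive-teardown shape in plain C, with free() in place of the kernel allocators and illustrative names:

#include <stdlib.h>

struct teardown_model {
	void **req;		/* like queue->ddcb_req */
	void *vaddr;		/* like queue->ddcb_vaddr */
};

static void free_teardown_model(struct teardown_model *q)
{
	free(q->req);		/* harmless if the kcalloc step never ran */
	q->req = NULL;

	if (q->vaddr) {		/* only undo the big allocation if it exists */
		free(q->vaddr);
		q->vaddr = NULL;
	}
}

int main(void)
{
	struct teardown_model q = { NULL, NULL };

	free_teardown_model(&q);	/* safe even though nothing was allocated */
	free_teardown_model(&q);	/* and safe to call twice */
	return 0;
}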
1127 * In case of fatal FIR error the queue is stopped, such that
1134 * Checking for errors before kicking the queue might be
1180 * genwqe_card_thread() - Work thread for the DDCB queue
1195 genwqe_check_ddcb_queue(cd, &cd->queue);
1221 * genwqe_setup_service_layer() - Setup DDCB queue
1231 struct ddcb_queue *queue;
1244 queue = &cd->queue;
1245 queue->IO_QUEUE_CONFIG = IO_SLC_QUEUE_CONFIG;
1246 queue->IO_QUEUE_STATUS = IO_SLC_QUEUE_STATUS;
1247 queue->IO_QUEUE_SEGMENT = IO_SLC_QUEUE_SEGMENT;
1248 queue->IO_QUEUE_INITSQN = IO_SLC_QUEUE_INITSQN;
1249 queue->IO_QUEUE_OFFSET = IO_SLC_QUEUE_OFFSET;
1250 queue->IO_QUEUE_WRAP = IO_SLC_QUEUE_WRAP;
1251 queue->IO_QUEUE_WTIME = IO_SLC_QUEUE_WTIME;
1252 queue->IO_QUEUE_ERRCNTS = IO_SLC_QUEUE_ERRCNTS;
1253 queue->IO_QUEUE_LRW = IO_SLC_QUEUE_LRW;
1255 rc = setup_ddcb_queue(cd, queue);
1303 free_ddcb_queue(cd, queue);
1320 struct ddcb_queue *queue = &cd->queue;
1322 spin_lock_irqsave(&queue->ddcb_lock, flags);
1324 for (i = 0; i < queue->ddcb_max; i++)
1325 wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]);
1327 wake_up_interruptible(&queue->busy_waitq);
1328 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
1347 struct ddcb_queue *queue = &cd->queue;
1349 if (!ddcb_queue_initialized(queue))
1356 /* Wake up all requests in the DDCB queue such that they
1368 " DEBUG [%d/%d] waiting for queue to get empty: %d requests!\n",
1373 * 16 DDCB queues, each queue has e.g. 32 entries,
1381 dev_err(&pci_dev->dev, " [%s] err: queue is not empty!!\n",
1389 * genwqe_release_service_layer() - Shutdown DDCB queue
1398 if (!ddcb_queue_initialized(&cd->queue))
1409 free_ddcb_queue(cd, &cd->queue);
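At shutdown, genwqe_finish_queue() wakes every sleeper and then polls until the queue has drained, logging the "waiting for queue to get empty" message while requests are still outstanding and giving up with "queue is not empty" after a bounded number of tries (the in-source comment sizes the wait for 16 DDCB queues of e.g. 32 entries each); only then does genwqe_release_service_layer() free the DDCB memory. A sketch of such a bounded drain-wait; the retry count, the sleep interval and the fake in-flight counter are assumptions for the example:

#include <stdio.h>
#include <unistd.h>

static unsigned int ddcbs_in_flight_now(void)
{
	static unsigned int left = 3;	/* pretend work drains over time */
	return left ? left-- : 0;
}

static int drain_queue(void)
{
	unsigned int in_flight, tries;

	for (tries = 0; tries < 10; tries++) {
		in_flight = ddcbs_in_flight_now();
		if (in_flight == 0)
			return 0;	/* queue empty, safe to tear it down */

		fprintf(stderr,
			"  DEBUG [%u/10] waiting for queue to get empty: %u requests!\n",
			tries, in_flight);
		usleep(250 * 1000);	/* the driver sleeps between polls */
	}

	fprintf(stderr, "  err: queue is not empty!!\n");
	return -1;			/* give up; caller decides what next */
}

int main(void)
{
	return drain_queue() ? 1 : 0;
}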