Lines matching refs:iq — cross-reference hits for the instruction-queue pointer "iq" in the Cavium LiquidIO driver's instruction-queue code; each hit is prefixed with its line number in the source file.

51 struct octeon_instr_queue *iq;
73 iq = oct->instr_queue[iq_no];
75 iq->oct_dev = oct;
77 iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);
78 if (!iq->base_addr) {
84 iq->max_count = num_descs;
89 iq->request_list = vzalloc_node(array_size(num_descs, sizeof(*iq->request_list)),
91 if (!iq->request_list)
92 iq->request_list = vzalloc(array_size(num_descs, sizeof(*iq->request_list)));
93 if (!iq->request_list) {
94 lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
101 iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count);
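The allocation path above (lines 77-101) first carves the descriptor ring out of DMA-coherent memory, then allocates the parallel request_list that remembers what was posted in each slot. The request_list allocation prefers the queue's NUMA node and falls back to any node before giving up; on failure it unwinds by freeing the DMA ring it already allocated (line 94). A minimal sketch of that fallback pattern, with a hypothetical req_entry type standing in for the driver's request-list element:

#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/overflow.h>

/* Hypothetical request-tracking entry; stands in for the driver's
 * request_list element type.
 */
struct req_entry {
	void *buf;
	int reqtype;
};

static struct req_entry *alloc_request_list(u32 num_descs, int numa_node)
{
	struct req_entry *list;

	/* Prefer memory local to the queue's NUMA node... */
	list = vzalloc_node(array_size(num_descs, sizeof(*list)), numa_node);
	if (!list)
		/* ...but a remote node beats failing the queue init. */
		list = vzalloc(array_size(num_descs, sizeof(*list)));
	return list;
}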
103 iq->txpciq.u64 = txpciq.u64;
104 iq->fill_threshold = (u32)conf->db_min;
105 iq->fill_cnt = 0;
106 iq->host_write_index = 0;
107 iq->octeon_read_index = 0;
108 iq->flush_index = 0;
109 iq->last_db_time = 0;
110 iq->do_auto_flush = 1;
111 iq->db_timeout = (u32)conf->db_timeout;
112 atomic_set(&iq->instr_pending, 0);
113 iq->pkts_processed = 0;
116 spin_lock_init(&iq->lock);
118 iq->allow_soft_cmds = true;
119 spin_lock_init(&iq->post_lock);
121 iq->allow_soft_cmds = false;
124 spin_lock_init(&iq->iq_flush_running_lock);
126 oct->io_qmask.iq |= BIT_ULL(iq_no);
130 iq->iqcmd_64B = (conf->instr_type == 64);
138 vfree(iq->request_list);
139 iq->request_list = NULL;
140 lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
141 dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
159 struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
174 vfree(iq->request_list);
176 if (iq->base_addr) {
177 q_size = iq->max_count * desc_size;
178 lio_dma_free(oct, (u32)q_size, iq->base_addr,
179 iq->base_addr_dma);
180 oct->io_qmask.iq &= ~(1ULL << iq_no);
244 if (!(oct->io_qmask.iq & BIT_ULL(i)))
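io_qmask.iq is used as a 64-bit bitmap of live instruction queues: a queue's bit is set at init (line 126), cleared at teardown (line 180, where ~(1ULL << iq_no) is just ~BIT_ULL(iq_no) spelled out), and tested before touching a queue (line 244). A sketch of the pattern, with live_iq_mask as a stand-in for oct->io_qmask.iq:

#include <linux/bits.h>
#include <linux/types.h>

static u64 live_iq_mask;	/* stand-in for oct->io_qmask.iq */

static void mark_iq_live(u32 iq_no)
{
	live_iq_mask |= BIT_ULL(iq_no);		/* init, line 126 */
}

static void mark_iq_dead(u32 iq_no)
{
	live_iq_mask &= ~BIT_ULL(iq_no);	/* teardown, line 180 */
}

static bool iq_is_live(u32 iq_no)
{
	return live_iq_mask & BIT_ULL(iq_no);	/* guard, line 244 */
}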
265 ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
268 writel(iq->fill_cnt, iq->doorbell_reg);
270 iq->fill_cnt = 0;
271 iq->last_db_time = jiffies;
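ring_doorbell() (lines 265-271) is the batching point: fill_cnt counts commands posted since the last ring, a single writel() hands that count to the hardware, and last_db_time is stamped so the timeout path further down can detect work that never got a doorbell. A sketch, assuming doorbell_reg is an ioremapped device register:

#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Minimal queue view; field names follow the listing, layout is assumed. */
struct iq_db_view {
	u32 fill_cnt;			/* commands posted since last ring */
	u64 last_db_time;		/* jiffies at the most recent ring */
	void __iomem *doorbell_reg;	/* ioremapped doorbell register */
};

static void ring_doorbell_sketch(struct iq_db_view *iq)
{
	writel(iq->fill_cnt, iq->doorbell_reg);	/* device fetches this many */
	iq->fill_cnt = 0;
	iq->last_db_time = jiffies;
}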
279 struct octeon_instr_queue *iq;
281 iq = oct->instr_queue[iq_no];
282 spin_lock(&iq->post_lock);
283 if (iq->fill_cnt)
284 ring_doorbell(oct, iq);
285 spin_unlock(&iq->post_lock);
289 static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
294 cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
295 iqptr = iq->base_addr + (cmdsize * iq->host_write_index);
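__copy_cmd_into_iq() locates the next free slot purely by arithmetic: commands are fixed-size (64 bytes when iqcmd_64B is set, else 32), so the slot address is base_addr plus cmdsize times host_write_index. A condensed sketch with an assumed minimal queue view:

#include <linux/string.h>
#include <linux/types.h>

/* Assumed minimal view of the queue for slot addressing. */
struct iq_ring_view {
	u8 *base_addr;		/* DMA-coherent command ring */
	u32 host_write_index;	/* next slot to fill */
	bool iqcmd_64B;		/* 64-byte commands if true, else 32 */
};

static void copy_cmd_into_iq(struct iq_ring_view *iq, const u8 *cmd)
{
	u32 cmdsize = iq->iqcmd_64B ? 64 : 32;
	u8 *slot = iq->base_addr + (size_t)cmdsize * iq->host_write_index;

	memcpy(slot, cmd, cmdsize);
}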
301 __post_command2(struct octeon_instr_queue *iq, u8 *cmd)
310 if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
316 if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
319 __copy_cmd_into_iq(iq, cmd);
322 st.index = iq->host_write_index;
323 iq->host_write_index = incr_index(iq->host_write_index, 1,
324 iq->max_count);
325 iq->fill_cnt++;
332 atomic_inc(&iq->instr_pending);
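__post_command2() (lines 301-332) is the producer side of the ring: it refuses to post when instr_pending is within one or two entries of max_count (the slack keeps the write index from lapping unreclaimed slots), copies the command, advances host_write_index with wrap-around, and bumps both fill_cnt (the doorbell batch) and instr_pending (completion accounting). incr_index() itself does not match the search pattern; its call sites imply a modular increment, so here is a plausible sketch rather than the driver's exact helper:

#include <linux/types.h>

/* Wrap-around advance consistent with the incr_index() call sites in
 * the listing; not necessarily the driver's exact helper.
 */
static inline u32 incr_index(u32 index, u32 count, u32 max)
{
	index += count;
	if (index >= max)
		index -= max;
	return index;
}

With this, line 323's incr_index(iq->host_write_index, 1, iq->max_count) steps one slot and wraps back to 0 at max_count.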
354 __add_to_request_list(struct octeon_instr_queue *iq,
357 iq->request_list[idx].buf = buf;
358 iq->request_list[idx].reqtype = reqtype;
364 struct octeon_instr_queue *iq, u32 napi_budget)
369 u32 old = iq->flush_index;
375 while (old != iq->octeon_read_index) {
376 reqtype = iq->request_list[old].reqtype;
377 buf = iq->request_list[old].buf;
416 iq->request_list[old].buf = NULL;
417 iq->request_list[old].reqtype = 0;
421 old = incr_index(old, 1, iq->max_count);
427 octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
429 iq->flush_index = old;
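lio_process_iq_request_list() (lines 364-429) is the consumer: it walks request_list from the software flush_index up to the hardware-reported octeon_read_index, dispatches each saved buf by its reqtype, clears the slot, and finally reports the completed packets and bytes to BQL in one batch. A condensed sketch of the walk, reusing req_entry and incr_index() from the sketches above and leaving the per-reqtype dispatch as a callback:

/* Assumed minimal view for the completion walk. */
struct iq_walk_view {
	u32 flush_index;		/* first slot not yet reclaimed */
	u32 octeon_read_index;		/* hardware's consumed-up-to index */
	u32 max_count;
	struct req_entry *request_list;	/* req_entry from the first sketch */
};

static u32 process_request_list(struct iq_walk_view *iq,
				void (*complete)(int reqtype, void *buf))
{
	u32 old = iq->flush_index;
	u32 inst_count = 0;

	while (old != iq->octeon_read_index) {
		complete(iq->request_list[old].reqtype,
			 iq->request_list[old].buf);
		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;
		old = incr_index(old, 1, iq->max_count);
		inst_count++;
	}
	iq->flush_index = old;
	return inst_count;	/* caller subtracts this from instr_pending */
}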
441 octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
448 if (!spin_trylock(&iq->iq_flush_running_lock))
451 spin_lock_bh(&iq->lock);
453 iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);
457 if (iq->flush_index == iq->octeon_read_index)
462 lio_process_iq_request_list(oct, iq,
467 lio_process_iq_request_list(oct, iq, 0);
470 iq->pkts_processed += inst_processed;
471 atomic_sub(inst_processed, &iq->instr_pending);
472 iq->stats.instr_processed += inst_processed;
481 iq->last_db_time = jiffies;
483 spin_unlock_bh(&iq->lock);
485 spin_unlock(&iq->iq_flush_running_lock);
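octeon_flush_iq() (lines 441-485) layers two locks: spin_trylock() on iq_flush_running_lock makes the flush non-reentrant without blocking (a concurrent flusher will reap the same completions, so losing the race is harmless), while the inner iq->lock is taken _bh because flushing also runs from softirq context. A sketch of that shape:

#include <linux/spinlock.h>

/* Assumed lock pair, mirroring the listing. */
struct iq_lock_view {
	spinlock_t iq_flush_running_lock;	/* one flusher at a time */
	spinlock_t lock;			/* protects queue indexes */
};

static void flush_iq_sketch(struct iq_lock_view *iq)
{
	/* Another context is flushing; its pass covers our work too. */
	if (!spin_trylock(&iq->iq_flush_running_lock))
		return;

	spin_lock_bh(&iq->lock);
	/* ... refresh octeon_read_index, walk the request list ... */
	spin_unlock_bh(&iq->lock);

	spin_unlock(&iq->iq_flush_running_lock);
}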
495 struct octeon_instr_queue *iq;
501 iq = oct->instr_queue[iq_no];
502 if (!iq)
506 if (!atomic_read(&iq->instr_pending))
509 next_time = iq->last_db_time + iq->db_timeout;
512 iq->last_db_time = jiffies;
515 octeon_flush_iq(oct, iq, 0);
517 lio_enable_irq(NULL, iq);
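__check_db_timeout() (lines 495-517) catches commands stranded below the doorbell threshold: if anything is pending and last_db_time + db_timeout has passed, it flushes the queue. The comparison itself does not match the search pattern; in jiffies-based code it is conventionally written with time_after(), which survives jiffies wrap-around, so the following deadline test is an assumption in that style:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Deadline check in the style of __check_db_timeout(); the exact
 * comparison is assumed, since it does not appear in the listing.
 */
static bool db_timeout_expired(u64 last_db_time, u32 db_timeout)
{
	u64 next_time = last_db_time + db_timeout;

	return time_after(jiffies, (unsigned long)next_time);
}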
542 struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
547 if (iq->allow_soft_cmds)
548 spin_lock_bh(&iq->post_lock);
550 st = __post_command2(iq, cmd);
554 __add_to_request_list(iq, st.index, buf, reqtype);
558 if (iq->fill_cnt >= MAX_OCTEON_FILL_COUNT || force_db ||
560 ring_doorbell(oct, iq);
565 if (iq->allow_soft_cmds)
566 spin_unlock_bh(&iq->post_lock);
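octeon_send_command() (lines 542-566) takes post_lock conditionally: when allow_soft_cmds is set (line 118), soft commands can post to the same ring as the fast TX path, so the two producers must be serialized; when it is clear (line 121), the lock is skipped, presumably because a single already-serialized producer remains. That rationale is inferred from the listing, not stated in it. The shape of the posting path:

#include <linux/spinlock.h>
#include <linux/types.h>

/* Conditional serialization around the post, following lines 547-566;
 * iq_post_view is an assumed minimal layout.
 */
struct iq_post_view {
	bool allow_soft_cmds;	/* second producer possible? */
	spinlock_t post_lock;
};

static void post_command_sketch(struct iq_post_view *iq)
{
	if (iq->allow_soft_cmds)
		spin_lock_bh(&iq->post_lock);

	/* ... __post_command2(), request-list bookkeeping, and a
	 * doorbell ring once fill_cnt reaches MAX_OCTEON_FILL_COUNT
	 * or force_db is set ...
	 */

	if (iq->allow_soft_cmds)
		spin_unlock_bh(&iq->post_lock);
}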
687 struct octeon_instr_queue *iq;
693 iq = oct->instr_queue[sc->iq_no];
694 if (!iq->allow_soft_cmds) {