Lines Matching defs:p_ent

141 struct ecore_spq_entry *p_ent,
148 comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
176 struct ecore_spq_entry *p_ent,
187 rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
193 rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
210 rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
214 comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
223 OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
224 p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
225 OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
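The matches at 141-225 come from the blocking path (ecore_spq_blocking_cb and __ecore_spq_block/ecore_spq_block): the callback only records the firmware return code in the cookie and sets a done flag, which the poster then polls. A minimal sketch of that pattern, assuming simplified stand-in types rather than the real ecore/OSAL definitions:

    /* Simplified sketch of the cookie-based blocking completion; the
     * types and names below are illustrative, not the real ecore ones. */
    #include <stdbool.h>
    #include <stdint.h>

    struct spq_comp_done {
        volatile bool done;          /* set by the completion callback */
        uint8_t       fw_return_code;
    };

    /* Callback installed for EBLOCK/BLOCK modes: it only records the
     * firmware return code and flags completion through the cookie. */
    void spq_blocking_cb(void *cookie, uint8_t fw_ret)
    {
        struct spq_comp_done *comp_done = cookie;

        comp_done->fw_return_code = fw_ret;
        comp_done->done = true;
    }

    /* Poster side: poll the cookie until the callback fires or the
     * iteration budget runs out. */
    int spq_block(struct spq_comp_done *comp_done, uint8_t *p_fw_ret,
                  unsigned int max_iter)
    {
        while (max_iter--) {
            if (comp_done->done) {
                *p_fw_ret = comp_done->fw_return_code;
                return 0;
            }
            /* the real code sleeps a few ms per iteration and can retry
             * once more after re-ringing the doorbell */
        }
        return -1;   /* timeout: ECORE_BUSY in the real code */
    }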
236 struct ecore_spq_entry *p_ent)
238 p_ent->flags = 0;
240 switch (p_ent->comp_mode) {
243 p_ent->comp_cb.function = ecore_spq_blocking_cb;
249 p_ent->comp_mode);
255 p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
256 p_ent->elem.hdr.protocol_id,
257 p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
258 D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
321 struct ecore_spq_entry *p_ent)
328 p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
335 *elem = p_ent->elem; /* Struct assignment */
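Lines 321-335 are from the hw-post helper: the entry is tagged with a running echo value and copied into the next ring element by plain struct assignment. A rough sketch, with an illustrative fixed-size ring and no endianness conversion or doorbell handling:

    /* Simplified sketch of the echo tagging and element copy; all
     * types here are stand-ins for the real ecore structures. */
    #include <stdint.h>

    struct spq_element {
        struct {
            uint16_t echo;           /* used later to match the completion */
        } hdr;
        /* command header/payload fields omitted */
    };

    struct spq_entry {
        struct spq_element elem;
    };

    struct spq {
        uint16_t           echo_idx;   /* running echo counter */
        struct spq_element ring[64];   /* stand-in for the HW chain */
        unsigned int       prod;
    };

    void spq_hw_post(struct spq *p_spq, struct spq_entry *p_ent)
    {
        struct spq_element *elem = &p_spq->ring[p_spq->prod++ % 64];

        /* tag the request so its completion can be matched by echo */
        p_ent->elem.hdr.echo = p_spq->echo_idx++;

        *elem = p_ent->elem;   /* struct assignment, as in the source */

        /* the real code then advances the chain producer and rings the
         * doorbell; omitted here */
    }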
732 struct ecore_spq_entry *p_ent = OSAL_NULL;
739 p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
740 if (!p_ent) {
745 p_ent->queue = &p_spq->unlimited_pending;
747 p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
750 OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
751 p_ent->queue = &p_spq->pending;
754 *pp_ent = p_ent;
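Lines 732-754 show entry acquisition: an empty free pool forces a one-off allocation earmarked for unlimited_pending, otherwise the head of the free pool is popped for the regular pending queue. A simplified sketch, assuming stand-in list helpers rather than the OSAL list API:

    /* Simplified sketch of the get-entry logic; illustrative types only. */
    #include <stdlib.h>

    struct list_head { struct list_head *next, *prev; };

    static int list_empty(const struct list_head *h) { return h->next == h; }

    static void list_del(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
    }

    struct spq_entry {
        struct list_head  list;     /* must stay the first member */
        struct list_head *queue;    /* records which queue the entry targets */
    };

    struct spq {
        struct list_head free_pool;
        struct list_head pending;
        struct list_head unlimited_pending;
    };

    int spq_get_entry(struct spq *p_spq, struct spq_entry **pp_ent)
    {
        struct spq_entry *p_ent;

        if (list_empty(&p_spq->free_pool)) {
            /* Pool exhausted: allocate a one-off entry (GFP_ATOMIC in the
             * real code) that will be parked on unlimited_pending */
            p_ent = calloc(1, sizeof(*p_ent));
            if (!p_ent)
                return -1;          /* ECORE_NOMEM in the real code */
            p_ent->queue = &p_spq->unlimited_pending;
        } else {
            /* Pop the head of the free pool for the regular pending queue;
             * the cast works because list is the first member */
            p_ent = (struct spq_entry *)p_spq->free_pool.next;
            list_del(&p_ent->list);
            p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;
        return 0;
    }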
763 struct ecore_spq_entry *p_ent)
765 OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
769 struct ecore_spq_entry *p_ent)
772 __ecore_spq_return_entry(p_hwfn, p_ent);
785 * @param p_ent
791 struct ecore_spq_entry *p_ent,
796 if (p_ent->queue == &p_spq->unlimited_pending) {
799 OSAL_LIST_PUSH_TAIL(&p_ent->list,
817 p_ent->elem.data_ptr = p_en2->elem.data_ptr;
819 *p_en2 = *p_ent;
821 /* EBLOCK responsible to free the allocated p_ent */
822 if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
823 OSAL_FREE(p_hwfn->p_dev, p_ent);
825 p_ent = p_en2;
832 OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
836 OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
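Lines 785-836 are the add-entry logic: an unlimited_pending entry is either parked on that list (free pool empty) or copied into a pool entry while keeping the pool entry's data_ptr; the heap copy is freed here unless the poster is in EBLOCK mode, and the result is queued by priority. A sketch with illustrative simplified types:

    /* Simplified sketch of the add-entry and ownership rules; the list
     * helpers, enums and structs are stand-ins, not the ecore originals. */
    #include <stdlib.h>

    struct list_head { struct list_head *next, *prev; };

    static int  list_empty(const struct list_head *h) { return h->next == h; }
    static void list_del(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
    }
    static void list_add_tail(struct list_head *e, struct list_head *h)
    {
        e->prev = h->prev; e->next = h;
        h->prev->next = e; h->prev = e;
    }
    static void list_add_head(struct list_head *e, struct list_head *h)
    {
        e->next = h->next; e->prev = h;
        h->next->prev = e; h->next = e;
    }

    enum spq_mode     { SPQ_MODE_EBLOCK, SPQ_MODE_BLOCK, SPQ_MODE_CB };
    enum spq_priority { SPQ_PRIORITY_NORMAL, SPQ_PRIORITY_HIGH };

    struct spq_entry {
        struct list_head  list;      /* must stay the first member */
        struct list_head *queue;
        enum spq_mode     comp_mode;
        enum spq_priority priority;
        struct { unsigned int hi, lo; } data_ptr;  /* stand-in for elem.data_ptr */
    };

    struct spq {
        struct list_head free_pool;
        struct list_head pending;
        struct list_head unlimited_pending;
    };

    int spq_add_entry(struct spq *p_spq, struct spq_entry *p_ent)
    {
        if (p_ent->queue == &p_spq->unlimited_pending) {
            if (list_empty(&p_spq->free_pool)) {
                /* No pool entry available: park the allocated entry */
                list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
                return 0;
            }

            struct spq_entry *p_en2 =
                (struct spq_entry *)p_spq->free_pool.next;
            list_del(&p_en2->list);

            /* keep the pool entry's own data buffer address */
            p_ent->data_ptr = p_en2->data_ptr;

            *p_en2 = *p_ent;   /* struct assignment, as in the source */

            /* EBLOCK posters still poll the original entry and free it
             * themselves; everyone else is done with it here */
            if (p_ent->comp_mode != SPQ_MODE_EBLOCK)
                free(p_ent);

            p_ent = p_en2;
        }

        /* high-priority requests go to the head of the pending queue */
        if (p_ent->priority == SPQ_PRIORITY_HIGH)
            list_add_head(&p_ent->list, &p_spq->pending);
        else
            list_add_tail(&p_ent->list, &p_spq->pending);

        return 0;
    }

Copying data_ptr back before the struct assignment keeps the pool entry pointing at its own ramrod data area; only the command contents come from the allocated entry.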
875 struct ecore_spq_entry *p_ent =
877 if (p_ent != OSAL_NULL) {
881 OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
882 OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->completion_pending);
885 rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
887 OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
889 __ecore_spq_return_entry(p_hwfn, p_ent);
901 struct ecore_spq_entry *p_ent = OSAL_NULL;
908 p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
911 if (!p_ent)
917 OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);
919 ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
927 struct ecore_spq_entry *p_ent,
937 if (!p_ent) {
945 p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
955 rc = ecore_spq_fill_entry(p_hwfn, p_ent);
962 rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
978 if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
981 * access p_ent here to see whether it's successful or not.
984 rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
985 p_ent->queue == &p_spq->unlimited_pending);
987 if (p_ent->queue == &p_spq->unlimited_pending) {
988 /* This is an allocated p_ent which does not need to
991 OSAL_FREE(p_hwfn->p_dev, p_ent);
993 /* TBD: handle error flow and remove p_ent from
1003 ecore_spq_return_entry(p_hwfn, p_ent);
1009 OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
1015 __ecore_spq_return_entry(p_hwfn, p_ent);
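Lines 927-1015 are the post path: fill the entry, queue it, kick the pending list and, for EBLOCK requests, wait for the completion before deciding whether the entry is freed (unlimited_pending allocation) or returned to the pool. A condensed sketch with stubbed helpers and illustrative names:

    /* Condensed sketch of the post/EBLOCK flow; locking, doorbells and
     * error unlinking are omitted, and all names are stand-ins. */
    #include <stdlib.h>

    enum spq_mode { SPQ_MODE_EBLOCK, SPQ_MODE_BLOCK, SPQ_MODE_CB };

    struct spq;   /* opaque here; only pointers are passed around */

    struct spq_entry {
        enum spq_mode comp_mode;
        int           allocated;   /* set on the unlimited_pending path */
    };

    /* Stubs standing in for the fill/add/post-list/block/return helpers */
    static int  spq_fill_entry(struct spq_entry *e)                  { (void)e; return 0; }
    static int  spq_add_entry(struct spq *q, struct spq_entry *e)    { (void)q; (void)e; return 0; }
    static int  spq_post_list(struct spq *q)                         { (void)q; return 0; }
    static int  spq_block(struct spq_entry *e, unsigned char *fw)    { (void)e; *fw = 0; return 0; }
    static void spq_return_entry(struct spq *q, struct spq_entry *e) { (void)q; (void)e; }

    int spq_post(struct spq *p_spq, struct spq_entry *p_ent,
                 unsigned char *fw_return_code)
    {
        int rc;

        if (!p_ent)
            return -1;                     /* EINVAL in the real code */

        rc = spq_fill_entry(p_ent);        /* installs the blocking cb if needed */
        if (rc)
            goto err;

        rc = spq_add_entry(p_spq, p_ent);  /* pending / unlimited_pending */
        if (rc)
            goto err;

        rc = spq_post_list(p_spq);         /* move to completion_pending + doorbell */
        if (rc)
            goto err;

        if (p_ent->comp_mode == SPQ_MODE_EBLOCK) {
            /* The blocking callback only flips a flag in the cookie, so
             * p_ent can still be inspected here after completion */
            rc = spq_block(p_ent, fw_return_code);

            if (p_ent->allocated) {
                /* A one-off allocation from the unlimited_pending path is
                 * freed by the poster, never returned to the pool */
                free(p_ent);
                return rc;
            }

            if (rc)
                goto err;

            spq_return_entry(p_spq, p_ent);   /* back to the free pool */
        }
        return 0;

    err:
        /* The real code also unlinks the entry from completion_pending
         * under the SPQ lock before returning it */
        spq_return_entry(p_spq, p_ent);
        return rc;
    }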
1027 struct ecore_spq_entry *p_ent = OSAL_NULL;
1041 OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
1047 if (p_ent->elem.hdr.echo == echo) {
1048 OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
1066 found = p_ent;
1076 OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
1094 p_ent->comp_cb.function, p_ent->comp_cb.cookie);
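Lines 1027-1094 are the completion handler: it walks completion_pending with a removal-safe iterator, matches the event's echo against each entry's header, unlinks the match and invokes its completion callback with the stored cookie. A simplified sketch, again with stand-in types:

    /* Simplified sketch of completion matching by echo; list and entry
     * layouts here are illustrative, not the real ecore definitions. */
    #include <stdint.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_del(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
    }

    struct spq_entry {
        struct list_head list;                 /* must stay the first member */
        uint16_t         echo;                 /* elem.hdr.echo in the source */
        void           (*comp_cb)(void *cookie, uint8_t fw_ret);
        void            *cookie;
    };

    struct spq {
        struct list_head completion_pending;
    };

    int spq_completion(struct spq *p_spq, uint16_t echo, uint8_t fw_ret)
    {
        struct list_head *pos, *tmp;
        struct spq_entry *found = NULL;

        /* "safe" walk: the matching node is removed while iterating */
        for (pos = p_spq->completion_pending.next, tmp = pos->next;
             pos != &p_spq->completion_pending;
             pos = tmp, tmp = pos->next) {
            struct spq_entry *p_ent = (struct spq_entry *)pos;

            if (p_ent->echo == echo) {
                list_del(&p_ent->list);
                found = p_ent;
                break;
            }
        }

        if (!found)
            return -1;   /* unexpected echo: the real code logs and fails */

        if (found->comp_cb)
            found->comp_cb(found->cookie, fw_ret);  /* e.g. the blocking cb */

        /* the real code then returns pool entries to the free pool */
        return 0;
    }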