Searched refs:wq (Results 1 - 13 of 13) sorted by relevance

/barrelfish-master/lib/devif/backends/net/mlx4/include/linux/
workqueue.h
58 static int enqueue(struct workqueue_struct *wq, struct work_struct *w) { argument
59 thread_mutex_lock(&wq->work_list_lock);
60 list_add_tail(&w->list, &wq->work_list);
61 thread_mutex_unlock(&wq->work_list_lock);
63 thread_cond_signal(&wq->cond);
67 static struct work_struct *dequeue(struct workqueue_struct *wq) { argument
69 if (list_empty(&wq->work_list)) {
70 thread_cond_wait(&wq->cond, NULL);
72 thread_mutex_lock(&wq->work_list_lock);
73 w = list_entry(wq
80 exec_work(void *wq) argument
136 queue_work(struct workqueue_struct *wq, struct work_struct *work) argument
179 struct workqueue_struct *wq; local
[all...]
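The shim above builds queue semantics out of a mutex, a condition variable, and an intrusive list: enqueue appends under the lock and signals, dequeue blocks until work is available. A minimal sketch of that pattern, rewritten on POSIX pthreads as a stand-in for the port's thread_mutex/thread_cond primitives; the work_item and simple_wq names are hypothetical.

    /* Producer/consumer work queue sketch (pthread analogue of the shim). */
    #include <pthread.h>
    #include <stddef.h>

    struct work_item {
        void (*fn)(struct work_item *);
        struct work_item *next;
    };

    struct simple_wq {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        struct work_item *head, *tail;
    };

    /* Producer side: append an item and wake one worker. */
    static void wq_enqueue(struct simple_wq *wq, struct work_item *w)
    {
        w->next = NULL;
        pthread_mutex_lock(&wq->lock);
        if (wq->tail) wq->tail->next = w; else wq->head = w;
        wq->tail = w;
        pthread_mutex_unlock(&wq->lock);
        pthread_cond_signal(&wq->cond);
    }

    /* Worker side: block until an item is available, then detach it. */
    static struct work_item *wq_dequeue(struct simple_wq *wq)
    {
        pthread_mutex_lock(&wq->lock);
        while (wq->head == NULL) {
            pthread_cond_wait(&wq->cond, &wq->lock);  /* re-checks under the lock */
        }
        struct work_item *w = wq->head;
        wq->head = w->next;
        if (wq->head == NULL) wq->tail = NULL;
        pthread_mutex_unlock(&wq->lock);
        return w;
    }
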
linux_compat.c
956 struct workqueue_struct *wq; local
958 wq = kmalloc(sizeof(*wq), M_WAITOK);
959 wq->taskqueue = taskqueue_create(name, M_WAITOK,
960 taskqueue_thread_enqueue, &wq->taskqueue);
961 atomic_set(&wq->draining, 0);
962 taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name);
964 return (wq);
968 destroy_workqueue(struct workqueue_struct *wq) argument
970 taskqueue_free(wq
[all...]
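Here create_workqueue() is layered on FreeBSD's taskqueue(9) instead of the hand-rolled queue above: the taskqueue owns the worker threads and queued work is ultimately serviced by them. A rough sketch of that layering, written against the stock FreeBSD taskqueue API; in the Barrelfish port these calls are themselves shims, so the headers, struct layout, and wq_create/wq_destroy names below are simplifying assumptions.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/kernel.h>
    #include <sys/malloc.h>
    #include <sys/priority.h>
    #include <sys/taskqueue.h>

    struct workqueue_struct {
        struct taskqueue *taskqueue;
        int               draining;   /* the real shim keeps a compat atomic_t here */
    };

    /* Mirrors the create path visible above: allocate, create the taskqueue,
     * and start 'cpus' worker threads that service it. */
    static struct workqueue_struct *
    wq_create(const char *name, int cpus)
    {
        struct workqueue_struct *wq;

        wq = malloc(sizeof(*wq), M_DEVBUF, M_WAITOK | M_ZERO);
        wq->taskqueue = taskqueue_create(name, M_WAITOK,
            taskqueue_thread_enqueue, &wq->taskqueue);
        taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name);
        return (wq);
    }

    static void
    wq_destroy(struct workqueue_struct *wq)
    {
        taskqueue_free(wq->taskqueue);   /* drains and stops the worker threads */
        free(wq, M_DEVBUF);
    }
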
/barrelfish-master/lib/devif/backends/net/mlx4/drivers/
test_wq.c
25 struct workqueue_struct *wq; local
30 wq = create_singlethread_workqueue("name");
33 queue_work(wq, &test1->work);
34 queue_work(wq, &test2->work);
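The test driver exercises the shim the same way Linux code would: allocate a work item, bind a handler with INIT_WORK, and hand it to the queue. A sketch of that usage against the Linux-style API the shim exposes; the test_work and test_handler names are hypothetical and error handling is kept minimal.

    #include <linux/workqueue.h>   /* the port's shim header shown above */
    #include <linux/slab.h>

    struct test_work {
        struct work_struct work;
        int id;
    };

    /* Runs on the workqueue's thread, not on the caller's. */
    static void test_handler(struct work_struct *w)
    {
        struct test_work *tw = container_of(w, struct test_work, work);
        /* ... act on tw->id ... */
        kfree(tw);
    }

    static int run_test(void)
    {
        struct workqueue_struct *wq = create_singlethread_workqueue("name");
        struct test_work *t1 = kmalloc(sizeof(*t1), GFP_KERNEL);

        if (!wq || !t1)
            return -1;

        t1->id = 1;
        INIT_WORK(&t1->work, test_handler);
        queue_work(wq, &t1->work);     /* handler fires asynchronously */

        flush_workqueue(wq);           /* wait for it before tearing down */
        destroy_workqueue(wq);
        return 0;
    }
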
/barrelfish-master/lib/devif/backends/net/mlx4/drivers/infiniband/hw/mlx4/
cq.c
591 struct mlx4_ib_wq *wq; local
663 wq = &(*cur_qp)->sq;
666 wq->tail += (u16)(wqe_ctr - (u16) wq->tail);
668 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
669 ++wq->tail;
677 wq = &(*cur_qp)->rq;
678 tail = wq
[all...]
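The send-side completion path above recovers the caller's wr_id from a ring of saved IDs: wqe_cnt is a power of two, head and tail are free-running counters, and masking the tail picks the slot. A standalone sketch of that bookkeeping with simplified names (sq_ring and sq_complete are hypothetical).

    #include <stdint.h>

    struct sq_ring {
        uint64_t *wrid;      /* one wr_id per posted WQE */
        unsigned  wqe_cnt;   /* must be a power of two */
        unsigned  head;      /* incremented when posting */
        unsigned  tail;      /* incremented when completing */
    };

    /* Called once per send completion; wqe_ctr comes from the CQE. */
    static uint64_t sq_complete(struct sq_ring *wq, uint16_t wqe_ctr)
    {
        /* Skip over WQEs that completed implicitly (unsignaled sends). */
        wq->tail += (uint16_t)(wqe_ctr - (uint16_t)wq->tail);
        uint64_t wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
        ++wq->tail;
        return wr_id;
    }
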
alias_GUID.c
300 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
425 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
456 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
564 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
600 flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
601 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
664 dev->sriov.alias_guid.ports_guid[i].wq =
666 if (!dev->sriov.alias_guid.ports_guid[i].wq) {
677 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
678 dev->sriov.alias_guid.ports_guid[i].wq
[all...]
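alias_GUID.c keeps one single-threaded workqueue per port, feeds it delayed work, and tears everything down with a flush followed by destroy; destroy is also used to unwind a partially completed init. A sketch of that lifecycle; the port count, queue name, and handler below are illustrative, not the driver's actual values.

    #include <linux/workqueue.h>

    #define NUM_PORTS 2   /* illustrative; the driver sizes this per device */

    struct port_guid_rec {
        struct workqueue_struct *wq;
        struct delayed_work      guid_work;
    };

    static struct port_guid_rec ports[NUM_PORTS];

    static void guid_work_handler(struct work_struct *w)
    {
        /* ... refresh this port's alias GUID records; may re-queue itself
         *     with queue_delayed_work() and a nonzero delay ... */
    }

    static int ports_init(void)
    {
        int i;

        for (i = 0; i < NUM_PORTS; i++) {
            ports[i].wq = create_singlethread_workqueue("alias_guid");
            if (!ports[i].wq)
                goto err;
            INIT_DELAYED_WORK(&ports[i].guid_work, guid_work_handler);
            queue_delayed_work(ports[i].wq, &ports[i].guid_work, 0);
        }
        return 0;

    err:
        while (--i >= 0)
            destroy_workqueue(ports[i].wq);   /* unwind queues created so far */
        return -1;
    }

    static void ports_fini(void)
    {
        int i;

        for (i = 0; i < NUM_PORTS; i++) {
            flush_workqueue(ports[i].wq);     /* let in-flight work finish first */
            destroy_workqueue(ports[i].wq);
        }
    }
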
qp.c
2616 static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, argument
2621 cur = wq->head - wq->tail;
2622 if (/*likely(*/cur + nreq < wq->max_post/*)*/)
2627 cur = wq->head - wq->tail;
2630 return cur + nreq >= wq->max_post;
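mlx4_wq_overflow() relies on head and tail being free-running unsigned counters, so head - tail gives the number of outstanding WQEs even after either counter wraps; the driver re-checks the count under the completion-queue lock before reporting overflow, which is omitted in the sketch below (wq_counters and wq_overflow are hypothetical names).

    #include <stdbool.h>

    struct wq_counters {
        unsigned head;       /* producer index, bumped per posted WQE */
        unsigned tail;       /* consumer index, bumped per completion */
        unsigned max_post;   /* ring capacity */
    };

    /* Returns true if posting nreq more WQEs would overflow the ring. */
    static bool wq_overflow(const struct wq_counters *wq, int nreq)
    {
        unsigned cur = wq->head - wq->tail;   /* wrap-safe outstanding count */
        return cur + nreq >= wq->max_post;
    }
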
mlx4_ib.h
388 struct workqueue_struct *wq; member in struct:mlx4_sriov_alias_guid_port_rec_det
442 struct workqueue_struct *wq;*/
449 /*struct workqueue_struct *wq;
/barrelfish-master/lib/devif/backends/net/mlx4/drivers/infiniband/hw/mthca/
mthca_cq.c
488 struct mthca_wq *wq; local
538 wq = &(*cur_qp)->sq;
540 >> wq->wqe_shift);
545 wq = NULL;
551 wq = &(*cur_qp)->rq;
553 wqe_index = wqe >> wq->wqe_shift;
560 wqe_index = wq->max - 1;
564 if (wq) {
565 if (wq->last_comp < wqe_index)
566 wq
[all...]
mthca_qp.c
229 static void mthca_wq_reset(struct mthca_wq *wq) argument
231 wq->next_ind = 0;
232 wq->last_comp = wq->max - 1;
233 wq->head = 0;
234 wq->tail = 0;
1559 static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, argument
1565 cur = wq->head - wq->tail;
1566 if (likely(cur + nreq < wq
[all...]
/barrelfish-master/lib/devif/backends/net/mlx4/drivers/infiniband/core/
mad_priv.h
214 struct workqueue_struct *wq; member in struct:ib_mad_port_private
mad.c
197 queue_work(mad_agent_priv->qp_info->port_priv->wq,
536 flush_workqueue(port_priv->wq);
788 queue_work(mad_agent_priv->qp_info->port_priv->wq,
2475 queue_work(port_priv->wq, &port_priv->work);
2789 port_priv->wq = create_singlethread_workqueue(name);
2790 if (!port_priv->wq) {
2816 destroy_workqueue(port_priv->wq);*/
2849 destroy_workqueue(port_priv->wq);
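mad.c queues receive and timeout work onto a per-port single-threaded queue and calls flush_workqueue() before dismantling state the handlers touch; the flush returns only once every item queued so far has run to completion. A small sketch of that ordering guarantee, with hypothetical agent/agent_teardown names.

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct agent {
        struct work_struct recv_work;
        /* ... state the handler touches ... */
    };

    static void agent_teardown(struct workqueue_struct *port_wq, struct agent *a)
    {
        /* Returns only after all work queued so far (including a->recv_work,
         * if it was pending) has finished running. */
        flush_workqueue(port_wq);
        kfree(a);   /* now safe: no handler can still be using 'a' */
    }
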
cm.c
93 struct workqueue_struct *wq; member in struct:ib_cm
828 queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
3356 queue_delayed_work(cm.wq, &work->work, 0);
3465 queue_delayed_work(cm.wq, &work->work, 0);
3827 flush_workqueue(cm.wq);
3856 cm.wq = create_workqueue("ib_cm");
3857 if (!cm.wq) {
3868 destroy_workqueue(cm.wq);
3884 destroy_workqueue(cm.wq);
/barrelfish-master/usr/skb/skb_simple/
octopus_stubs.c
356 struct wait_queue* wq = malloc(sizeof(struct wait_queue)); local
357 wq->next = NULL;
358 wq->ors = drs;
360 // Insert wq into waiting_parties of entry
365 *cur = wq;
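The octopus stub appends the new waiter by walking a pointer-to-pointer cursor down the entry's waiting_parties list until it reaches the NULL tail, then stores the new node through the cursor (*cur = wq). A self-contained sketch of that append, with the surrounding structs simplified to illustrative stand-ins.

    #include <stdlib.h>

    struct wait_queue {
        struct wait_queue *next;
        void *ors;                 /* the waiting party's reply state */
    };

    struct entry {
        struct wait_queue *waiting_parties;
    };

    static int add_waiter(struct entry *e, void *drs)
    {
        struct wait_queue *wq = malloc(sizeof(struct wait_queue));
        if (wq == NULL)
            return -1;
        wq->next = NULL;
        wq->ors  = drs;

        /* cur always points at the link that should receive the new node:
         * either e->waiting_parties itself or some node's next field. */
        struct wait_queue **cur = &e->waiting_parties;
        while (*cur != NULL)
            cur = &(*cur)->next;
        *cur = wq;
        return 0;
    }
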

Completed in 130 milliseconds