Lines Matching defs:cdev (drivers/target/iscsi/cxgbit/cxgbit_cm.c)

49 cxgbit_wait_for_reply(struct cxgbit_device *cdev,
55 if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
63 func, pci_name(cdev->lldi.pdev), tid);
69 pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
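cxgbit_wait_for_reply() (lines 49-69) gates every firmware round trip in this file: fail fast if the adapter is down, otherwise block until the CPL reply handler signals completion, then propagate whatever status that handler stored. A minimal sketch of the pattern, assuming a simplified wr_wait built on a kernel completion (the struct layout and helper name here are illustrative, not the driver's exact definitions):

#include <linux/completion.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/printk.h>

struct wr_wait {
        struct completion completion;   /* completed by the CPL reply handler */
        int ret;                        /* status copied out of the reply */
};

/* Caller re-initializes w->ret and w->completion before posting the WR. */
static int wait_for_reply(unsigned long *dev_flags, struct wr_wait *w,
                          u32 tid, u32 timeout_secs)
{
        if (!test_bit(0 /* CDEV_STATE_UP */, dev_flags)) {
                w->ret = -EIO;          /* adapter down: don't wait at all */
                goto out;
        }

        if (!wait_for_completion_timeout(&w->completion, timeout_secs * HZ))
                w->ret = -ETIMEDOUT;    /* reply never arrived for this tid */
out:
        if (w->ret)
                pr_err("wr failed, tid %u ret %d\n", tid, w->ret);
        return w->ret;
}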
79 cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
89 spin_lock(&cdev->np_lock);
90 p->next = cdev->np_hash_tab[bucket];
91 cdev->np_hash_tab[bucket] = p;
92 spin_unlock(&cdev->np_lock);
99 cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
104 spin_lock(&cdev->np_lock);
105 for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
111 spin_unlock(&cdev->np_lock);
116 static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
119 struct np_info *p, **prev = &cdev->np_hash_tab[bucket];
121 spin_lock(&cdev->np_lock);
130 spin_unlock(&cdev->np_lock);
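The three np hash helpers (lines 79-130) are a classic spinlock-protected chained hash table: insert at the bucket head, linear walk to find, and unlink through a pointer-to-pointer so deletion needs no special case for the head. The driver keeps one table and lock per adapter (cdev->np_hash_tab, cdev->np_lock); the sketch below uses a single global table and stand-in types for brevity:

#include <linux/spinlock.h>
#include <linux/slab.h>

#define NP_HASH_SIZE    32
#define np_hashfn(key)  ((unsigned long)(key) % NP_HASH_SIZE)

struct np_entry {
        struct np_entry *next;
        void *key;              /* stand-in for struct cxgbit_np * */
        unsigned int stid;      /* server tid handed out by cxgb4 */
};

static struct np_entry *np_hash_tab[NP_HASH_SIZE];
static DEFINE_SPINLOCK(np_lock);

static bool np_hash_add(void *key, unsigned int stid)
{
        unsigned int bucket = np_hashfn(key);
        struct np_entry *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return false;
        p->key = key;
        p->stid = stid;
        spin_lock(&np_lock);
        p->next = np_hash_tab[bucket];          /* push onto the bucket head */
        np_hash_tab[bucket] = p;
        spin_unlock(&np_lock);
        return true;
}

static int np_hash_find(void *key)
{
        struct np_entry *p;
        int stid = -1;

        spin_lock(&np_lock);
        for (p = np_hash_tab[np_hashfn(key)]; p; p = p->next) {
                if (p->key == key) {
                        stid = p->stid;
                        break;
                }
        }
        spin_unlock(&np_lock);
        return stid;
}

static int np_hash_del(void *key)
{
        struct np_entry *p, **prev = &np_hash_tab[np_hashfn(key)];
        int stid = -1;

        spin_lock(&np_lock);
        for (p = *prev; p; prev = &p->next, p = p->next) {
                if (p->key == key) {
                        stid = p->stid;
                        *prev = p->next;        /* unlink; no special head case */
                        kfree(p);
                        break;
                }
        }
        spin_unlock(&np_lock);
        return stid;
}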
144 cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
153 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);
158 ret = cxgb4_clip_get(cdev->lldi.ports[0],
170 ret = cxgb4_create_server6(cdev->lldi.ports[0],
173 cdev->lldi.rxq_ids[0]);
175 ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
184 cxgb4_clip_release(cdev->lldi.ports[0],
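cxgbit_create_server6() (lines 144-184) is the IPv6 listen sequence: pin the local address into the hardware CLIP (Compressed Local IP) table, post the server request on port 0, wait for the firmware's reply, and drop the CLIP reference if anything failed so the entry is not leaked. A condensed sketch of that ordering, reusing the wait_for_reply() sketch above (error-code folding simplified; the real code also wakes the waiter itself on a synchronous send failure):

static int create_server6(struct cxgbit_device *cdev, unsigned int stid,
                          struct sockaddr_in6 *sin6, struct wr_wait *w)
{
        int ret;

        ret = cxgb4_clip_get(cdev->lldi.ports[0],
                             (const u32 *)&sin6->sin6_addr.s6_addr, 1);
        if (ret)
                return ret;

        ret = cxgb4_create_server6(cdev->lldi.ports[0], stid,
                                   &sin6->sin6_addr, sin6->sin6_port,
                                   cdev->lldi.rxq_ids[0]);
        if (!ret)
                ret = wait_for_reply(&cdev->flags, w, 0, 10);
        else if (ret > 0)
                ret = net_xmit_errno(ret);      /* fold NET_XMIT_* into an errno */

        if (ret)        /* undo the CLIP reference on any failure */
                cxgb4_clip_release(cdev->lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
        return ret;
}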
196 cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
204 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);
209 ret = cxgb4_create_server(cdev->lldi.ports[0],
212 cdev->lldi.rxq_ids[0]);
214 ret = cxgbit_wait_for_reply(cdev,
230 struct cxgbit_device *cdev;
233 list_for_each_entry(cdev, &cdev_list_head, list) {
234 struct cxgb4_lld_info *lldi = &cdev->lldi;
240 return cdev;
295 struct cxgbit_device *cdev = NULL;
312 cdev = cxgbit_find_device(ndev, NULL);
315 return cdev;
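cxgbit_find_device() (lines 230-240, reused by the np lookup at 312) maps a net_device back to the adapter that owns it by walking the global cdev_list_head and comparing against each port. A sketch of the walk (port_id out-parameter as in the driver; list locking elided, as in the listing):

static struct cxgbit_device *find_device(struct net_device *ndev, u8 *port_id)
{
        struct cxgbit_device *cdev;
        u8 i;

        list_for_each_entry(cdev, &cdev_list_head, list) {
                struct cxgb4_lld_info *lldi = &cdev->lldi;

                for (i = 0; i < lldi->nports; i++) {
                        if (lldi->ports[i] == ndev) {
                                if (port_id)
                                        *port_id = i;   /* which port matched */
                                return cdev;
                        }
                }
        }
        return NULL;    /* ndev is not backed by any cxgbit adapter */
}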
343 __cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
348 if (!test_bit(CDEV_STATE_UP, &cdev->flags))
351 stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
355 if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
356 cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
361 ret = cxgbit_create_server4(cdev, stid, cnp);
363 ret = cxgbit_create_server6(cdev, stid, cnp);
367 cxgb4_free_stid(cdev->lldi.tids, stid,
369 cxgbit_np_hash_del(cdev, cnp);
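__cxgbit_setup_cdev_np() (lines 343-369) is a textbook unwind chain: each acquired resource (the stid, the hash entry, the hardware server) is released in reverse at the first failure, so no path leaks a stid. A sketch of that ordering (cnp->com.local_addr.ss_family is assumed from context as the family selector; np hash helpers as sketched above):

static int setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int ss_family = cnp->com.local_addr.ss_family;
        int stid, ret;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags))
                return -EINVAL;

        /* resource 1: the server tid */
        stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
        if (stid < 0)
                return -ENOMEM;

        /* resource 2: the cnp -> stid hash entry */
        if (!np_hash_add(cnp, stid)) {
                cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
                return -ENOMEM;
        }

        /* resource 3: the hardware listening server itself */
        if (ss_family == AF_INET)
                ret = cxgbit_create_server4(cdev, stid, cnp);
        else
                ret = cxgbit_create_server6(cdev, stid, cnp);

        if (ret) {      /* unwind in reverse acquisition order */
                cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
                np_hash_del(cnp);
        }
        return ret;
}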
377 struct cxgbit_device *cdev;
381 cdev = cxgbit_find_np_cdev(cnp);
382 if (!cdev)
385 if (cxgbit_np_hash_find(cdev, cnp) >= 0)
388 if (__cxgbit_setup_cdev_np(cdev, cnp))
391 cnp->com.cdev = cdev;
400 struct cxgbit_device *cdev;
405 list_for_each_entry(cdev, &cdev_list_head, list) {
406 if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
412 list_for_each_entry(cdev, &cdev_list_head, list) {
413 ret = __cxgbit_setup_cdev_np(cdev, cnp);
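cxgbit_setup_all_np() (lines 400-413) first scans every adapter for an existing entry, then tries to instantiate the listener on each one, and succeeds if at least one adapter accepted it: a deliberate tolerate-partial-failure loop, since the portal must answer on whichever adapter carries the route. A sketch (the cdev_list_lock mutex name is an assumption; the per-cdev helpers are the ones from the listing):

static int setup_all_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret, count = 0;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
                        mutex_unlock(&cdev_list_lock);
                        return -1;      /* already listening somewhere */
                }
        }

        list_for_each_entry(cdev, &cdev_list_head, list) {
                ret = __cxgbit_setup_cdev_np(cdev, cnp);
                if (ret == -ETIMEDOUT)
                        break;          /* adapter wedged: stop the walk */
                if (!ret)
                        count++;        /* this adapter now listens */
        }
        mutex_unlock(&cdev_list_lock);

        return count ? 0 : -1;  /* ok if any adapter took the server */
}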
450 cnp->com.cdev = NULL;
518 __cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
523 stid = cxgbit_np_hash_del(cdev, cnp);
526 if (!test_bit(CDEV_STATE_UP, &cdev->flags))
534 ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
535 cdev->lldi.rxq_ids[0], ipv6);
545 ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
550 if (ipv6 && cnp->com.cdev) {
554 cxgb4_clip_release(cdev->lldi.ports[0],
559 cxgb4_free_stid(cdev->lldi.tids, stid,
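__cxgbit_free_cdev_np() (lines 518-559) tears down in reverse of setup: reclaim the stid from the hash, ask the hardware to remove the server, wait for the reply, release the IPv6 CLIP reference, and only then return the stid to the pool. A condensed sketch (wr_wait plumbing and the 10s timeout as in the listing; field names assumed from context):

static int free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        bool ipv6 = cnp->com.local_addr.ss_family == AF_INET6;
        int stid, ret;

        stid = cxgbit_np_hash_del(cdev, cnp);
        if (stid < 0)
                return -EINVAL; /* this adapter never had the listener */
        if (!test_bit(CDEV_STATE_UP, &cdev->flags))
                return -EINVAL; /* adapter gone: nothing to undo in hw */

        ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
                                  cdev->lldi.rxq_ids[0], ipv6);
        if (!ret)
                ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
                                            0, 10, __func__);

        if (ipv6) {
                struct sockaddr_in6 *sin6 =
                        (struct sockaddr_in6 *)&cnp->com.local_addr;

                cxgb4_clip_release(cdev->lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
        }
        cxgb4_free_stid(cdev->lldi.tids, stid,
                        ipv6 ? PF_INET6 : PF_INET);
        return ret;
}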
566 struct cxgbit_device *cdev;
570 list_for_each_entry(cdev, &cdev_list_head, list) {
571 ret = __cxgbit_free_cdev_np(cdev, cnp);
580 struct cxgbit_device *cdev;
584 list_for_each_entry(cdev, &cdev_list_head, list) {
585 if (cdev == cnp->com.cdev) {
593 __cxgbit_free_cdev_np(cdev, cnp);
606 if (cnp->com.cdev)
650 struct cxgbit_device *cdev = handle;
653 pr_debug("%s cdev %p\n", __func__, cdev);
655 cxgbit_ofld_send(cdev, skb);
673 csk->com.cdev, cxgbit_abort_arp_failure);
675 return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
714 cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
761 csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
796 struct cxgbit_device *cdev;
805 cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
810 cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
815 cdev = csk->com.cdev;
816 spin_lock_bh(&cdev->cskq.lock);
818 spin_unlock_bh(&cdev->cskq.lock);
822 cxgbit_put_cdev(cdev);
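The release path around lines 796-822 finishes a connection: release its CLIP entry and hardware tid, unlink the csk from the adapter's connection list, and drop the reference the csk held on its cdev. A sketch of the final unlink-and-put step (csk->list membership as in the listing; _bh locking because cskq.lock is also taken from bottom-half context):

static void csk_unlink_and_put(struct cxgbit_sock *csk)
{
        struct cxgbit_device *cdev = csk->com.cdev;

        spin_lock_bh(&cdev->cskq.lock);
        list_del(&csk->list);
        spin_unlock_bh(&cdev->cskq.lock);

        /* pairs with cxgbit_get_cdev() in the accept path (line 1373) */
        cxgbit_put_cdev(cdev);
}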
901 struct cxgbit_device *cdev)
933 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
941 step = cdev->lldi.ntxq /
942 cdev->lldi.nchan;
944 step = cdev->lldi.nrxq /
945 cdev->lldi.nchan;
947 csk->rss_qid = cdev->lldi.rxq_ids[
966 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
968 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
977 step = cdev->lldi.ntxq /
978 cdev->lldi.nports;
980 (cdev->selectq[port_id][0]++ % step);
982 step = cdev->lldi.nrxq /
983 cdev->lldi.nports;
985 (cdev->selectq[port_id][1]++ % step);
986 csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
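Lines 933-986 pick per-connection tx/rx queues. Both branches use the same arithmetic: divide the queue pool into equal per-port slices of size step (per-channel in the VLAN branch at 941-947), then round-robin inside the owning port's slice using the cdev->selectq counters; the rx pick is finally translated into an rss queue id. A sketch of the non-VLAN selection:

static void select_queues(struct cxgbit_device *cdev, u8 port_id,
                          u16 *txq_idx, u16 *rss_qid)
{
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        u32 step, rxq_idx;

        /* each port owns a contiguous slice of ntxq/nports tx queues */
        step = lldi->ntxq / lldi->nports;
        *txq_idx = (port_id * step) + (cdev->selectq[port_id][0]++ % step);

        /* same slicing for rx; map the slot to a hardware rss queue id */
        step = lldi->nrxq / lldi->nports;
        rxq_idx = (port_id * step) + (cdev->selectq[port_id][1]++ % step);
        *rss_qid = lldi->rxq_ids[rxq_idx];
}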
998 int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
1002 if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
1008 ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
1014 static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
1024 cxgbit_ofld_send(cdev, skb);
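cxgbit_release_tid() (lines 1014-1024) hands a hardware tid back to the adapter when connection setup is aborted, by sending a CPL_TID_RELEASE through the same guarded transmit path. A sketch in the classic hand-rolled form (newer kernels wrap the message construction in libcxgb's cxgb_mk_tid_release()):

static void release_tid(struct cxgbit_device *cdev, u32 tid)
{
        struct cpl_tid_release *req;
        unsigned int len = roundup(sizeof(*req), 16);
        struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

        if (!skb)
                return;

        req = __skb_put_zero(skb, len);
        INIT_TP_WR(req, tid);                   /* firmware work request header */
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
        set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); /* control, not data, queue */
        cxgbit_ofld_send(cdev, skb);            /* guarded send from the listing */
}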
1028 cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
1033 if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
1039 ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
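cxgbit_ofld_send() and cxgbit_l2t_send() (lines 998-1039) are thin guards around the two cxgb4 transmit entry points: refuse to touch the hardware unless CDEV_STATE_UP is set, free the skb themselves on failure so callers never leak it, and normalize the return. A sketch of the offload variant (the l2t one differs only in calling cxgb4_l2t_send() with the l2t entry):

static int ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        int ret;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                kfree_skb(skb);         /* adapter down: drop, don't leak */
                pr_err("%s - device not up - dropping\n", __func__);
                return -EIO;
        }

        ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
        if (ret < 0)
                kfree_skb(skb);         /* hard failure: free locally, as the driver does */
        return ret < 0 ? ret : 0;       /* positive NET_XMIT_* codes count as sent */
}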
1052 cxgbit_ofld_send(csk->com.cdev, skb);
1135 struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
1156 cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
1213 cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
1217 cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
1223 struct tid_info *t = cdev->lldi.tids;
1234 pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
1235 __func__, cdev, stid, tid);
1257 cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
1267 dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
1278 dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
1298 dst, cdev);
1318 csk->com.cdev = cdev;
1323 csk->wr_cred = cdev->lldi.wr_cred -
1346 cxgb4_clip_get(cdev->lldi.ports[0],
1373 cxgbit_get_cdev(cdev);
1375 spin_lock(&cdev->cskq.lock);
1376 list_add_tail(&csk->list, &cdev->cskq.list);
1377 spin_unlock(&cdev->cskq.lock);
1383 cxgbit_release_tid(cdev, tid);
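cxgbit_pass_accept_req() (lines 1217-1383) is the busiest consumer of cdev: resolve the listening cnp from the stid, pull the 4-tuple out of the request, find a route back to the peer, build and link a csk (taking a cdev reference at line 1373), and on any failure return the tid to the hardware. A skeleton of the demux and reject path only (CPL field access assumed as in t4_msg.h; the header offset inside the skb is driver-specific):

static void pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cpl_pass_accept_req *req =
                (struct cpl_pass_accept_req *)skb->data;        /* offset assumed */
        struct tid_info *t = cdev->lldi.tids;
        unsigned int stid = PASS_OPEN_TID_G(be32_to_cpu(req->tos_stid));
        unsigned int tid = GET_TID(req);
        struct cxgbit_np *cnp = lookup_stid(t, stid);

        pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
                 __func__, cdev, stid, tid);

        if (!cnp) {
                /* listener vanished: give the tid straight back (line 1383) */
                release_tid(cdev, tid);
                goto rel_skb;
        }

        /* happy path, per lines 1257-1377: cxgb_get_4tuple(), then
         * cxgb_find_route()/cxgb_find_route6(), allocate the csk, set
         * csk->com.cdev, cxgbit_get_cdev(), and append csk to cdev->cskq
         * under its lock before sending the accept reply. */
rel_skb:
        __kfree_skb(skb);
}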
1418 struct cxgbit_device *cdev = csk->com.cdev;
1439 (csk->com.cdev->lldi.pf));
1456 if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
1484 cxgbit_ofld_send(csk->com.cdev, skb);
1502 cxgbit_ofld_send(csk->com.cdev, skb);
1536 ret = cxgbit_wait_for_reply(csk->com.cdev,
1569 ret = cxgbit_wait_for_reply(csk->com.cdev,
1579 cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1582 struct tid_info *t = cdev->lldi.tids;
1601 cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1604 struct tid_info *t = cdev->lldi.tids;
1623 cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
1626 struct tid_info *t = cdev->lldi.tids;
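The three handlers at lines 1579-1626 are the other half of the cxgbit_wait_for_reply() contract: each looks up the waiting object through the stid table and hands it the firmware status, waking the blocked thread. A sketch for the pass-open reply, using the simplified wr_wait from the first sketch (the real driver maps status through a wake-up helper rather than a bare -ENOMEM):

static void pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cpl_pass_open_rpl *rpl =
                (struct cpl_pass_open_rpl *)skb->data;  /* offset assumed */
        struct tid_info *t = cdev->lldi.tids;
        unsigned int stid = GET_TID(rpl);
        struct cxgbit_np *cnp = lookup_stid(t, stid);

        if (!cnp) {
                pr_info("%s stid %u lookup failure\n", __func__, stid);
                goto rel_skb;
        }

        /* deliver the status to whoever is blocked in wait_for_reply() */
        cnp->com.wr_wait.ret = rpl->status ? -ENOMEM : 0;       /* simplified */
        complete(&cnp->com.wr_wait.completion);
rel_skb:
        __kfree_skb(skb);
}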
1772 cxgbit_ofld_send(csk->com.cdev, rpl_skb);
1897 static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1902 struct cxgb4_lld_info *lldi = &cdev->lldi;
1918 static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
1923 struct cxgb4_lld_info *lldi = &cdev->lldi;
1959 static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1963 struct cxgb4_lld_info *lldi = &cdev->lldi;
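The rx handlers at the bottom (lines 1897-1963) all begin with the same demux: recover the tid from the CPL header, look the connection up in lldi->tids, and route the skb to that csk. A sketch of the shared step for CPL_RX_DATA (header offset inside the skb assumed; the follow-up, stripping the CPL and queueing the payload on the csk, is driver-specific and consumes the skb):

static void rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct cpl_rx_data *hdr =
                (struct cpl_rx_data *)skb->data;        /* offset assumed */
        unsigned int tid = GET_TID(hdr);
        struct cxgbit_sock *csk;

        csk = lookup_tid(lldi->tids, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u\n", tid);
                __kfree_skb(skb);       /* orphan data: drop it */
                return;
        }

        /* real driver: pull the CPL header, update rx credits, and queue
         * the payload on csk's receive list for the iSCSI workqueue */
}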