Lines matching defs:cdev
(uses of struct chtls_dev in the Chelsio chtls inline-TLS driver; the numbers below are source line numbers, apparently from drivers/crypto/chelsio/chtls/chtls_main.c)

63 		ret = chtls_listen_start(clisten->cdev, clisten->sk);
67 chtls_listen_stop(clisten->cdev, clisten->sk);
86 static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
101 clisten->cdev = cdev;
110 static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
120 clisten->cdev = cdev;
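
Lines 86-120 above show the listen bookkeeping: both the start and stop paths allocate a small node pairing the device with the listening socket before handing it to a notifier chain (the handler fragments at lines 63/67 consume it). A minimal sketch of that pattern, assuming hypothetical names (listen_req, listen_chain) in place of the driver's own:

    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include <linux/notifier.h>
    #include <linux/slab.h>

    struct chtls_dev;                       /* opaque in this sketch */
    struct sock;

    struct listen_req {                     /* hypothetical: mirrors clisten */
            struct chtls_dev *cdev;
            struct sock *sk;
    };

    static RAW_NOTIFIER_HEAD(listen_chain); /* hypothetical chain */
    static DEFINE_MUTEX(listen_chain_lock);

    static int start_listen_sketch(struct chtls_dev *cdev, struct sock *sk)
    {
            struct listen_req *req;
            int err;

            req = kmalloc(sizeof(*req), GFP_KERNEL);
            if (!req)
                    return -ENOMEM;
            req->cdev = cdev;               /* as at lines 101 and 120 */
            req->sk = sk;
            mutex_lock(&listen_chain_lock);
            err = raw_notifier_call_chain(&listen_chain, 0, req);
            mutex_unlock(&listen_chain_lock);
            kfree(req);                     /* node is only a message envelope */
            return notifier_to_errno(err);
    }
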
131 struct chtls_dev *cdev;
134 cdev = to_chtls_dev(dev);
136 for (i = 0; i < cdev->lldi->nports; i++) {
137 netdev = cdev->ports[i];
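
Lines 131-137 walk every port behind the adapter and test a netdev feature bit. A sketch of the same check, assuming the caller passes the port array and count that cdev->lldi carries:

    #include <linux/netdevice.h>

    /* True if any port advertises inline TLS record offload;
     * mirrors the loop at lines 136-137. */
    static bool any_port_has_tls_sketch(struct net_device **ports,
                                        unsigned int nports)
    {
            unsigned int i;

            for (i = 0; i < nports; i++)
                    if (ports[i]->features & NETIF_F_HW_TLS_RECORD)
                            return true;
            return false;
    }
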
146 struct chtls_dev *cdev = to_chtls_dev(dev);
149 return chtls_start_listen(cdev, sk);
155 struct chtls_dev *cdev = to_chtls_dev(dev);
158 chtls_stop_listen(cdev, sk);
161 static void chtls_free_uld(struct chtls_dev *cdev)
165 tls_toe_unregister_device(&cdev->tlsdev);
166 kvfree(cdev->kmap.addr);
167 idr_destroy(&cdev->hwtid_idr);
169 kfree_skb(cdev->rspq_skb_cache[i]);
170 kfree(cdev->lldi);
171 kfree_skb(cdev->askb);
172 kfree(cdev);
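
chtls_free_uld() (lines 161-172) releases resources in roughly the reverse of their setup order: unregister from the TLS TOE core, then free the key map, the per-tid idr, the cached response skbs, the copied lld info, the preallocated skb, and finally the device itself. A sketch of that shape with a hypothetical toy_dev; RSPQ_CACHE_SLOTS stands in for 1 << RSPQ_HASH_BITS, and the tls_toe unregistration at line 165 is omitted:

    #include <linux/idr.h>
    #include <linux/mm.h>
    #include <linux/skbuff.h>
    #include <linux/slab.h>

    #define RSPQ_CACHE_SLOTS 32             /* assumption */

    struct toy_dev {                        /* hypothetical stand-in */
            void *kmap_addr;
            struct idr hwtid_idr;
            struct sk_buff *rspq_skb_cache[RSPQ_CACHE_SLOTS];
            void *lldi;
            struct sk_buff *askb;
    };

    /* Free in reverse-setup order, as at lines 166-172. */
    static void toy_dev_free(struct toy_dev *dev)
    {
            int i;

            kvfree(dev->kmap_addr);
            idr_destroy(&dev->hwtid_idr);
            for (i = 0; i < RSPQ_CACHE_SLOTS; i++)
                    kfree_skb(dev->rspq_skb_cache[i]);
            kfree(dev->lldi);
            kfree_skb(dev->askb);
            kfree(dev);
    }
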
178 struct chtls_dev *cdev;
182 cdev = to_chtls_dev(dev);
185 adap = pci_get_drvdata(cdev->pdev);
189 chtls_free_uld(cdev);
192 static void chtls_register_dev(struct chtls_dev *cdev)
194 struct tls_toe_device *tlsdev = &cdev->tlsdev;
197 strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
205 cdev->cdev_state = CHTLS_CDEV_STATE_UP;
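
chtls_register_dev() (lines 192-205) composes the exported device name from a fixed prefix plus the first port's netdev name (line 197), then marks the device up. A sketch of just the bounded name composition; DEV_NAME_MAX is a stand-in for the real TLS_TOE_DEVICE_NAME_MAX bound:

    #include <linux/string.h>

    #define DEV_NAME_MAX 32                 /* assumption */

    /* "chtls" plus the first port's name, bounded, as at line 197. */
    static void compose_name_sketch(char *buf, const char *port_name)
    {
            strscpy(buf, "chtls", DEV_NAME_MAX);
            strlcat(buf, port_name, DEV_NAME_MAX);
    }
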
210 struct chtls_dev *cdev = container_of(task_param,
214 spin_lock_bh(&cdev->deferq.lock);
215 while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
216 spin_unlock_bh(&cdev->deferq.lock);
217 DEFERRED_SKB_CB(skb)->handler(cdev, skb);
218 spin_lock_bh(&cdev->deferq.lock);
220 spin_unlock_bh(&cdev->deferq.lock);
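
process_deferq() (lines 210-220) drains a deferred-work queue with the classic drop-the-lock-around-the-handler shape: the queue lock protects only the dequeue itself, so each handler runs with the lock released and may sleep or queue more work without deadlocking. A self-contained sketch; drain_ctx and the kfree_skb stand-in handler are assumptions:

    #include <linux/kernel.h>
    #include <linux/skbuff.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct drain_ctx {                      /* hypothetical device stand-in */
            struct sk_buff_head deferq;
            struct work_struct deferq_task;
    };

    static void drain_deferq_sketch(struct work_struct *work)
    {
            struct drain_ctx *ctx = container_of(work, struct drain_ctx,
                                                 deferq_task);
            struct sk_buff *skb;

            spin_lock_bh(&ctx->deferq.lock);
            while ((skb = __skb_dequeue(&ctx->deferq)) != NULL) {
                    /* Run the handler with the lock dropped, as at
                     * lines 216-218; stand-in: just free the skb. */
                    spin_unlock_bh(&ctx->deferq.lock);
                    kfree_skb(skb);
                    spin_lock_bh(&ctx->deferq.lock);
            }
            spin_unlock_bh(&ctx->deferq.lock);
    }
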
223 static int chtls_get_skb(struct chtls_dev *cdev)
225 cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
226 if (!cdev->askb)
229 skb_put(cdev->askb, sizeof(struct tcphdr));
230 skb_reset_transport_header(cdev->askb);
231 memset(cdev->askb->data, 0, cdev->askb->len);
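
chtls_get_skb() (lines 223-231) preallocates a zeroed, TCP-header-sized skb at setup time, presumably so a control message (such as a reset) can still be sent later even when allocations are failing. A sketch of the same preparation:

    #include <linux/skbuff.h>
    #include <linux/string.h>
    #include <linux/tcp.h>

    /* Preallocate a zeroed, TCP-header-sized skb, as at lines 225-231. */
    static struct sk_buff *prealloc_tcp_skb_sketch(void)
    {
            struct sk_buff *skb;

            skb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
            if (!skb)
                    return NULL;
            skb_put(skb, sizeof(struct tcphdr));
            skb_reset_transport_header(skb);
            memset(skb->data, 0, skb->len);
            return skb;
    }
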
238 struct chtls_dev *cdev;
241 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
242 if (!cdev)
249 if (chtls_get_skb(cdev))
253 cdev->lldi = lldi;
254 cdev->pdev = lldi->pdev;
255 cdev->tids = lldi->tids;
256 cdev->ports = lldi->ports;
257 cdev->mtus = lldi->mtus;
258 cdev->tids = lldi->tids;
259 cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
265 cdev->rspq_skb_cache[i] = __alloc_skb(size,
268 if (unlikely(!cdev->rspq_skb_cache[i]))
272 idr_init(&cdev->hwtid_idr);
273 INIT_WORK(&cdev->deferq_task, process_deferq);
274 spin_lock_init(&cdev->listen_lock);
275 spin_lock_init(&cdev->idr_lock);
276 cdev->send_page_order = min_t(uint, get_order(32768),
278 cdev->max_host_sndbuf = 48 * 1024;
281 if (chtls_init_kmap(cdev, lldi))
285 list_add_tail(&cdev->list, &cdev_list);
288 return cdev;
291 kfree_skb(cdev->rspq_skb_cache[j]);
292 kfree_skb(cdev->askb);
296 kfree(cdev);
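
chtls_uld_add() (lines 238-296) follows the usual kernel init shape: kzalloc the device, populate it from the lld info, preallocate the response-skb cache, and unwind with gotos in reverse order on failure (lines 291-296). A compressed sketch of that shape; toy_uld and CACHE_SLOTS are hypothetical:

    #include <linux/skbuff.h>
    #include <linux/slab.h>

    #define CACHE_SLOTS 32                  /* assumption: 1 << RSPQ_HASH_BITS */

    struct toy_uld {                        /* hypothetical stand-in */
            struct sk_buff *cache[CACHE_SLOTS];
    };

    static struct toy_uld *toy_uld_add(unsigned int skb_size)
    {
            struct toy_uld *uld;
            int i, j;

            uld = kzalloc(sizeof(*uld), GFP_KERNEL);
            if (!uld)
                    goto out;

            for (i = 0; i < CACHE_SLOTS; i++) {
                    uld->cache[i] = alloc_skb(skb_size, GFP_KERNEL);
                    if (unlikely(!uld->cache[i]))
                            goto out_rspq_skb;
            }
            return uld;

    out_rspq_skb:                           /* unwind, mirroring lines 291-296 */
            for (j = 0; j < i; j++)
                    kfree_skb(uld->cache[j]);
            kfree(uld);
    out:
            return NULL;
    }
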
303 struct chtls_dev *cdev, *tmp;
306 list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
307 if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
308 list_del(&cdev->list);
309 kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
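
chtls_free_all_uld() (lines 303-309) uses list_for_each_entry_safe() so entries can be unlinked mid-walk, and drops the final reference through a kref so any concurrent holder keeps the object alive. A sketch with a hypothetical toy_node:

    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct toy_node {                       /* hypothetical list element */
            struct list_head list;
            struct kref kref;
            bool up;
    };

    static void toy_node_release(struct kref *kref)
    {
            kfree(container_of(kref, struct toy_node, kref));
    }

    /* _safe iteration permits list_del() mid-walk; the kref defers the
     * actual free to the last holder (compare lines 306-309). */
    static void free_all_sketch(struct list_head *head)
    {
            struct toy_node *node, *tmp;

            list_for_each_entry_safe(node, tmp, head, list) {
                    if (node->up) {
                            list_del(&node->list);
                            kref_put(&node->kref, toy_node_release);
                    }
            }
    }
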
317 struct chtls_dev *cdev = handle;
321 chtls_register_dev(cdev);
329 list_del(&cdev->list);
331 kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
364 static int chtls_recv_packet(struct chtls_dev *cdev,
371 skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
375 ret = chtls_handlers[opcode](cdev, skb);
382 static int chtls_recv_rsp(struct chtls_dev *cdev, const __be64 *rsp)
394 skb = cdev->rspq_skb_cache[rspq_bin];
414 ret = chtls_handlers[opcode](cdev, skb);
421 static void chtls_recv(struct chtls_dev *cdev,
433 ret = chtls_handlers[opcode](cdev, skb);
441 struct chtls_dev *cdev = handle;
448 if (chtls_recv_packet(cdev, gl, rsp) < 0)
454 return chtls_recv_rsp(cdev, rsp);
460 chtls_recv(cdev, &skb, rsp);
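
The receive path (lines 364-460) converges on a single dispatch: an opcode carried in the message indexes a handler table, whether the payload arrived as a packet-gather list, a bare response, or a prebuilt skb (lines 375, 414, 433). A sketch of that table-driven dispatch; rx_handlers[] is a hypothetical stand-in for the driver's chtls_handlers[]:

    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <linux/types.h>

    struct chtls_dev;

    typedef int (*rx_handler_t)(struct chtls_dev *cdev, struct sk_buff *skb);

    /* Hypothetical table; the message opcode selects the handler. */
    static rx_handler_t rx_handlers[256];

    static int dispatch_sketch(struct chtls_dev *cdev, struct sk_buff *skb,
                               u8 opcode)
    {
            if (!rx_handlers[opcode]) {
                    kfree_skb(skb);
                    return -EOPNOTSUPP;
            }
            return rx_handlers[opcode](cdev, skb);
    }
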