Source directory: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/

Lines matching defs:csk

134 static inline void csk_hold(struct cnic_sock *csk)
136 atomic_inc(&csk->ref_count);
139 static inline void csk_put(struct cnic_sock *csk)
141 atomic_dec(&csk->ref_count);
240 static int cnic_in_use(struct cnic_sock *csk)
242 return test_bit(SK_F_INUSE, &csk->flags);
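csk_hold(), csk_put() and cnic_in_use() form the guard pattern used throughout the rest of the listing (for example at 336-349 and 3063-3086): take a reference before touching a csk_tbl entry, skip it if SK_F_INUSE is clear, and drop the reference when done; cnic_cm_destroy() at 2778-2791 leans on the same counter, waiting until ref_count falls back to 1 before clearing the entry. Below is a minimal userspace sketch of that pattern, with C11 atomics standing in for the kernel's atomic_t and test_bit() helpers; the example_sock type and the ex_* names are illustrative, not part of the driver.

/* Userspace stand-in for the helpers above: atomic_inc()/atomic_dec()
 * become C11 fetch-add/fetch-sub, and test_bit(SK_F_INUSE, ...) becomes
 * a plain flag test on an atomic word. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define EX_F_INUSE (1u << 0)            /* plays the role of SK_F_INUSE */

struct example_sock {
	atomic_int  ref_count;
	atomic_uint flags;
};

static void ex_hold(struct example_sock *s) { atomic_fetch_add(&s->ref_count, 1); }
static void ex_put(struct example_sock *s)  { atomic_fetch_sub(&s->ref_count, 1); }
static bool ex_in_use(struct example_sock *s)
{
	return atomic_load(&s->flags) & EX_F_INUSE;
}

int main(void)
{
	struct example_sock sk = { .ref_count = 1, .flags = EX_F_INUSE };

	/* Mirrors the sequence at 336-349: pin, check, work, unpin. */
	ex_hold(&sk);
	if (ex_in_use(&sk))
		puts("entry is live, safe to read its fields");
	ex_put(&sk);
	return 0;
}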
270 struct cnic_sock *csk)
281 if (csk) {
287 path_req.handle = (u64) csk->l5_cid;
288 if (test_bit(SK_F_IPV6, &csk->flags)) {
289 memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
293 memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
297 path_req.vlan_id = csk->vlan_id;
298 path_req.pmtu = csk->mtu;
318 struct cnic_sock *csk;
336 csk = &cp->csk_tbl[l5_cid];
337 csk_hold(csk);
338 if (cnic_in_use(csk)) {
339 memcpy(csk->ha, path_resp->mac_addr, 6);
340 if (test_bit(SK_F_IPV6, &csk->flags))
341 memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
344 memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
346 if (is_valid_ether_addr(csk->ha))
347 cnic_cm_set_pg(csk);
349 csk_put(csk);
358 static int cnic_offld_prep(struct cnic_sock *csk)
360 if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
363 if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
364 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
371 static int cnic_close_prep(struct cnic_sock *csk)
373 clear_bit(SK_F_CONNECT_START, &csk->flags);
376 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
377 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
385 static int cnic_abort_prep(struct cnic_sock *csk)
387 clear_bit(SK_F_CONNECT_START, &csk->flags);
390 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
393 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
394 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
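cnic_offld_prep(), cnic_close_prep() and cnic_abort_prep() coordinate through SK_F_OFFLD_SCHED and SK_F_OFFLD_COMPLETE: test_and_set_bit() lets only one caller schedule an offload, while close/abort spin on the same bit until any in-flight scheduling has finished. A small userspace sketch of the test-and-set guard follows, again with C11 atomics as a stand-in; the ex_* names are illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define EX_F_OFFLD_SCHED (1u << 0)

/* Returns true only for the caller that actually sets the bit, mirroring
 * the test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags) check above:
 * atomic_fetch_or() reports the previous value, so a second caller sees
 * the bit already set and is turned away. */
static bool ex_offld_prep(atomic_uint *flags)
{
	unsigned int old = atomic_fetch_or(flags, EX_F_OFFLD_SCHED);

	return !(old & EX_F_OFFLD_SCHED);
}

int main(void)
{
	atomic_uint flags = 0;

	printf("first caller schedules the offload: %d\n", ex_offld_prep(&flags));
	printf("second caller is rejected:          %d\n", ex_offld_prep(&flags));
	return 0;
}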
1871 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
1901 conn_addr->remote_addr_0 = csk->ha[0];
1902 conn_addr->remote_addr_1 = csk->ha[1];
1903 conn_addr->remote_addr_2 = csk->ha[2];
1904 conn_addr->remote_addr_3 = csk->ha[3];
1905 conn_addr->remote_addr_4 = csk->ha[4];
1906 conn_addr->remote_addr_5 = csk->ha[5];
1932 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->func), csk->vlan_id);
2516 static int cnic_cm_offload_pg(struct cnic_sock *csk)
2518 struct cnic_dev *dev = csk->dev;
2522 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
2531 l4kwqe->da0 = csk->ha[0];
2532 l4kwqe->da1 = csk->ha[1];
2533 l4kwqe->da2 = csk->ha[2];
2534 l4kwqe->da3 = csk->ha[3];
2535 l4kwqe->da4 = csk->ha[4];
2536 l4kwqe->da5 = csk->ha[5];
2547 l4kwqe->host_opaque = csk->l5_cid;
2549 if (csk->vlan_id) {
2551 l4kwqe->vlan_tag = csk->vlan_id;
2558 static int cnic_cm_update_pg(struct cnic_sock *csk)
2560 struct cnic_dev *dev = csk->dev;
2564 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
2571 l4kwqe->pg_cid = csk->pg_cid;
2573 l4kwqe->da0 = csk->ha[0];
2574 l4kwqe->da1 = csk->ha[1];
2575 l4kwqe->da2 = csk->ha[2];
2576 l4kwqe->da3 = csk->ha[3];
2577 l4kwqe->da4 = csk->ha[4];
2578 l4kwqe->da5 = csk->ha[5];
2580 l4kwqe->pg_host_opaque = csk->l5_cid;
2586 static int cnic_cm_upload_pg(struct cnic_sock *csk)
2588 struct cnic_dev *dev = csk->dev;
2592 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
2599 l4kwqe->cid = csk->pg_cid;
2604 static int cnic_cm_conn_req(struct cnic_sock *csk)
2606 struct cnic_dev *dev = csk->dev;
2614 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
2615 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
2616 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
2624 l4kwqe3->ka_timeout = csk->ka_timeout;
2625 l4kwqe3->ka_interval = csk->ka_interval;
2626 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
2627 l4kwqe3->tos = csk->tos;
2628 l4kwqe3->ttl = csk->ttl;
2629 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
2630 l4kwqe3->pmtu = csk->mtu;
2631 l4kwqe3->rcv_buf = csk->rcv_buf;
2632 l4kwqe3->snd_buf = csk->snd_buf;
2633 l4kwqe3->seed = csk->seed;
2636 if (test_bit(SK_F_IPV6, &csk->flags)) {
2646 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
2647 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
2648 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
2649 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
2650 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
2651 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
2664 l4kwqe1->cid = csk->cid;
2665 l4kwqe1->pg_cid = csk->pg_cid;
2666 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
2667 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
2668 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
2669 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
2670 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
2672 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
2674 if (csk->tcp_flags & SK_TCP_NAGLE)
2676 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
2678 if (csk->tcp_flags & SK_TCP_SACK)
2680 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
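cnic_cm_conn_req() fills the connect-request KWQEs with be32_to_cpu()/be16_to_cpu() conversions of the socket addresses and ports (2646-2669) because cnic_get_route() stored them in network byte order, straight from the sockaddr fields (2912-2921). Here is a userspace analogue of that conversion using ntohl()/ntohs(); the sample address and port values are illustrative only.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Stored the way the driver stores them: network (big-endian) byte
	 * order, e.g. straight from sin_addr.s_addr and sin_port. */
	uint32_t dst_ip_be   = htonl(0xC0A80001u);   /* 192.168.0.1 */
	uint16_t dst_port_be = htons(3260);          /* example port */

	/* What be32_to_cpu()/be16_to_cpu() do when the KWQE is built:
	 * the KWQE fields are filled with host-order values. */
	uint32_t dst_ip   = ntohl(dst_ip_be);
	uint16_t dst_port = ntohs(dst_port_be);

	printf("dst_ip=0x%08x dst_port=%u\n",
	       (unsigned int)dst_ip, (unsigned int)dst_port);
	return 0;
}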
2688 static int cnic_cm_close_req(struct cnic_sock *csk)
2690 struct cnic_dev *dev = csk->dev;
2694 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
2700 l4kwqe->cid = csk->cid;
2705 static int cnic_cm_abort_req(struct cnic_sock *csk)
2707 struct cnic_dev *dev = csk->dev;
2711 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
2717 l4kwqe->cid = csk->cid;
2723 u32 l5_cid, struct cnic_sock **csk, void *context)
2754 *csk = csk1;
2758 static void cnic_cm_cleanup(struct cnic_sock *csk)
2760 if (csk->src_port) {
2761 struct cnic_dev *dev = csk->dev;
2764 cnic_free_id(&cp->csk_port_tbl, csk->src_port);
2765 csk->src_port = 0;
2769 static void cnic_close_conn(struct cnic_sock *csk)
2771 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
2772 cnic_cm_upload_pg(csk);
2773 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
2775 cnic_cm_cleanup(csk);
2778 static int cnic_cm_destroy(struct cnic_sock *csk)
2780 if (!cnic_in_use(csk))
2783 csk_hold(csk);
2784 clear_bit(SK_F_INUSE, &csk->flags);
2786 while (atomic_read(&csk->ref_count) != 1)
2788 cnic_cm_cleanup(csk);
2790 csk->flags = 0;
2791 csk_put(csk);
2880 static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
2882 struct cnic_dev *dev = csk->dev;
2885 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
2888 static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
2890 struct cnic_dev *dev = csk->dev;
2906 clear_bit(SK_F_IPV6, &csk->flags);
2909 set_bit(SK_F_IPV6, &csk->flags);
2912 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
2914 csk->dst_port = saddr->remote.v6.sin6_port;
2920 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
2921 csk->dst_port = saddr->remote.v4.sin_port;
2925 csk->vlan_id = 0;
2926 csk->mtu = dev->netdev->mtu;
2930 csk->vlan_id = vlan;
2931 csk->mtu = dst_mtu(dst);
2949 csk->src_port = local_port;
2956 static void cnic_init_csk_state(struct cnic_sock *csk)
2958 csk->state = 0;
2959 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
2960 clear_bit(SK_F_CLOSING, &csk->flags);
2963 static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
2967 if (!cnic_in_use(csk))
2970 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
2973 cnic_init_csk_state(csk);
2975 err = cnic_get_route(csk, saddr);
2979 err = cnic_resolve_addr(csk, saddr);
2984 clear_bit(SK_F_CONNECT_START, &csk->flags);
2988 static int cnic_cm_abort(struct cnic_sock *csk)
2990 struct cnic_local *cp = csk->dev->cnic_priv;
2993 if (!cnic_in_use(csk))
2996 if (cnic_abort_prep(csk))
2997 return cnic_cm_abort_req(csk);
3003 cp->close_conn(csk, opcode);
3004 if (csk->state != opcode)
3010 static int cnic_cm_close(struct cnic_sock *csk)
3012 if (!cnic_in_use(csk))
3015 if (cnic_close_prep(csk)) {
3016 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3017 return cnic_cm_close_req(csk);
3024 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3028 int ulp_type = csk->ulp_type;
3034 ulp_ops->cm_connect_complete(csk);
3036 ulp_ops->cm_close_complete(csk);
3038 ulp_ops->cm_remote_abort(csk);
3040 ulp_ops->cm_abort_complete(csk);
3042 ulp_ops->cm_remote_close(csk);
3047 static int cnic_cm_set_pg(struct cnic_sock *csk)
3049 if (cnic_offld_prep(csk)) {
3050 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3051 cnic_cm_update_pg(csk);
3053 cnic_cm_offload_pg(csk);
3063 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3065 csk_hold(csk);
3066 if (!cnic_in_use(csk))
3070 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3075 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3076 cnic_cm_upcall(cp, csk,
3081 csk->pg_cid = kcqe->pg_cid;
3082 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3083 cnic_cm_conn_req(csk);
3086 csk_put(csk);
3095 struct cnic_sock *csk;
3109 csk = &cp->csk_tbl[l5_cid];
3110 csk_hold(csk);
3112 if (!cnic_in_use(csk)) {
3113 csk_put(csk);
3120 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3121 cnic_cm_upcall(cp, csk,
3127 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
3130 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3131 cnic_cm_upcall(cp, csk, opcode);
3139 cp->close_conn(csk, opcode);
3143 cnic_cm_upcall(cp, csk, opcode);
3146 csk_put(csk);
3188 static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
3190 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
3193 csk->state = opcode;
3196 /* 1. If event opcode matches the expected event in csk->state
3201 if (opcode == csk->state || csk->state == 0 ||
3202 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
3203 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
3204 if (csk->state == 0)
3205 csk->state = opcode;
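cnic_ready_to_close() decides whether a close/reset event may proceed: if the offload-complete bit was still set the event is accepted outright; otherwise it is accepted only when the opcode matches the expected state (a state of 0 accepts anything) and only for the first caller to set SK_F_CLOSING. The sketch below reproduces just that visible decision in userspace; the ex_* names and simplified return values are assumptions, and the CLOSE_COMP special case at 3202 is omitted.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define EX_F_OFFLD_COMPLETE (1u << 0)
#define EX_F_CLOSING        (1u << 1)

/* Accept the event if offload-complete was set (test-and-clear), or if the
 * opcode matches the expected state and this is the first caller to mark
 * the socket as closing (test-and-set). */
static bool ex_ready_to_close(atomic_uint *flags, unsigned int *state,
			      unsigned int opcode)
{
	if (atomic_fetch_and(flags, ~EX_F_OFFLD_COMPLETE) & EX_F_OFFLD_COMPLETE) {
		*state = opcode;
		return true;
	}

	if (opcode == *state || *state == 0) {
		if (!(atomic_fetch_or(flags, EX_F_CLOSING) & EX_F_CLOSING)) {
			if (*state == 0)
				*state = opcode;
			return true;
		}
	}
	return false;
}

int main(void)
{
	atomic_uint flags = 0;
	unsigned int state = 0;

	printf("first close event accepted: %d\n",
	       ex_ready_to_close(&flags, &state, 42));
	printf("mismatched second event:    %d\n",
	       ex_ready_to_close(&flags, &state, 7));
	return 0;
}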
3212 static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
3214 struct cnic_dev *dev = csk->dev;
3218 cnic_cm_upcall(cp, csk, opcode);
3222 clear_bit(SK_F_CONNECT_START, &csk->flags);
3223 cnic_close_conn(csk);
3224 csk->state = opcode;
3225 cnic_cm_upcall(cp, csk, opcode);
3241 static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
3243 struct cnic_dev *dev = csk->dev;
3245 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
3254 if (cnic_ready_to_close(csk, opcode)) {
3255 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3271 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
3275 cnic_close_conn(csk);
3276 cnic_cm_upcall(cp, csk, csk->state);
3356 struct cnic_sock *csk = &cp->csk_tbl[i];
3358 clear_bit(SK_F_INUSE, &csk->flags);
3359 cnic_cm_cleanup(csk);