Lines matching refs: con
in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/fs/dlm/

Each entry below is a matched source line from the DLM lowcomms code; the
leading number is the line number of the match in the file, and con is the
local struct connection * the code passes around.

130 struct connection *con;
154 struct connection *con = NULL;
158 con = idr_find(&connections_idr, nodeid);
159 if (con || !alloc)
160 return con;
166 con = kmem_cache_zalloc(con_cache, alloc);
167 if (!con)
170 r = idr_get_new_above(&connections_idr, con, nodeid, &n);
172 kmem_cache_free(con_cache, con);
178 kmem_cache_free(con_cache, con);
182 con->nodeid = nodeid;
183 mutex_init(&con->sock_mutex);
184 INIT_LIST_HEAD(&con->writequeue);
185 spin_lock_init(&con->writequeue_lock);
186 INIT_WORK(&con->swork, process_send_sockets);
187 INIT_WORK(&con->rwork, process_recv_sockets);
190 if (con->nodeid) {
193 con->connect_action = zerocon->connect_action;
194 if (!con->rx_action)
195 con->rx_action = zerocon->rx_action;
201 return con;
206 struct connection *con;
209 con = __nodeid2con(nodeid, allocation);
212 return con;
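
Lines 154-212 are the node-to-connection map: __nodeid2con() looks the
nodeid up in an idr and, when the caller passed an allocation flag, lazily
allocates a zeroed struct connection, registers it, and initializes its
mutex, write queue, and work items, inheriting the connect/rx actions from
the nodeid-0 listening connection. A minimal userspace sketch of the same
lookup-or-create shape, with a fixed array standing in for the idr and
calloc() for kmem_cache_zalloc(); all names here are illustrative, not the
DLM API:

    #include <stdlib.h>

    #define MAX_NODES 256

    struct conn {
        int nodeid;
        /* socket, flags, write queue ... elided */
    };

    static struct conn *conn_tab[MAX_NODES];

    /* Return the connection for nodeid; allocate only if alloc != 0. */
    static struct conn *nodeid_to_conn(int nodeid, int alloc)
    {
        struct conn *c;

        if (nodeid < 0 || nodeid >= MAX_NODES)
            return NULL;

        c = conn_tab[nodeid];
        if (c || !alloc)                /* found, or lookup-only caller */
            return c;

        c = calloc(1, sizeof(*c));      /* zeroed, like kmem_cache_zalloc() */
        if (!c)
            return NULL;

        c->nodeid = nodeid;
        conn_tab[nodeid] = c;
        return c;
    }
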
219 struct connection *con;
223 con = __nodeid2con(i, 0);
224 if (con && con->sctp_assoc == assoc_id) {
226 return con;
262 struct connection *con = sock2con(sk);
263 if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
264 queue_work(recv_workqueue, &con->rwork);
269 struct connection *con = sock2con(sk);
271 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
272 queue_work(send_workqueue, &con->swork);
275 static inline void lowcomms_connect_sock(struct connection *con)
277 if (!test_and_set_bit(CF_CONNECT_PENDING, &con->flags))
278 queue_work(send_workqueue, &con->swork);
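
Lines 262-278 all share one shape: a socket callback (data ready, write
space, connect request) atomically sets a CF_*_PENDING bit and queues work
only when the bit was previously clear, so a burst of callbacks collapses
into a single queued work item. A small userspace sketch of that idiom,
with C11 atomics standing in for test_and_set_bit() and a stub standing in
for queue_work():

    #include <stdatomic.h>
    #include <stdio.h>

    #define CF_READ_PENDING (1u << 0)

    struct conn {
        atomic_uint flags;
    };

    /* Stand-in for queue_work(recv_workqueue, &con->rwork). */
    static void queue_recv_work(struct conn *c)
    {
        printf("recv work queued once for %p\n", (void *)c);
    }

    /* Sketch of lowcomms_data_ready(): only the caller that flips the
     * bit from 0 to 1 enqueues; later callers find it already set. */
    static void data_ready(struct conn *c)
    {
        unsigned int old = atomic_fetch_or(&c->flags, CF_READ_PENDING);
        if (!(old & CF_READ_PENDING))
            queue_recv_work(c);
    }

The matching work handler clears the bit before it starts processing
(compare process_recv_sockets() at line 1333), so a callback that fires
mid-processing re-queues the work instead of being lost.
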
288 static int add_sock(struct socket *sock, struct connection *con)
290 con->sock = sock;
293 con->sock->sk->sk_data_ready = lowcomms_data_ready;
294 con->sock->sk->sk_write_space = lowcomms_write_space;
295 con->sock->sk->sk_state_change = lowcomms_state_change;
296 con->sock->sk->sk_user_data = con;
319 static void close_connection(struct connection *con, bool and_other)
321 mutex_lock(&con->sock_mutex);
323 if (con->sock) {
324 sock_release(con->sock);
325 con->sock = NULL;
327 if (con->othercon && and_other) {
329 close_connection(con->othercon, false);
331 if (con->rx_page) {
332 __free_page(con->rx_page);
333 con->rx_page = NULL;
335 con->retries = 0;
336 mutex_unlock(&con->sock_mutex);
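
close_connection() (lines 319-336) tears a connection down under its
sock_mutex: release the socket, optionally close the paired othercon
(passing false so the recursion stops after one level), free a partially
filled receive page, and zero the retry counter so a later reconnect
starts fresh. A compact userspace analogue of that one-level pairwise
teardown, over a simplified struct invented for illustration:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>
    #include <unistd.h>

    struct conn {
        pthread_mutex_t lock;
        int fd;               /* -1 when no socket is attached */
        void *rx_buf;         /* partially received data, if any */
        int retries;
        struct conn *other;   /* paired inbound/outbound connection */
    };

    /* and_other limits recursion to exactly one extra level: the
     * nested call always passes false, as on line 329. */
    static void close_conn(struct conn *c, bool and_other)
    {
        pthread_mutex_lock(&c->lock);
        if (c->fd >= 0) {
            close(c->fd);
            c->fd = -1;
        }
        if (c->other && and_other)
            close_conn(c->other, false);
        free(c->rx_buf);
        c->rx_buf = NULL;
        c->retries = 0;       /* next connect attempt starts clean */
        pthread_mutex_unlock(&c->lock);
    }
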
347 struct connection *con;
349 con = nodeid2con(0,0);
350 BUG_ON(con == NULL);
369 ret = kernel_sendmsg(con->sock, &outmessage, NULL, 0, 0);
380 struct connection *con;
384 con = __nodeid2con(i, 0);
385 if (!con)
387 con->sctp_assoc = 0;
388 if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
389 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
390 queue_work(send_workqueue, &con->swork);
398 static void process_sctp_notification(struct connection *con,
434 ret = kernel_getsockopt(con->sock,
448 clear_bit(CF_CONNECT_PENDING, &con->flags);
469 ret = kernel_getsockopt(con->sock, IPPROTO_SCTP,
488 clear_bit(CF_INIT_PENDING, &con->flags);
500 con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
501 if (con) {
502 con->sctp_assoc = 0;
527 static int receive_from_sock(struct connection *con)
538 mutex_lock(&con->sock_mutex);
540 if (con->sock == NULL) {
545 if (con->rx_page == NULL) {
550 con->rx_page = alloc_page(GFP_ATOMIC);
551 if (con->rx_page == NULL)
553 cbuf_init(&con->cb, PAGE_CACHE_SIZE);
565 iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
566 iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
574 if (cbuf_data(&con->cb) >= con->cb.base) {
575 iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
576 iov[1].iov_len = con->cb.base;
577 iov[1].iov_base = page_address(con->rx_page);
582 r = ret = kernel_recvmsg(con->sock, &msg, iov, nvec, len,
592 process_sctp_notification(con, &msg,
593 page_address(con->rx_page) + con->cb.base);
594 mutex_unlock(&con->sock_mutex);
597 BUG_ON(con->nodeid == 0);
601 cbuf_add(&con->cb, ret);
602 ret = dlm_process_incoming_buffer(con->nodeid,
603 page_address(con->rx_page),
604 con->cb.base, con->cb.len,
609 page_address(con->rx_page), con->cb.base, con->cb.len,
614 cbuf_eat(&con->cb, ret);
616 if (cbuf_empty(&con->cb) && !call_again_soon) {
617 __free_page(con->rx_page);
618 con->rx_page = NULL;
623 mutex_unlock(&con->sock_mutex);
627 if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
628 queue_work(recv_workqueue, &con->rwork);
629 mutex_unlock(&con->sock_mutex);
633 mutex_unlock(&con->sock_mutex);
634 if (ret != -EAGAIN && !test_bit(CF_IS_OTHERCON, &con->flags)) {
635 close_connection(con, false);
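
receive_from_sock() (lines 527-635) reads into a single page managed as a
ring buffer: the cbuf tracks base and len, and when the free space wraps
past the end of the page the receive is split across two iovecs (lines
565-577). Bytes consumed by dlm_process_incoming_buffer() are released
with cbuf_eat(), and the page is freed once empty. A self-contained
sketch of the wrap-around iovec computation over a 4 KiB buffer; the
helper names follow the listing, and the arithmetic is the usual
power-of-two ring:

    #include <sys/uio.h>

    #define BUF_SIZE 4096u   /* stands in for PAGE_CACHE_SIZE */

    struct cbuf {
        unsigned int base;   /* offset of the oldest unconsumed byte */
        unsigned int len;    /* number of unconsumed bytes */
        unsigned int mask;   /* size - 1; size is a power of two */
    };

    static void cbuf_init(struct cbuf *cb, unsigned int size)
    {
        cb->base = cb->len = 0;
        cb->mask = size - 1;
    }

    /* Offset where the next received byte lands. */
    static unsigned int cbuf_data(const struct cbuf *cb)
    {
        return (cb->base + cb->len) & cb->mask;
    }

    static void cbuf_add(struct cbuf *cb, unsigned int n) { cb->len += n; }

    static void cbuf_eat(struct cbuf *cb, unsigned int n)
    {
        cb->len -= n;
        cb->base = (cb->base + n) & cb->mask;
    }

    /* Build one or two iovecs covering the free space, mirroring lines
     * 565-577: one when the gap sits contiguously before base, two when
     * it wraps around the end of the buffer. */
    static int free_space_iov(char *buf, const struct cbuf *cb,
                              struct iovec iov[2])
    {
        unsigned int data = cbuf_data(cb);

        if (data >= cb->base) {          /* free region wraps */
            iov[0].iov_base = buf + data;
            iov[0].iov_len = BUF_SIZE - data;
            iov[1].iov_base = buf;
            iov[1].iov_len = cb->base;
            return iov[1].iov_len ? 2 : 1;
        }
        iov[0].iov_base = buf + data;    /* contiguous gap before base */
        iov[0].iov_len = cb->base - data;
        return 1;
    }

After a successful readv() into those iovecs, the count read is folded
back in with cbuf_add(), just as line 601 does with the kernel_recvmsg()
result.
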
646 static int tcp_accept_from_sock(struct connection *con)
662 mutex_lock_nested(&con->sock_mutex, 0);
665 if (con->sock == NULL)
668 newsock->type = con->sock->type;
669 newsock->ops = con->sock->ops;
671 result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
688 mutex_unlock(&con->sock_mutex);
745 mutex_unlock(&con->sock_mutex);
750 mutex_unlock(&con->sock_mutex);
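
tcp_accept_from_sock() (lines 646-750) pulls a new socket off the
nodeid-0 listener with O_NONBLOCK and attaches it to the connection for
the peer's nodeid (or to an othercon if that node already connected
outward). The userspace equivalent of the non-blocking accept step is:

    #define _GNU_SOURCE
    #include <sys/socket.h>

    /* Accept one pending connection without blocking; returns the new
     * fd, or -1 with errno EAGAIN when nothing is queued. accept4()
     * plays the role of ops->accept(..., O_NONBLOCK) on line 671. */
    static int accept_one(int listen_fd)
    {
        return accept4(listen_fd, NULL, NULL, SOCK_NONBLOCK);
    }
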
769 static void sctp_init_assoc(struct connection *con)
783 if (test_and_set_bit(CF_INIT_PENDING, &con->flags))
786 if (con->retries++ > MAX_CONNECT_RETRIES)
789 log_print("Initiating association with node %d", con->nodeid);
791 if (nodeid_to_addr(con->nodeid, (struct sockaddr *)&rem_addr)) {
792 log_print("no address for nodeid %d", con->nodeid);
806 spin_lock(&con->writequeue_lock);
807 e = list_entry(con->writequeue.next, struct writequeue_entry,
810 BUG_ON((struct list_head *) e == &con->writequeue);
814 spin_unlock(&con->writequeue_lock);
833 con->nodeid, ret);
836 clear_bit(CF_CONNECT_PENDING, &con->flags);
837 clear_bit(CF_INIT_PENDING, &con->flags);
840 spin_lock(&con->writequeue_lock);
849 spin_unlock(&con->writequeue_lock);
854 static void tcp_connect_to_sock(struct connection *con)
861 if (con->nodeid == 0) {
866 mutex_lock(&con->sock_mutex);
867 if (con->retries++ > MAX_CONNECT_RETRIES)
871 if (con->sock) {
883 if (dlm_nodeid_to_addr(con->nodeid, &saddr))
886 sock->sk->sk_user_data = con;
887 con->rx_action = receive_from_sock;
888 con->connect_action = tcp_connect_to_sock;
889 add_sock(sock, con);
893 log_print("connecting to %d", con->nodeid);
903 if (con->sock) {
904 sock_release(con->sock);
905 con->sock = NULL;
914 lowcomms_connect_sock(con);
918 mutex_unlock(&con->sock_mutex);
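
tcp_connect_to_sock() (lines 854-918) is the outbound path: under
sock_mutex it gives up after MAX_CONNECT_RETRIES, otherwise creates a
socket, installs receive_from_sock/tcp_connect_to_sock as the actions via
add_sock(), resolves the peer address from the nodeid, and on failure
releases the socket and re-arms the connect work through
lowcomms_connect_sock(). A plain-socket sketch of the bounded-retry
connect; the limit and struct are invented for illustration:

    #include <errno.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #define MAX_RETRIES 3   /* stand-in for MAX_CONNECT_RETRIES */

    struct conn {
        int fd;
        int retries;
        struct sockaddr_in peer;   /* assumed already resolved */
    };

    /* 0 on success; -1 on failure, after which the caller may re-queue
     * the connect work until the retry budget runs out (line 867). */
    static int connect_to_peer(struct conn *c)
    {
        if (c->retries++ > MAX_RETRIES)
            return -1;                       /* give up for good */

        c->fd = socket(AF_INET, SOCK_STREAM, 0);
        if (c->fd < 0)
            return -1;

        /* The kernel path connects non-blocking and treats
         * -EINPROGRESS as "in flight"; with a blocking socket the
         * extra errno check is simply harmless. */
        if (connect(c->fd, (struct sockaddr *)&c->peer,
                    sizeof(c->peer)) < 0 && errno != EINPROGRESS) {
            close(c->fd);                    /* mirror sock_release() */
            c->fd = -1;
            return -1;
        }
        return 0;
    }
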
922 static struct socket *tcp_create_listen_sock(struct connection *con,
949 sock->sk->sk_user_data = con;
950 con->rx_action = tcp_accept_from_sock;
951 con->connect_action = tcp_connect_to_sock;
952 con->sock = sock;
961 con->sock = NULL;
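
tcp_create_listen_sock() (lines 922-961) builds the listening socket
bound to the local cluster address, points sk_user_data back at the
nodeid-0 connection, and sets rx_action to tcp_accept_from_sock so that
incoming data-ready events run the accept path. The userspace equivalent
is the usual socket/bind/listen sequence; the port and backlog here are
illustrative:

    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Create a TCP listener on the given port; returns the fd or -1. */
    static int create_listen_sock(unsigned short port)
    {
        struct sockaddr_in addr;
        int one = 1;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
            return -1;

        /* Like the kernel path, allow quick rebinding of the address. */
        setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(port);

        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
            listen(fd, 5) < 0) {
            close(fd);
            return -1;
        }
        return fd;
    }
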
1032 struct connection *con = nodeid2con(0, GFP_KERNEL);
1035 if (!con)
1068 /* Init con struct */
1069 sock->sk->sk_user_data = con;
1070 con->sock = sock;
1071 con->sock->sk->sk_data_ready = lowcomms_data_ready;
1072 con->rx_action = receive_from_sock;
1073 con->connect_action = sctp_init_assoc;
1080 result = add_sctp_bind_addr(con, &localaddr, addr_len, num);
1096 con->sock = NULL;
1104 struct connection *con = nodeid2con(0, GFP_KERNEL);
1107 if (!con)
1119 set_bit(CF_IS_OTHERCON, &con->flags);
1121 sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
1123 add_sock(sock, con);
1135 static struct writequeue_entry *new_writequeue_entry(struct connection *con,
1154 entry->con = con;
1161 struct connection *con;
1166 con = nodeid2con(nodeid, allocation);
1167 if (!con)
1170 spin_lock(&con->writequeue_lock);
1171 e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
1172 if ((&e->list == &con->writequeue) ||
1180 spin_unlock(&con->writequeue_lock);
1190 e = new_writequeue_entry(con, allocation);
1192 spin_lock(&con->writequeue_lock);
1196 list_add_tail(&e->list, &con->writequeue);
1197 spin_unlock(&con->writequeue_lock);
1206 struct connection *con = e->con;
1209 spin_lock(&con->writequeue_lock);
1215 spin_unlock(&con->writequeue_lock);
1217 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
1218 queue_work(send_workqueue, &con->swork);
1223 spin_unlock(&con->writequeue_lock);
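
Lines 1135-1223 are the send-side batching: dlm_lowcomms_get_buffer()
hands the caller a slice of the last page on con->writequeue if it still
has room, otherwise allocates a fresh writequeue_entry;
dlm_lowcomms_commit_buffer() then publishes the slice and sets
CF_WRITE_PENDING to kick the send work. A reduced sketch of that
reserve/commit split over a single page, with the locking and the
multi-entry list elided to keep it short:

    #include <stddef.h>

    #define BUF_SIZE 4096u

    /* One page-sized batch of outgoing messages. */
    struct wq_entry {
        char page[BUF_SIZE];
        unsigned int offset;   /* start of committed, unsent data */
        unsigned int len;      /* committed bytes awaiting send */
        unsigned int end;      /* reserved-but-uncommitted high mark */
    };

    /* Reserve len bytes in the entry; NULL when it will not fit
     * (the real code then starts a new writequeue_entry). */
    static void *wq_get_buffer(struct wq_entry *e, unsigned int len)
    {
        void *p;

        if (e->end + len > BUF_SIZE)
            return NULL;
        p = e->page + e->end;
        e->end += len;
        return p;
    }

    /* Publish the reservation so the send worker may transmit it; the
     * real commit also sets CF_WRITE_PENDING and queues con->swork. */
    static void wq_commit_buffer(struct wq_entry *e, unsigned int len)
    {
        e->len += len;
    }
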
1228 static void send_to_sock(struct connection *con)
1236 mutex_lock(&con->sock_mutex);
1237 if (con->sock == NULL)
1240 sendpage = con->sock->ops->sendpage;
1242 spin_lock(&con->writequeue_lock);
1244 e = list_entry(con->writequeue.next, struct writequeue_entry,
1246 if ((struct list_head *) e == &con->writequeue)
1252 spin_unlock(&con->writequeue_lock);
1257 ret = sendpage(con->sock, e->page, offset, len,
1268 spin_lock(&con->writequeue_lock);
1279 spin_unlock(&con->writequeue_lock);
1281 mutex_unlock(&con->sock_mutex);
1285 mutex_unlock(&con->sock_mutex);
1286 close_connection(con, false);
1287 lowcomms_connect_sock(con);
1291 mutex_unlock(&con->sock_mutex);
1292 if (!test_bit(CF_INIT_PENDING, &con->flags))
1293 lowcomms_connect_sock(con);
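
send_to_sock() (lines 1228-1293) drains con->writequeue under sock_mutex:
it takes the head entry, drops writequeue_lock, pushes the committed
bytes with ->sendpage, and on EAGAIN re-arms write-pending and backs off,
while a hard error closes the connection and schedules a reconnect. A
userspace drain loop with send() in place of sendpage, reduced to the
same three outcomes:

    #include <errno.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    struct out_entry {
        const char *data;
        unsigned int offset;   /* next unsent byte */
        unsigned int len;      /* committed bytes not yet sent */
    };

    /* 1 when the entry is fully sent, 0 to retry later (EAGAIN), -1 on
     * a hard error (caller closes the socket and reconnects). */
    static int drain_entry(int fd, struct out_entry *e)
    {
        while (e->len) {
            ssize_t n = send(fd, e->data + e->offset, e->len,
                             MSG_DONTWAIT);
            if (n < 0) {
                if (errno == EAGAIN || errno == EWOULDBLOCK)
                    return 0;   /* leave write-pending set, try later */
                return -1;      /* close_connection() + reconnect path */
            }
            e->offset += (unsigned int)n;
            e->len -= (unsigned int)n;
        }
        return 1;               /* entry drained; free or reuse it */
    }
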
1297 static void clean_one_writequeue(struct connection *con)
1302 spin_lock(&con->writequeue_lock);
1303 list_for_each_safe(list, temp, &con->writequeue) {
1309 spin_unlock(&con->writequeue_lock);
1316 struct connection *con;
1319 con = nodeid2con(nodeid, 0);
1320 if (con) {
1321 clean_one_writequeue(con);
1322 close_connection(con, true);
1330 struct connection *con = container_of(work, struct connection, rwork);
1333 clear_bit(CF_READ_PENDING, &con->flags);
1335 err = con->rx_action(con);
1342 struct connection *con = container_of(work, struct connection, swork);
1344 if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
1345 con->connect_action(con);
1347 clear_bit(CF_WRITE_PENDING, &con->flags);
1348 send_to_sock(con);
1358 struct connection *con = __nodeid2con(nodeid, 0);
1360 if (con)
1361 clean_one_writequeue(con);
1395 struct connection *con;
1402 con = __nodeid2con(i, 0);
1403 if (con)
1404 con->flags |= 0xFF;
1414 con = __nodeid2con(i, 0);
1415 if (con) {
1416 close_connection(con, true);
1417 if (con->othercon)
1418 kmem_cache_free(con_cache, con->othercon);
1419 kmem_cache_free(con_cache, con);
1431 struct connection *con;
1466 con = nodeid2con(0,0);
1467 if (con) {
1468 close_connection(con, false);
1469 kmem_cache_free(con_cache, con);