Lines matching refs: con
Source file: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/fs/dlm/lowcomms.c (the DLM low-level inter-node communication layer)

135 struct connection *con;
165 struct connection *con;
169 hlist_for_each_entry(con, h, &connection_hash[r], list) {
170 if (con->nodeid == nodeid)
171 return con;
182 struct connection *con = NULL;
185 con = __find_con(nodeid);
186 if (con || !alloc)
187 return con;
189 con = kmem_cache_zalloc(con_cache, alloc);
190 if (!con)
194 hlist_add_head(&con->list, &connection_hash[r]);
196 con->nodeid = nodeid;
197 mutex_init(&con->sock_mutex);
198 INIT_LIST_HEAD(&con->writequeue);
199 spin_lock_init(&con->writequeue_lock);
200 INIT_WORK(&con->swork, process_send_sockets);
201 INIT_WORK(&con->rwork, process_recv_sockets);
204 if (con->nodeid) {
207 con->connect_action = zerocon->connect_action;
208 if (!con->rx_action)
209 con->rx_action = zerocon->rx_action;
212 return con;
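
Lines 165-212 above are the lookup and allocation core: __find_con() hashes the nodeid into connection_hash[] and walks a single bucket, while __nodeid2con() falls back to allocating a zeroed connection, linking it into the table, and (lines 204-209) inheriting connect_action/rx_action from the nodeid-0 listening connection. A minimal user-space sketch of the find-or-create pattern, assuming an illustrative CONN_HASH_SIZE and nodeid_hash() in place of the file's own hash:

    #include <stdlib.h>

    #define CONN_HASH_SIZE 32   /* illustrative; not read from the file */

    struct connection {
        int nodeid;
        struct connection *next;   /* stands in for the kernel hlist */
    };

    static struct connection *connection_hash[CONN_HASH_SIZE];

    static unsigned int nodeid_hash(int nodeid)
    {
        return (unsigned int)nodeid & (CONN_HASH_SIZE - 1);
    }

    static struct connection *find_con(int nodeid)
    {
        struct connection *con;

        for (con = connection_hash[nodeid_hash(nodeid)]; con; con = con->next)
            if (con->nodeid == nodeid)
                return con;
        return NULL;
    }

    /* Mirrors __nodeid2con(): return an existing connection, or allocate,
     * zero and link a new one when alloc is set. */
    static struct connection *nodeid2con_sketch(int nodeid, int alloc)
    {
        struct connection *con = find_con(nodeid);
        unsigned int r;

        if (con || !alloc)
            return con;

        con = calloc(1, sizeof(*con));   /* kmem_cache_zalloc() in the kernel */
        if (!con)
            return NULL;

        r = nodeid_hash(nodeid);
        con->nodeid = nodeid;
        con->next = connection_hash[r];  /* hlist_add_head() equivalent */
        connection_hash[r] = con;
        return con;
    }
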
220 struct connection *con;
223 hlist_for_each_entry_safe(con, h, n, &connection_hash[i], list){
224 conn_func(con);
231 struct connection *con;
234 con = __nodeid2con(nodeid, allocation);
237 return con;
245 struct connection *con;
250 hlist_for_each_entry(con, h, &connection_hash[i], list) {
251 if (con->sctp_assoc == assoc_id) {
253 return con;
289 struct connection *con = sock2con(sk);
290 if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
291 queue_work(recv_workqueue, &con->rwork);
296 struct connection *con = sock2con(sk);
298 if (con && !test_and_set_bit(CF_WRITE_PENDING, &con->flags))
299 queue_work(send_workqueue, &con->swork);
302 static inline void lowcomms_connect_sock(struct connection *con)
304 if (test_bit(CF_CLOSE, &con->flags))
306 if (!test_and_set_bit(CF_CONNECT_PENDING, &con->flags))
307 queue_work(send_workqueue, &con->swork);
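
Lines 289-307 show the callback side of the event model: sk_data_ready, sk_write_space and the connect trigger each set a CF_* flag with test_and_set_bit() and queue work only when the bit was previously clear, so a connection never has more than one outstanding work item per direction. A user-space sketch of the idiom using C11 atomics; enqueue_recv_work() is a hypothetical stand-in for queue_work(recv_workqueue, &con->rwork):

    #include <stdatomic.h>
    #include <stdio.h>

    #define CF_READ_PENDING 0x01u

    struct connection {
        atomic_uint flags;
    };

    /* Hypothetical stand-in for queue_work(recv_workqueue, &con->rwork). */
    static void enqueue_recv_work(struct connection *con)
    {
        printf("recv work queued for %p\n", (void *)con);
    }

    /* Mirrors lowcomms_data_ready(): only the caller that flips the bit
     * from 0 to 1 queues work; concurrent wakeups are absorbed. */
    static void data_ready(struct connection *con)
    {
        if (!(atomic_fetch_or(&con->flags, CF_READ_PENDING) & CF_READ_PENDING))
            enqueue_recv_work(con);
    }
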
318 struct connection *con;
327 con = nodeid2con(nodeid, GFP_NOFS);
328 if (!con)
330 lowcomms_connect_sock(con);
335 static int add_sock(struct socket *sock, struct connection *con)
337 con->sock = sock;
340 con->sock->sk->sk_data_ready = lowcomms_data_ready;
341 con->sock->sk->sk_write_space = lowcomms_write_space;
342 con->sock->sk->sk_state_change = lowcomms_state_change;
343 con->sock->sk->sk_user_data = con;
344 con->sock->sk->sk_allocation = GFP_NOFS;
368 static void close_connection(struct connection *con, bool and_other)
370 mutex_lock(&con->sock_mutex);
372 if (con->sock) {
373 sock_release(con->sock);
374 con->sock = NULL;
376 if (con->othercon && and_other) {
378 close_connection(con->othercon, false);
380 if (con->rx_page) {
381 __free_page(con->rx_page);
382 con->rx_page = NULL;
385 con->retries = 0;
386 mutex_unlock(&con->sock_mutex);
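
close_connection() (lines 368-386) tears everything down under sock_mutex. Note the recursion on line 378: the nested call passes false for and_other, so when a node holds a second, accepted connection in con->othercon (as TCP mode can), the pair is closed exactly once and the recursion cannot ping-pong. A compact sketch of just that shape:

    #include <stdbool.h>

    struct connection {
        int sock;                     /* -1 when closed; stands in for con->sock */
        struct connection *othercon;  /* second, accepted connection, if any */
        int retries;
    };

    /* Mirrors close_connection(): the nested call passes false, so an
     * othercon pair is closed exactly once and cannot recurse further. */
    static void close_conn(struct connection *con, bool and_other)
    {
        con->sock = -1;               /* sock_release() in the kernel */
        if (con->othercon && and_other)
            close_conn(con->othercon, false);
        con->retries = 0;
    }
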
397 struct connection *con;
399 con = nodeid2con(0,0);
400 BUG_ON(con == NULL);
419 ret = kernel_sendmsg(con->sock, &outmessage, NULL, 0, 0);
425 static void sctp_init_failed_foreach(struct connection *con)
427 con->sctp_assoc = 0;
428 if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
429 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
430 queue_work(send_workqueue, &con->swork);
446 static void process_sctp_notification(struct connection *con,
482 ret = kernel_getsockopt(con->sock,
496 clear_bit(CF_CONNECT_PENDING, &con->flags);
517 ret = kernel_getsockopt(con->sock, IPPROTO_SCTP,
539 clear_bit(CF_INIT_PENDING, &con->flags);
551 con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
552 if (con) {
553 con->sctp_assoc = 0;
578 static int receive_from_sock(struct connection *con)
589 mutex_lock(&con->sock_mutex);
591 if (con->sock == NULL) {
596 if (con->rx_page == NULL) {
601 con->rx_page = alloc_page(GFP_ATOMIC);
602 if (con->rx_page == NULL)
604 cbuf_init(&con->cb, PAGE_CACHE_SIZE);
616 iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
617 iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
625 if (cbuf_data(&con->cb) >= con->cb.base) {
626 iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
627 iov[1].iov_len = con->cb.base;
628 iov[1].iov_base = page_address(con->rx_page);
633 r = ret = kernel_recvmsg(con->sock, &msg, iov, nvec, len,
643 process_sctp_notification(con, &msg,
644 page_address(con->rx_page) + con->cb.base);
645 mutex_unlock(&con->sock_mutex);
648 BUG_ON(con->nodeid == 0);
652 cbuf_add(&con->cb, ret);
653 ret = dlm_process_incoming_buffer(con->nodeid,
654 page_address(con->rx_page),
655 con->cb.base, con->cb.len,
660 page_address(con->rx_page), con->cb.base, con->cb.len,
665 cbuf_eat(&con->cb, ret);
667 if (cbuf_empty(&con->cb) && !call_again_soon) {
668 __free_page(con->rx_page);
669 con->rx_page = NULL;
674 mutex_unlock(&con->sock_mutex);
678 if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
679 queue_work(recv_workqueue, &con->rwork);
680 mutex_unlock(&con->sock_mutex);
684 mutex_unlock(&con->sock_mutex);
686 close_connection(con, false);
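
receive_from_sock() (lines 578-686) reads into one page treated as a ring buffer: cbuf_data() is where the next bytes land, and when that write position has wrapped past cb.base the read is split across two iovecs (lines 616-628), one covering the tail of the page and one its head. The helpers are tiny; what follows is a reconstruction written as functions, consistent with every use above (the file itself defines them more tersely):

    struct cbuf {
        unsigned int base;  /* offset of the first unconsumed byte */
        unsigned int len;   /* bytes currently buffered */
        unsigned int mask;  /* size - 1; size must be a power of two */
    };

    static void cbuf_init(struct cbuf *cb, unsigned int size)
    {
        cb->base = cb->len = 0;
        cb->mask = size - 1;
    }

    /* Offset where the next received bytes land; wraps within the page. */
    static unsigned int cbuf_data(const struct cbuf *cb)
    {
        return (cb->base + cb->len) & cb->mask;
    }

    static void cbuf_add(struct cbuf *cb, unsigned int n)
    {
        cb->len += n;
    }

    /* Consume n bytes from the front of the ring. */
    static void cbuf_eat(struct cbuf *cb, unsigned int n)
    {
        cb->len -= n;
        cb->base = (cb->base + n) & cb->mask;
    }

    static int cbuf_empty(const struct cbuf *cb)
    {
        return cb->len == 0;
    }

cbuf_eat() advances base by however many bytes dlm_process_incoming_buffer() consumed, and once the ring drains (cbuf_empty(), line 667) the page is freed rather than kept cached.
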
697 static int tcp_accept_from_sock(struct connection *con)
713 mutex_lock_nested(&con->sock_mutex, 0);
716 if (con->sock == NULL)
719 newsock->type = con->sock->type;
720 newsock->ops = con->sock->ops;
722 result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
739 mutex_unlock(&con->sock_mutex);
804 mutex_unlock(&con->sock_mutex);
809 mutex_unlock(&con->sock_mutex);
828 static void sctp_init_assoc(struct connection *con)
842 if (test_and_set_bit(CF_INIT_PENDING, &con->flags))
845 if (con->retries++ > MAX_CONNECT_RETRIES)
848 if (nodeid_to_addr(con->nodeid, (struct sockaddr *)&rem_addr)) {
849 log_print("no address for nodeid %d", con->nodeid);
863 spin_lock(&con->writequeue_lock);
865 if (list_empty(&con->writequeue)) {
866 spin_unlock(&con->writequeue_lock);
867 log_print("writequeue empty for nodeid %d", con->nodeid);
871 e = list_first_entry(&con->writequeue, struct writequeue_entry, list);
874 spin_unlock(&con->writequeue_lock);
892 con->nodeid, ret);
895 clear_bit(CF_CONNECT_PENDING, &con->flags);
896 clear_bit(CF_INIT_PENDING, &con->flags);
899 spin_lock(&con->writequeue_lock);
907 spin_unlock(&con->writequeue_lock);
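
sctp_init_assoc() (lines 828-907) starts an SCTP association lazily: CF_INIT_PENDING gates the path so only one INIT is in flight per connection (line 842), the same MAX_CONNECT_RETRIES budget bounds attempts (845), and the trigger is simply transmitting the first entry on the writequeue to the peer's address; on failure both pending bits are cleared (895-896) so the next queued write re-arms the attempt. A sketch of the peek-under-lock step at lines 863-874; the entry is deliberately left on the queue so the normal send path still owns it:

    #include <pthread.h>
    #include <stddef.h>

    struct wq_entry {
        struct wq_entry *next;
        size_t len;
    };

    struct connection {
        pthread_mutex_t writequeue_lock;
        struct wq_entry *writequeue;   /* NULL when empty */
    };

    /* Mirrors lines 863-874: peek at (do not dequeue) the first queued
     * message under writequeue_lock; sending it is what brings the
     * association up, and it stays queued for the normal send path. */
    static struct wq_entry *peek_first(struct connection *con)
    {
        struct wq_entry *e;

        pthread_mutex_lock(&con->writequeue_lock);
        e = con->writequeue;
        pthread_mutex_unlock(&con->writequeue_lock);
        return e;
    }
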
912 static void tcp_connect_to_sock(struct connection *con)
919 if (con->nodeid == 0) {
924 mutex_lock(&con->sock_mutex);
925 if (con->retries++ > MAX_CONNECT_RETRIES)
929 if (con->sock) {
941 if (dlm_nodeid_to_addr(con->nodeid, &saddr))
944 sock->sk->sk_user_data = con;
945 con->rx_action = receive_from_sock;
946 con->connect_action = tcp_connect_to_sock;
947 add_sock(sock, con);
962 log_print("connecting to %d", con->nodeid);
972 if (con->sock) {
973 sock_release(con->sock);
974 con->sock = NULL;
985 lowcomms_connect_sock(con);
989 mutex_unlock(&con->sock_mutex);
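
tcp_connect_to_sock() (lines 912-989) is the TCP counterpart: line 919 refuses to connect the nodeid-0 listening connection outward, line 925 enforces a retry budget, and on failure the half-made socket is released and lowcomms_connect_sock() re-queues the attempt. The control flow in miniature, with try_connect() as a hypothetical stand-in for the kernel connect call and an assumed value for MAX_CONNECT_RETRIES:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_CONNECT_RETRIES 6   /* identifier from the file; value assumed */

    struct connection {
        int nodeid;
        int retries;
    };

    /* Hypothetical stand-in for the kernel connect attempt. */
    static bool try_connect(struct connection *con)
    {
        (void)con;
        return false;
    }

    static void connect_work(struct connection *con)
    {
        if (con->retries++ > MAX_CONNECT_RETRIES) {
            printf("giving up on nodeid %d\n", con->nodeid);
            return;
        }
        if (!try_connect(con))
            printf("re-queueing connect for nodeid %d\n", con->nodeid);
    }
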
993 static struct socket *tcp_create_listen_sock(struct connection *con,
1020 sock->sk->sk_user_data = con;
1021 con->rx_action = tcp_accept_from_sock;
1022 con->connect_action = tcp_connect_to_sock;
1023 con->sock = sock;
1032 con->sock = NULL;
1103 struct connection *con = nodeid2con(0, GFP_NOFS);
1106 if (!con)
1139 /* Init con struct */
1140 sock->sk->sk_user_data = con;
1141 con->sock = sock;
1142 con->sock->sk->sk_data_ready = lowcomms_data_ready;
1143 con->rx_action = receive_from_sock;
1144 con->connect_action = sctp_init_assoc;
1151 result = add_sctp_bind_addr(con, &localaddr, addr_len, num);
1167 con->sock = NULL;
1175 struct connection *con = nodeid2con(0, GFP_NOFS);
1178 if (!con)
1190 sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
1192 add_sock(sock, con);
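
Lines 1175-1192 wire up TCP listening: the nodeid-0 connection owns the listening socket, and tcp_create_listen_sock() points rx_action at tcp_accept_from_sock (line 1021) so incoming connects flow through the same receive work path. In user-space terms the socket setup amounts to roughly the following; the port argument and the SO_REUSEADDR choice are assumptions, since the fragments elide those lines:

    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static int create_listen_sock(unsigned short port)
    {
        struct sockaddr_in addr = {
            .sin_family      = AF_INET,
            .sin_port        = htons(port),
            .sin_addr.s_addr = htonl(INADDR_ANY),
        };
        int one = 1;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
            return -1;
        setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
            listen(fd, 5) < 0) {
            close(fd);
            return -1;
        }
        return fd;
    }
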
1204 static struct writequeue_entry *new_writequeue_entry(struct connection *con,
1223 entry->con = con;
1230 struct connection *con;
1235 con = nodeid2con(nodeid, allocation);
1236 if (!con)
1239 spin_lock(&con->writequeue_lock);
1240 e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
1241 if ((&e->list == &con->writequeue) ||
1249 spin_unlock(&con->writequeue_lock);
1257 e = new_writequeue_entry(con, allocation);
1259 spin_lock(&con->writequeue_lock);
1263 list_add_tail(&e->list, &con->writequeue);
1264 spin_unlock(&con->writequeue_lock);
1273 struct connection *con = e->con;
1276 spin_lock(&con->writequeue_lock);
1281 spin_unlock(&con->writequeue_lock);
1283 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
1284 queue_work(send_workqueue, &con->swork);
1289 spin_unlock(&con->writequeue_lock);
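
Lines 1204-1289 are the send-side allocator and its commit half: dlm_lowcomms_get_buffer() first tries to carve the request out of spare room at the tail page of con->writequeue (the test at 1240-1241) and only calls new_writequeue_entry() when that fails, so many small DLM messages share one page; the commit path at 1273-1289 then raises CF_WRITE_PENDING to kick the send worker. A single-threaded user-space sketch of the packing idea; the end and users fields mirror the structure's apparent role here but should be read as illustrative:

    #include <stdlib.h>

    #define PAGE_SIZE 4096

    struct writequeue_entry {
        struct writequeue_entry *next;
        char page[PAGE_SIZE];
        int end;    /* bytes already handed out from this page */
        int users;  /* callers still filling a region they were given */
    };

    static struct writequeue_entry *wq_head, *wq_tail;

    /* Hand out len bytes, packing small messages into the tail page. */
    static char *get_buffer(int len)
    {
        struct writequeue_entry *e = wq_tail;
        char *p;

        if (e && PAGE_SIZE - e->end >= len) {
            p = e->page + e->end;        /* reuse spare room at the tail */
            e->end += len;
            e->users++;
            return p;
        }

        e = calloc(1, sizeof(*e));       /* new_writequeue_entry() above */
        if (!e)
            return NULL;
        e->end = len;
        e->users = 1;
        if (wq_tail)
            wq_tail->next = e;
        else
            wq_head = e;
        wq_tail = e;
        return e->page;
    }
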
1294 static void send_to_sock(struct connection *con)
1301 mutex_lock(&con->sock_mutex);
1302 if (con->sock == NULL)
1305 spin_lock(&con->writequeue_lock);
1307 e = list_entry(con->writequeue.next, struct writequeue_entry,
1309 if ((struct list_head *) e == &con->writequeue)
1315 spin_unlock(&con->writequeue_lock);
1319 ret = kernel_sendpage(con->sock, e->page, offset, len,
1331 spin_lock(&con->writequeue_lock);
1341 spin_unlock(&con->writequeue_lock);
1343 mutex_unlock(&con->sock_mutex);
1347 mutex_unlock(&con->sock_mutex);
1348 close_connection(con, false);
1349 lowcomms_connect_sock(con);
1353 mutex_unlock(&con->sock_mutex);
1354 if (!test_bit(CF_INIT_PENDING, &con->flags))
1355 lowcomms_connect_sock(con);
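
send_to_sock() (lines 1294-1355) drains con->writequeue with kernel_sendpage(), tolerating short writes and would-block results (the exact error checks fall between the fragments shown), and on a hard error closes the socket and re-queues a connect (1348-1349). A user-space translation of the per-write decision:

    #include <errno.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    /* Returns bytes consumed, 0 for "socket full, try again later", or -1
     * for "close and reconnect": the three outcomes visible in send_to_sock(). */
    static int send_some(int fd, const char *buf, size_t off, size_t len)
    {
        ssize_t ret = send(fd, buf + off, len, MSG_DONTWAIT | MSG_NOSIGNAL);

        if (ret >= 0)
            return (int)ret;   /* possibly a short write: entry stays queued */
        if (errno == EAGAIN || errno == EWOULDBLOCK)
            return 0;
        return -1;
    }
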
1359 static void clean_one_writequeue(struct connection *con)
1363 spin_lock(&con->writequeue_lock);
1364 list_for_each_entry_safe(e, safe, &con->writequeue, list) {
1368 spin_unlock(&con->writequeue_lock);
1375 struct connection *con;
1378 con = nodeid2con(nodeid, 0);
1379 if (con) {
1380 clear_bit(CF_CONNECT_PENDING, &con->flags);
1381 clear_bit(CF_WRITE_PENDING, &con->flags);
1382 set_bit(CF_CLOSE, &con->flags);
1383 if (cancel_work_sync(&con->swork))
1385 if (cancel_work_sync(&con->rwork))
1387 clean_one_writequeue(con);
1388 close_connection(con, true);
1396 struct connection *con = container_of(work, struct connection, rwork);
1399 clear_bit(CF_READ_PENDING, &con->flags);
1401 err = con->rx_action(con);
1408 struct connection *con = container_of(work, struct connection, swork);
1410 if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
1411 con->connect_action(con);
1412 set_bit(CF_WRITE_PENDING, &con->flags);
1414 if (test_and_clear_bit(CF_WRITE_PENDING, &con->flags))
1415 send_to_sock(con);
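
The work handlers at 1396-1415 encode two deliberate orderings: process_recv_sockets() clears CF_READ_PENDING before calling con->rx_action (1399-1401), so a data_ready() that fires mid-read re-queues the work rather than being lost, and process_send_sockets() sets CF_WRITE_PENDING right after a connect (1412) so the fresh socket immediately gets a send pass. The receive side, continuing the atomics idiom from the sketch after line 307:

    #include <stdatomic.h>

    #define CF_READ_PENDING 0x01u

    struct connection {
        atomic_uint flags;
    };

    static void do_receive(struct connection *con)
    {
        (void)con;   /* stands in for con->rx_action(con) */
    }

    /* Mirrors process_recv_sockets(): clear the bit first, then read, so a
     * wakeup that lands mid-read sets it again and re-queues the work. */
    static void process_recv(struct connection *con)
    {
        atomic_fetch_and(&con->flags, ~CF_READ_PENDING);
        do_receive(con);
    }
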
1452 static void stop_conn(struct connection *con)
1454 con->flags |= 0x0F;
1455 if (con->sock && con->sock->sk)
1456 con->sock->sk->sk_user_data = NULL;
1459 static void free_conn(struct connection *con)
1461 close_connection(con, true);
1462 if (con->othercon)
1463 kmem_cache_free(con_cache, con->othercon);
1464 hlist_del(&con->list);
1465 kmem_cache_free(con_cache, con);
1491 struct connection *con;
1526 con = nodeid2con(0,0);
1527 if (con) {
1528 close_connection(con, false);
1529 kmem_cache_free(con_cache, con);