Lines Matching refs:xprt
(scope: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/net/sunrpc/; each entry below is the file's own line number followed by the matching source line)

20 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
45 * The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
128 struct svc_xprt *xprt =
130 struct module *owner = xprt->xpt_class->xcl_owner;
131 if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags) &&
132 xprt->xpt_auth_cache != NULL)
133 svcauth_unix_info_release(xprt->xpt_auth_cache);
134 xprt->xpt_ops->xpo_free(xprt);
138 void svc_xprt_put(struct svc_xprt *xprt)
140 kref_put(&xprt->xpt_ref, svc_xprt_free);
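
The two functions above (svc_xprt_free at 128-134, svc_xprt_put at 138-140) are the release half of the transport's kref life cycle. A minimal sketch of the pattern a caller follows; example_pin_xprt is a hypothetical illustration for this listing, not code from the file:

    #include <linux/sunrpc/svc_xprt.h>

    /* Pin the transport across a window where its owner might drop
     * the last reference.  svc_xprt_get() wraps kref_get(); the final
     * svc_xprt_put() runs svc_xprt_free(), which releases the unix
     * auth cache (if XPT_CACHE_AUTH is set) and then calls the
     * transport class's xpo_free() method. */
    static void example_pin_xprt(struct svc_xprt *xprt)
    {
            svc_xprt_get(xprt);     /* xpt_ref++ */
            /* ... xprt cannot be freed inside this window ... */
            svc_xprt_put(xprt);     /* may trigger svc_xprt_free() */
    }
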
148 void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
151 memset(xprt, 0, sizeof(*xprt));
152 xprt->xpt_class = xcl;
153 xprt->xpt_ops = xcl->xcl_ops;
154 kref_init(&xprt->xpt_ref);
155 xprt->xpt_server = serv;
156 INIT_LIST_HEAD(&xprt->xpt_list);
157 INIT_LIST_HEAD(&xprt->xpt_ready);
158 INIT_LIST_HEAD(&xprt->xpt_deferred);
159 mutex_init(&xprt->xpt_mutex);
160 spin_lock_init(&xprt->xpt_lock);
161 set_bit(XPT_BUSY, &xprt->xpt_flags);
162 rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
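
svc_xprt_init (148-162) zeroes the structure, wires in the class and its ops vector, takes the initial kref, initializes the lists and locks, and deliberately leaves XPT_BUSY set so the new transport cannot be enqueued until its creator calls svc_xprt_received(). A sketch of how a concrete transport would use it; my_xprt and my_xprt_class are hypothetical names standing in for something like svc_sock:

    static struct svc_xprt_class my_xprt_class;     /* hypothetical class */

    struct my_xprt {
            struct svc_xprt xpt;    /* generic part, embedded */
            /* ... transport-private state ... */
    };

    static struct svc_xprt *my_xprt_create(struct svc_serv *serv)
    {
            struct my_xprt *mx = kzalloc(sizeof(*mx), GFP_KERNEL);

            if (!mx)
                    return NULL;
            /* Returns with XPT_BUSY set: invisible to svc_xprt_enqueue()
             * until svc_xprt_received() publishes it. */
            svc_xprt_init(&my_xprt_class, &mx->xpt, serv);
            return &mx->xpt;
    }
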
249 * Copy the local and remote xprt addresses to the rqstp structure
251 void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
255 memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
256 rqstp->rq_addrlen = xprt->xpt_remotelen;
262 sin = (struct sockaddr *)&xprt->xpt_local;
311 void svc_xprt_enqueue(struct svc_xprt *xprt)
313 struct svc_serv *serv = xprt->xpt_server;
318 if (!(xprt->xpt_flags &
323 pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
334 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
336 dprintk("svc: transport %p is dead, not enqueued\n", xprt);
347 if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
349 dprintk("svc: transport %p busy, not enqueued\n", xprt);
352 BUG_ON(xprt->xpt_pool != NULL);
353 xprt->xpt_pool = pool;
356 if (test_bit(XPT_CONN, &xprt->xpt_flags))
360 if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
364 if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
367 xprt);
368 xprt->xpt_pool = NULL;
369 clear_bit(XPT_BUSY, &xprt->xpt_flags);
379 xprt, rqstp);
385 rqstp->rq_xprt = xprt;
386 svc_xprt_get(xprt);
388 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
390 BUG_ON(xprt->xpt_pool != pool);
393 dprintk("svc: transport %p put into queue\n", xprt);
394 list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
396 BUG_ON(xprt->xpt_pool != pool);
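
The matches from 311-396 are svc_xprt_enqueue(), the routine that makes a transport runnable. A condensed paraphrase of its control flow (not the file's verbatim code; the real function also takes pool->sp_lock and prefers handing the transport directly to an idle thread):

    static void enqueue_flow(struct svc_xprt *xprt, struct svc_pool *pool)
    {
            if (test_bit(XPT_DEAD, &xprt->xpt_flags))
                    return;         /* dead transports are never queued */
            if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
                    return;         /* another CPU already owns it */
            if (!test_bit(XPT_CONN, &xprt->xpt_flags) &&
                !test_bit(XPT_CLOSE, &xprt->xpt_flags) &&
                !xprt->xpt_ops->xpo_has_wspace(xprt)) {
                    clear_bit(XPT_BUSY, &xprt->xpt_flags);
                    return;         /* no write space; retry on next event */
            }
            /* No idle thread to hand off to: park on the pool list
             * for svc_xprt_dequeue() (line 409 below) to pick up. */
            list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
    }
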
409 struct svc_xprt *xprt;
414 xprt = list_entry(pool->sp_sockets.next,
416 list_del_init(&xprt->xpt_ready);
419 xprt, atomic_read(&xprt->xpt_ref.refcount));
421 return xprt;
432 void svc_xprt_received(struct svc_xprt *xprt)
434 BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
435 xprt->xpt_pool = NULL;
436 /* As soon as we clear busy, the xprt could be closed and
439 svc_xprt_get(xprt);
440 clear_bit(XPT_BUSY, &xprt->xpt_flags);
441 svc_xprt_enqueue(xprt);
442 svc_xprt_put(xprt);
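
svc_xprt_received (432-442) is the other half of the XPT_BUSY handshake: the thread owning the transport clears BUSY and immediately re-enqueues it in case more work arrived while the bit was held; the get/put pair keeps the xprt alive across that instant. A hypothetical call site, publishing a transport created with XPT_BUSY still set:

    static void my_publish(struct svc_xprt *newxpt)
    {
            /* newxpt was born busy (see svc_xprt_init); this makes it
             * schedulable and queues any event already pending on it. */
            svc_xprt_received(newxpt);
    }
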
461 struct svc_xprt *xprt = rqstp->rq_xprt;
462 atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
465 svc_xprt_enqueue(xprt);
472 struct svc_xprt *xprt = rqstp->rq_xprt;
497 svc_xprt_put(xprt);
569 struct svc_xprt *xprt = NULL;
584 xprt = list_entry(serv->sv_tempsocks.prev,
587 set_bit(XPT_CLOSE, &xprt->xpt_flags);
588 svc_xprt_get(xprt);
592 if (xprt) {
593 svc_xprt_enqueue(xprt);
594 svc_xprt_put(xprt);
606 struct svc_xprt *xprt = NULL;
662 xprt = svc_xprt_dequeue(pool);
663 if (xprt) {
664 rqstp->rq_xprt = xprt;
665 svc_xprt_get(xprt);
667 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
703 xprt = rqstp->rq_xprt;
704 if (!xprt) {
717 if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
719 svc_delete_xprt(xprt);
720 } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
722 newxpt = xprt->xpt_ops->xpo_accept(xprt);
729 svc_check_conn_limits(xprt->xpt_server);
745 svc_xprt_received(xprt);
748 rqstp, pool->sp_id, xprt,
749 atomic_read(&xprt->xpt_ref.refcount));
750 rqstp->rq_deferred = svc_deferred_dequeue(xprt);
752 svc_xprt_received(xprt);
755 len = xprt->xpt_ops->xpo_recvfrom(rqstp);
756 svc_xprt_received(xprt);
767 clear_bit(XPT_OLD, &xprt->xpt_flags);
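
The matches from 606-767 come from svc_recv(): a server thread dequeues (or waits for) a transport and then dispatches on its state bits. A paraphrase of that dispatch, using svc_check_conn_limits() and svc_deferred_recv(), both static helpers in this file (the latter does not match refs:xprt, so it is absent from the listing above):

    static int recv_dispatch(struct svc_rqst *rqstp, struct svc_xprt *xprt)
    {
            int len = 0;

            if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
                    svc_delete_xprt(xprt);          /* deferred teardown */
            } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
                    struct svc_xprt *newxpt = xprt->xpt_ops->xpo_accept(xprt);

                    if (newxpt) {
                            svc_check_conn_limits(xprt->xpt_server);
                            svc_xprt_received(newxpt); /* new conn goes live */
                    }
                    svc_xprt_received(xprt);           /* re-arm listener */
            } else {
                    /* data transport: replay a deferral or read fresh data */
                    rqstp->rq_deferred = svc_deferred_dequeue(xprt);
                    if (rqstp->rq_deferred) {
                            svc_xprt_received(xprt);
                            len = svc_deferred_recv(rqstp);
                    } else {
                            len = xprt->xpt_ops->xpo_recvfrom(rqstp);
                            svc_xprt_received(xprt);
                    }
                    clear_bit(XPT_OLD, &xprt->xpt_flags); /* saw traffic */
            }
            return len;
    }
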
783 dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
793 struct svc_xprt *xprt;
797 xprt = rqstp->rq_xprt;
798 if (!xprt)
811 mutex_lock(&xprt->xpt_mutex);
812 if (test_bit(XPT_DEAD, &xprt->xpt_flags))
815 len = xprt->xpt_ops->xpo_sendto(rqstp);
816 mutex_unlock(&xprt->xpt_mutex);
817 rpc_wake_up(&xprt->xpt_bc_pending);
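
The svc_send() matches (793-817) show how replies are serialized: xpt_mutex ensures one reply at a time per transport, a reply racing with teardown is dropped once XPT_DEAD is set, and any backchannel waiter is woken afterwards. The same steps as a commented sketch:

    static int send_sketch(struct svc_rqst *rqstp)
    {
            struct svc_xprt *xprt = rqstp->rq_xprt;
            int len;

            mutex_lock(&xprt->xpt_mutex);
            if (test_bit(XPT_DEAD, &xprt->xpt_flags))
                    len = -ENOTCONN;        /* reply raced with close */
            else
                    len = xprt->xpt_ops->xpo_sendto(rqstp);
            mutex_unlock(&xprt->xpt_mutex);
            rpc_wake_up(&xprt->xpt_bc_pending);     /* backchannel waiters */
            return len;
    }
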
832 struct svc_xprt *xprt;
846 xprt = list_entry(le, struct svc_xprt, xpt_list);
850 if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
852 if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
853 test_bit(XPT_BUSY, &xprt->xpt_flags))
855 svc_xprt_get(xprt);
857 set_bit(XPT_CLOSE, &xprt->xpt_flags);
858 set_bit(XPT_DETACHED, &xprt->xpt_flags);
866 xprt = list_entry(le, struct svc_xprt, xpt_list);
868 dprintk("queuing xprt %p for closing\n", xprt);
871 svc_xprt_enqueue(xprt);
872 svc_xprt_put(xprt);
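
svc_age_temp_xprts (832-872) is a two-pass mark-and-sweep over sv_tempsocks: the first pass an idle transport survives merely marks it XPT_OLD; one still marked on the next pass, with no extra references and not busy, is flagged for closing. A paraphrase of the victim selection (the real function runs under sv_lock and moves victims to a private list before enqueueing them):

    static void age_sketch(struct svc_serv *serv)
    {
            struct svc_xprt *xprt;

            list_for_each_entry(xprt, &serv->sv_tempsocks, xpt_list) {
                    if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
                            continue;       /* first pass: just mark */
                    if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
                        test_bit(XPT_BUSY, &xprt->xpt_flags))
                            continue;       /* still in use: spare it */
                    svc_xprt_get(xprt);
                    set_bit(XPT_CLOSE, &xprt->xpt_flags);
                    set_bit(XPT_DETACHED, &xprt->xpt_flags);
                    /* then, off the lock: svc_xprt_enqueue(xprt);
                     * svc_xprt_put(xprt); as in lines 866-872 above */
            }
    }
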
881 void svc_delete_xprt(struct svc_xprt *xprt)
883 struct svc_serv *serv = xprt->xpt_server;
887 if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
890 dprintk("svc: svc_delete_xprt(%p)\n", xprt);
891 xprt->xpt_ops->xpo_detach(xprt);
894 if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
895 list_del_init(&xprt->xpt_list);
903 if (test_bit(XPT_TEMP, &xprt->xpt_flags))
907 while ((dr = svc_deferred_dequeue(xprt)) != NULL)
910 svc_xprt_put(xprt);
913 void svc_close_xprt(struct svc_xprt *xprt)
915 set_bit(XPT_CLOSE, &xprt->xpt_flags);
916 if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
920 svc_xprt_get(xprt);
921 svc_delete_xprt(xprt);
922 clear_bit(XPT_BUSY, &xprt->xpt_flags);
923 svc_xprt_put(xprt);
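
svc_delete_xprt (881-910) and svc_close_xprt (913-923) implement the teardown protocol: XPT_CLOSE requests it, XPT_BUSY elects the thread that performs it, and XPT_DEAD (set exactly once via test_and_set_bit) makes svc_delete_xprt idempotent. The close path again, annotated step by step:

    static void close_sketch(struct svc_xprt *xprt)
    {
            set_bit(XPT_CLOSE, &xprt->xpt_flags);
            if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
                    return;         /* current owner will notice XPT_CLOSE */
            svc_xprt_get(xprt);     /* hold it across the delete */
            svc_delete_xprt(xprt);  /* detach, unlist, drop deferrals */
            clear_bit(XPT_BUSY, &xprt->xpt_flags);
            svc_xprt_put(xprt);
    }
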
929 struct svc_xprt *xprt;
932 list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
933 set_bit(XPT_CLOSE, &xprt->xpt_flags);
934 if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
938 list_del_init(&xprt->xpt_ready);
939 clear_bit(XPT_BUSY, &xprt->xpt_flags);
941 svc_close_xprt(xprt);
953 struct svc_xprt *xprt = dr->xprt;
955 spin_lock(&xprt->xpt_lock);
956 set_bit(XPT_DEFERRED, &xprt->xpt_flags);
957 if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
958 spin_unlock(&xprt->xpt_lock);
960 svc_xprt_put(xprt);
965 dr->xprt = NULL;
966 list_add(&dr->handle.recent, &xprt->xpt_deferred);
967 spin_unlock(&xprt->xpt_lock);
968 svc_xprt_enqueue(xprt);
969 svc_xprt_put(xprt);
976 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
978 * This code can only handle requests that consist of an xprt-header
1013 dr->xprt = rqstp->rq_xprt;
1044 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
1048 if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
1050 spin_lock(&xprt->xpt_lock);
1051 clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
1052 if (!list_empty(&xprt->xpt_deferred)) {
1053 dr = list_entry(xprt->xpt_deferred.next,
1057 set_bit(XPT_DEFERRED, &xprt->xpt_flags);
1059 spin_unlock(&xprt->xpt_lock);
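
The deferral machinery (953-1059): a request that cannot be serviced yet is parked on xprt->xpt_deferred by svc_revisit(), XPT_DEFERRED advertises that the list may be non-empty, and svc_deferred_dequeue() replays entries one at a time under xpt_lock, re-setting the bit when more remain. The dequeue side as a commented sketch (same logic as lines 1044-1059):

    static struct svc_deferred_req *dequeue_sketch(struct svc_xprt *xprt)
    {
            struct svc_deferred_req *dr = NULL;

            if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
                    return NULL;    /* fast path: nothing parked */
            spin_lock(&xprt->xpt_lock);
            clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
            if (!list_empty(&xprt->xpt_deferred)) {
                    dr = list_entry(xprt->xpt_deferred.next,
                                    struct svc_deferred_req, handle.recent);
                    list_del_init(&dr->handle.recent);
                    /* more entries may remain: leave the hint set */
                    set_bit(XPT_DEFERRED, &xprt->xpt_flags);
            }
            spin_unlock(&xprt->xpt_lock);
            return dr;
    }
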
1081 struct svc_xprt *xprt;
1089 list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
1090 if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
1092 if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
1094 if (port != 0 && port != svc_xprt_local_port(xprt))
1096 found = xprt;
1097 svc_xprt_get(xprt);
1105 static int svc_one_xprt_name(const struct svc_xprt *xprt,
1111 xprt->xpt_class->xcl_name,
1112 svc_xprt_local_port(xprt));
1132 struct svc_xprt *xprt;
1144 list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
1145 len = svc_one_xprt_name(xprt, pos, buflen - totlen);
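
The final group (1081-1145) covers name-based lookup and listing of permanent transports. svc_find_xprt() matches on class name, address family, and port, and takes a reference on the transport it returns, so the caller must drop it. A hypothetical use (the "tcp"/AF_INET/2049 values are illustrative only):

    static void find_sketch(struct svc_serv *serv)
    {
            struct svc_xprt *x = svc_find_xprt(serv, "tcp", AF_INET, 2049);

            if (x) {
                    /* ... inspect the transport ... */
                    svc_xprt_put(x);        /* balance svc_find_xprt()'s get */
            }
    }
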