svc_dg.c revision 109957
/*	$NetBSD: svc_dg.c,v 1.4 2000/07/06 03:10:35 christos Exp $	*/

/*
 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
 * unrestricted use provided that this legend is included on all tape
 * media and as a part of the software program in whole or part.  Users
 * may copy or modify Sun RPC without charge, but are not authorized
 * to license or distribute it to anyone else except as part of a product or
 * program developed by the user.
 *
 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
 *
 * Sun RPC is provided with no support and without any obligation on the
 * part of Sun Microsystems, Inc. to assist in its use, correction,
 * modification or enhancement.
 *
 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
 * OR ANY PART THEREOF.
 *
 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
 * or profits or other special, indirect and consequential damages, even if
 * Sun has been advised of the possibility of such damages.
 *
 * Sun Microsystems, Inc.
 * 2550 Garcia Avenue
 * Mountain View, California  94043
 */

/*
 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
 */

/* #ident	"@(#)svc_dg.c	1.17	94/04/24 SMI" */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/lib/libc/rpc/svc_dg.c 109957 2003-01-27 22:46:50Z mbr $");

/*
 * svc_dg.c, Server side for connectionless RPC.
 *
 * Does some caching in the hopes of achieving execute-at-most-once semantics.
 */

#include "namespace.h"
#include "reentrant.h"
#include <sys/types.h>
#include <sys/socket.h>
#include <rpc/rpc.h>
#include <rpc/svc_dg.h>
#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef RPC_CACHE_DEBUG
#include <netconfig.h>
#include <netdir.h>
#endif
#include <err.h>
#include "un-namespace.h"

#include "rpc_com.h"

#define	su_data(xprt)	((struct svc_dg_data *)(xprt->xp_p2))
#define	rpc_buffer(xprt) ((xprt)->xp_p1)

#ifndef MAX
#define	MAX(a, b)	(((a) > (b)) ? (a) : (b))
#endif

static void svc_dg_ops(SVCXPRT *);
static enum xprt_stat svc_dg_stat(SVCXPRT *);
static bool_t svc_dg_recv(SVCXPRT *, struct rpc_msg *);
static bool_t svc_dg_reply(SVCXPRT *, struct rpc_msg *);
static bool_t svc_dg_getargs(SVCXPRT *, xdrproc_t, void *);
static bool_t svc_dg_freeargs(SVCXPRT *, xdrproc_t, void *);
static void svc_dg_destroy(SVCXPRT *);
static bool_t svc_dg_control(SVCXPRT *, const u_int, void *);
static int cache_get(SVCXPRT *, struct rpc_msg *, char **, size_t *);
static void cache_set(SVCXPRT *, size_t);
int svc_dg_enablecache(SVCXPRT *, u_int);

/*
 * Usage:
 *	xprt = svc_dg_create(sock, sendsize, recvsize);
 * Does the other connectionless-specific initializations.
 * Once *xprt is initialized, it is registered
 * (see svc.h, xprt_register).  If recvsize or sendsize is 0, a
 * suitable system default is chosen.
 * The routine returns NULL if a problem occurred.
 */
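
/*
 * Example (an illustrative sketch only; PROGNUM, VERSNUM and my_dispatch
 * are hypothetical, application-supplied names): a caller that already
 * holds a bound SOCK_DGRAM descriptor could create and register the
 * transport roughly as follows.
 *
 *	SVCXPRT *xprt;
 *
 *	xprt = svc_dg_create(sock, 0, 0);	(0, 0: use system defaults)
 *	if (xprt == NULL)
 *		errx(1, "svc_dg_create failed");
 *	if (!svc_reg(xprt, PROGNUM, VERSNUM, my_dispatch, NULL))
 *		errx(1, "svc_reg failed");	(NULL: skip rpcbind registration)
 *	svc_run();
 */
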
static const char svc_dg_str[] = "svc_dg_create: %s";
static const char svc_dg_err1[] = "could not get transport information";
static const char svc_dg_err2[] = " transport does not support data transfer";
static const char __no_mem_str[] = "out of memory";

SVCXPRT *
svc_dg_create(fd, sendsize, recvsize)
	int fd;
	u_int sendsize;
	u_int recvsize;
{
	SVCXPRT *xprt;
	struct svc_dg_data *su = NULL;
	struct __rpc_sockinfo si;
	struct sockaddr_storage ss;
	socklen_t slen;

	if (!__rpc_fd2sockinfo(fd, &si)) {
		warnx(svc_dg_str, svc_dg_err1);
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize);
	recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize);
	if ((sendsize == 0) || (recvsize == 0)) {
		warnx(svc_dg_str, svc_dg_err2);
		return (NULL);
	}

	xprt = mem_alloc(sizeof (SVCXPRT));
	if (xprt == NULL)
		goto freedata;
	memset(xprt, 0, sizeof (SVCXPRT));

	su = mem_alloc(sizeof (*su));
	if (su == NULL)
		goto freedata;
	su->su_iosz = ((MAX(sendsize, recvsize) + 3) / 4) * 4;
	if ((rpc_buffer(xprt) = mem_alloc(su->su_iosz)) == NULL)
		goto freedata;
	xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), su->su_iosz,
		XDR_DECODE);
	su->su_cache = NULL;
	xprt->xp_fd = fd;
	xprt->xp_p2 = su;
	xprt->xp_verf.oa_base = su->su_verfbody;
	svc_dg_ops(xprt);
	xprt->xp_rtaddr.maxlen = sizeof (struct sockaddr_storage);

	slen = sizeof ss;
	if (_getsockname(fd, (struct sockaddr *)(void *)&ss, &slen) < 0)
		goto freedata;
	xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
	xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
	xprt->xp_ltaddr.len = slen;
	memcpy(xprt->xp_ltaddr.buf, &ss, slen);

	xprt_register(xprt);
	return (xprt);
freedata:
	(void) warnx(svc_dg_str, __no_mem_str);
	if (xprt) {
		if (su)
			(void) mem_free(su, sizeof (*su));
		(void) mem_free(xprt, sizeof (SVCXPRT));
	}
	return (NULL);
}

/*ARGSUSED*/
static enum xprt_stat
svc_dg_stat(xprt)
	SVCXPRT *xprt;
{
	return (XPRT_IDLE);
}

static bool_t
svc_dg_recv(xprt, msg)
	SVCXPRT *xprt;
	struct rpc_msg *msg;
{
	struct svc_dg_data *su = su_data(xprt);
	XDR *xdrs = &(su->su_xdrs);
	char *reply;
	struct sockaddr_storage ss;
	socklen_t alen;
	size_t replylen;
	ssize_t rlen;

again:
	alen = sizeof (struct sockaddr_storage);
	rlen = _recvfrom(xprt->xp_fd, rpc_buffer(xprt), su->su_iosz, 0,
	    (struct sockaddr *)(void *)&ss, &alen);
	if (rlen == -1 && errno == EINTR)
		goto again;
	if (rlen == -1 || (rlen < (ssize_t)(4 * sizeof (u_int32_t))))
		return (FALSE);
	if (xprt->xp_rtaddr.len < alen) {
		if (xprt->xp_rtaddr.len != 0)
			mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.len);
		xprt->xp_rtaddr.buf = mem_alloc(alen);
		xprt->xp_rtaddr.len = alen;
	}
	memcpy(xprt->xp_rtaddr.buf, &ss, alen);
#ifdef PORTMAP
	if (ss.ss_family == AF_INET) {
		xprt->xp_raddr = *(struct sockaddr_in *)xprt->xp_rtaddr.buf;
		xprt->xp_addrlen = sizeof (struct sockaddr_in);
	}
#endif				/* PORTMAP */
	xdrs->x_op = XDR_DECODE;
	XDR_SETPOS(xdrs, 0);
	if (! xdr_callmsg(xdrs, msg)) {
		return (FALSE);
	}
	su->su_xid = msg->rm_xid;
	if (su->su_cache != NULL) {
		if (cache_get(xprt, msg, &reply, &replylen)) {
			(void)_sendto(xprt->xp_fd, reply, replylen, 0,
			    (struct sockaddr *)(void *)&ss, alen);
			return (FALSE);
		}
	}
	return (TRUE);
}

static bool_t
svc_dg_reply(xprt, msg)
	SVCXPRT *xprt;
	struct rpc_msg *msg;
{
	struct svc_dg_data *su = su_data(xprt);
	XDR *xdrs = &(su->su_xdrs);
	bool_t stat = FALSE;
	size_t slen;

	xdrs->x_op = XDR_ENCODE;
	XDR_SETPOS(xdrs, 0);
	msg->rm_xid = su->su_xid;
	if (xdr_replymsg(xdrs, msg)) {
		slen = XDR_GETPOS(xdrs);
		if (_sendto(xprt->xp_fd, rpc_buffer(xprt), slen, 0,
		    (struct sockaddr *)xprt->xp_rtaddr.buf,
		    (socklen_t)xprt->xp_rtaddr.len) == (ssize_t) slen) {
			stat = TRUE;
			if (su->su_cache)
				cache_set(xprt, slen);
		}
	}
	return (stat);
}

static bool_t
svc_dg_getargs(xprt, xdr_args, args_ptr)
	SVCXPRT *xprt;
	xdrproc_t xdr_args;
	void *args_ptr;
{
	return (*xdr_args)(&(su_data(xprt)->su_xdrs), args_ptr);
}

static bool_t
svc_dg_freeargs(xprt, xdr_args, args_ptr)
	SVCXPRT *xprt;
	xdrproc_t xdr_args;
	void *args_ptr;
{
	XDR *xdrs = &(su_data(xprt)->su_xdrs);

	xdrs->x_op = XDR_FREE;
	return (*xdr_args)(xdrs, args_ptr);
}

static void
svc_dg_destroy(xprt)
	SVCXPRT *xprt;
{
	struct svc_dg_data *su = su_data(xprt);

	xprt_unregister(xprt);
	if (xprt->xp_fd != -1)
		(void)_close(xprt->xp_fd);
	XDR_DESTROY(&(su->su_xdrs));
	(void) mem_free(rpc_buffer(xprt), su->su_iosz);
	(void) mem_free(su, sizeof (*su));
	if (xprt->xp_rtaddr.buf)
		(void) mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.maxlen);
	if (xprt->xp_ltaddr.buf)
		(void) mem_free(xprt->xp_ltaddr.buf, xprt->xp_ltaddr.maxlen);
	if (xprt->xp_tp)
		(void) free(xprt->xp_tp);
	(void) mem_free(xprt, sizeof (SVCXPRT));
}

static bool_t
/*ARGSUSED*/
svc_dg_control(xprt, rq, in)
	SVCXPRT *xprt;
	const u_int	rq;
	void		*in;
{
	return (FALSE);
}

static void
svc_dg_ops(xprt)
	SVCXPRT *xprt;
{
	static struct xp_ops ops;
	static struct xp_ops2 ops2;
	extern mutex_t ops_lock;

/* VARIABLES PROTECTED BY ops_lock: ops */

	mutex_lock(&ops_lock);
	if (ops.xp_recv == NULL) {
		ops.xp_recv = svc_dg_recv;
		ops.xp_stat = svc_dg_stat;
		ops.xp_getargs = svc_dg_getargs;
		ops.xp_reply = svc_dg_reply;
		ops.xp_freeargs = svc_dg_freeargs;
		ops.xp_destroy = svc_dg_destroy;
		ops2.xp_control = svc_dg_control;
	}
	xprt->xp_ops = &ops;
	xprt->xp_ops2 = &ops2;
	mutex_unlock(&ops_lock);
}

/*  The CACHING COMPONENT */

/*
 * Could have been a separate file, but some part of it depends upon the
 * private structure of the client handle.
 *
 * FIFO cache for the connectionless (datagram) server.
 * Copies pointers to reply buffers into the FIFO cache.
 * Buffers are sent again if retransmissions are detected.
 */

#define	SPARSENESS 4	/* 75% sparse */

#define	ALLOC(type, size)	\
	(type *) mem_alloc((sizeof (type) * (size)))

#define	MEMZERO(addr, type, size)	 \
	(void) memset((void *) (addr), 0, sizeof (type) * (int) (size))

#define	FREE(addr, type, size)	\
	mem_free((addr), (sizeof (type) * (size)))

/*
 * An entry in the cache
 */
typedef struct cache_node *cache_ptr;
struct cache_node {
	/*
	 * Index into cache is xid, proc, vers, prog and address
	 */
	u_int32_t cache_xid;
	rpcproc_t cache_proc;
	rpcvers_t cache_vers;
	rpcprog_t cache_prog;
	struct netbuf cache_addr;
	/*
	 * The cached reply and length
	 */
	char *cache_reply;
	size_t cache_replylen;
	/*
	 * Next node on the list, if there is a collision
	 */
	cache_ptr cache_next;
};

/*
 * The entire cache
 */
struct cl_cache {
	u_int uc_size;		/* size of cache */
	cache_ptr *uc_entries;	/* hash table of entries in cache */
	cache_ptr *uc_fifo;	/* fifo list of entries in cache */
	u_int uc_nextvictim;	/* points to next victim in fifo list */
	rpcprog_t uc_prog;	/* saved program number */
	rpcvers_t uc_vers;	/* saved version number */
	rpcproc_t uc_proc;	/* saved procedure number */
};


/*
 * the hashing function
 */
#define	CACHE_LOC(transp, xid)	\
	(xid % (SPARSENESS * ((struct cl_cache *) \
		su_data(transp)->su_cache)->uc_size))

extern mutex_t	dupreq_lock;

/*
 * Enable use of the cache. Returns 1 on success, 0 on failure.
 * Note: there is no disable.
 */
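
/*
 * Example (an illustrative sketch): a server wanting duplicate-request
 * protection would enable the cache once, right after creating the
 * transport.  The size of 128 entries is an arbitrary value chosen here
 * for illustration; it bounds how many replies are retained.
 *
 *	if (!svc_dg_enablecache(xprt, 128))
 *		warnx("could not enable the duplicate request cache");
 */
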
static const char cache_enable_str[] = "svc_enablecache: %s %s";
static const char alloc_err[] = "could not allocate cache ";
static const char enable_err[] = "cache already enabled";

int
svc_dg_enablecache(transp, size)
	SVCXPRT *transp;
	u_int size;
{
	struct svc_dg_data *su = su_data(transp);
	struct cl_cache *uc;

	mutex_lock(&dupreq_lock);
	if (su->su_cache != NULL) {
		(void) warnx(cache_enable_str, enable_err, " ");
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	uc = ALLOC(struct cl_cache, 1);
	if (uc == NULL) {
		warnx(cache_enable_str, alloc_err, " ");
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	uc->uc_size = size;
	uc->uc_nextvictim = 0;
	uc->uc_entries = ALLOC(cache_ptr, size * SPARSENESS);
	if (uc->uc_entries == NULL) {
		warnx(cache_enable_str, alloc_err, "data");
		FREE(uc, struct cl_cache, 1);
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	MEMZERO(uc->uc_entries, cache_ptr, size * SPARSENESS);
	uc->uc_fifo = ALLOC(cache_ptr, size);
	if (uc->uc_fifo == NULL) {
		warnx(cache_enable_str, alloc_err, "fifo");
		FREE(uc->uc_entries, cache_ptr, size * SPARSENESS);
		FREE(uc, struct cl_cache, 1);
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	MEMZERO(uc->uc_fifo, cache_ptr, size);
	su->su_cache = (char *)(void *)uc;
	mutex_unlock(&dupreq_lock);
	return (1);
}

/*
 * Set an entry in the cache.  It assumes that the uc entry is set from
 * the earlier call to cache_get() for the same procedure.  This will always
 * happen because cache_get() is called by svc_dg_recv() and cache_set() is
 * called by svc_dg_reply().  All this hoopla because the right RPC parameters
 * are not available at svc_dg_reply() time.
 */

static const char cache_set_str[] = "cache_set: %s";
static const char cache_set_err1[] = "victim not found";
static const char cache_set_err2[] = "victim alloc failed";
static const char cache_set_err3[] = "could not allocate new rpc buffer";

static void
cache_set(xprt, replylen)
	SVCXPRT *xprt;
	size_t replylen;
{
	cache_ptr victim;
	cache_ptr *vicp;
	struct svc_dg_data *su = su_data(xprt);
	struct cl_cache *uc = (struct cl_cache *) su->su_cache;
	u_int loc;
	char *newbuf;
#ifdef RPC_CACHE_DEBUG
	struct netconfig *nconf;
	char *uaddr;
#endif

	mutex_lock(&dupreq_lock);
	/*
	 * Find space for the new entry, either by
	 * reusing an old entry, or by mallocing a new one
	 */
	victim = uc->uc_fifo[uc->uc_nextvictim];
	if (victim != NULL) {
		loc = CACHE_LOC(xprt, victim->cache_xid);
		for (vicp = &uc->uc_entries[loc];
			*vicp != NULL && *vicp != victim;
			vicp = &(*vicp)->cache_next)
			;
		if (*vicp == NULL) {
			warnx(cache_set_str, cache_set_err1);
			mutex_unlock(&dupreq_lock);
			return;
		}
		*vicp = victim->cache_next;	/* remove from cache */
		newbuf = victim->cache_reply;
	} else {
		victim = ALLOC(struct cache_node, 1);
		if (victim == NULL) {
			warnx(cache_set_str, cache_set_err2);
			mutex_unlock(&dupreq_lock);
			return;
		}
		newbuf = mem_alloc(su->su_iosz);
		if (newbuf == NULL) {
			warnx(cache_set_str, cache_set_err3);
			FREE(victim, struct cache_node, 1);
			mutex_unlock(&dupreq_lock);
			return;
		}
	}

	/*
	 * Store it away
	 */
#ifdef RPC_CACHE_DEBUG
	if (nconf = getnetconfigent(xprt->xp_netid)) {
		uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
		freenetconfigent(nconf);
		printf(
	"cache set for xid= %x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
			su->su_xid, uc->uc_prog, uc->uc_vers,
			uc->uc_proc, uaddr);
		free(uaddr);
	}
#endif
	victim->cache_replylen = replylen;
	victim->cache_reply = rpc_buffer(xprt);
	rpc_buffer(xprt) = newbuf;
	xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt),
			su->su_iosz, XDR_ENCODE);
	victim->cache_xid = su->su_xid;
	victim->cache_proc = uc->uc_proc;
	victim->cache_vers = uc->uc_vers;
	victim->cache_prog = uc->uc_prog;
	victim->cache_addr = xprt->xp_rtaddr;
	victim->cache_addr.buf = ALLOC(char, xprt->xp_rtaddr.len);
	(void) memcpy(victim->cache_addr.buf, xprt->xp_rtaddr.buf,
	    (size_t)xprt->xp_rtaddr.len);
	loc = CACHE_LOC(xprt, victim->cache_xid);
	victim->cache_next = uc->uc_entries[loc];
	uc->uc_entries[loc] = victim;
	uc->uc_fifo[uc->uc_nextvictim++] = victim;
	uc->uc_nextvictim %= uc->uc_size;
	mutex_unlock(&dupreq_lock);
}

/*
 * Try to get an entry from the cache
 * return 1 if found, 0 if not found and set the stage for cache_set()
 */
static int
cache_get(xprt, msg, replyp, replylenp)
	SVCXPRT *xprt;
	struct rpc_msg *msg;
	char **replyp;
	size_t *replylenp;
{
	u_int loc;
	cache_ptr ent;
	struct svc_dg_data *su = su_data(xprt);
	struct cl_cache *uc = (struct cl_cache *) su->su_cache;
#ifdef RPC_CACHE_DEBUG
	struct netconfig *nconf;
	char *uaddr;
#endif

	mutex_lock(&dupreq_lock);
	loc = CACHE_LOC(xprt, su->su_xid);
	for (ent = uc->uc_entries[loc]; ent != NULL; ent = ent->cache_next) {
		if (ent->cache_xid == su->su_xid &&
			ent->cache_proc == msg->rm_call.cb_proc &&
			ent->cache_vers == msg->rm_call.cb_vers &&
			ent->cache_prog == msg->rm_call.cb_prog &&
			ent->cache_addr.len == xprt->xp_rtaddr.len &&
			(memcmp(ent->cache_addr.buf, xprt->xp_rtaddr.buf,
				xprt->xp_rtaddr.len) == 0)) {
#ifdef RPC_CACHE_DEBUG
			if (nconf = getnetconfigent(xprt->xp_netid)) {
				uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
				freenetconfigent(nconf);
				printf(
	"cache entry found for xid=%x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
					su->su_xid, msg->rm_call.cb_prog,
					msg->rm_call.cb_vers,
					msg->rm_call.cb_proc, uaddr);
				free(uaddr);
			}
#endif
			*replyp = ent->cache_reply;
			*replylenp = ent->cache_replylen;
			mutex_unlock(&dupreq_lock);
			return (1);
		}
	}
	/*
	 * Failed to find entry
	 * Remember a few things so we can do a set later
	 */
	uc->uc_proc = msg->rm_call.cb_proc;
	uc->uc_vers = msg->rm_call.cb_vers;
	uc->uc_prog = msg->rm_call.cb_prog;
	mutex_unlock(&dupreq_lock);
	return (0);
}
604