getnetgrent.c revision 2830:5228d1267a01
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * getnetgrent.c
 *
 *	- name-service switch frontend routines for the netgroup API.
 *
 * Policy decision:
 *	If netgroup A refers to netgroup B, both must occur in the same
 *	source (any other choice gives very confusing semantics).  This
 *	assumption is deeply embedded in the code below and in the backends.
 *
 * innetgr() is implemented on top of something called __multi_innetgr(),
 * which replaces each (char *) argument of innetgr() with a counted vector
 * of (char *).  The semantics are the same as an OR of the results of
 * innetgr() operations on each possible 4-tuple picked from the arguments,
 * but it's possible to implement some cases more efficiently.  This is
 * important for mountd, which used to read YP netgroup.byhost directly in
 * order to determine efficiently whether a given host belonged to any one
 * of a long list of netgroups.  Wildcarded arguments are indicated by a
 * count of zero.
 */
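/*
 * Illustrative sketch only (not part of this file's interfaces): how a
 * caller such as mountd might use __multi_innetgr() to ask, in one call,
 * whether a single host belongs to any of several netgroups.  The group
 * and host names are hypothetical; user and domain are wildcarded by
 * passing a count of zero.
 *
 *	char *groups[] = { "eng", "ops", "backup" };
 *	char *host = "foohost";
 *
 *	if (__multi_innetgr(3, (nss_innetgr_argv)groups,
 *	    1, (nss_innetgr_argv)&host,
 *	    0, 0,
 *	    0, 0))
 *		... the host is in at least one of the groups ...
 */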

#include "synonyms.h"
#include <string.h>
#include <synch.h>
#include <nss_dbdefs.h>
#include <mtlib.h>
#include <libc.h>

static DEFINE_NSS_DB_ROOT(db_root);

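/*
 * Switch-database initialization function passed to nss_search(); it
 * supplies the database name and default configuration for the netgroup
 * database before the switch engine consults the backends.
 */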
void
_nss_initf_netgroup(p)
	nss_db_params_t	*p;
{
	p->name	= NSS_DBNAM_NETGROUP;
	p->default_config = NSS_DEFCONF_NETGROUP;
}

/*
 * The netgroup routines aren't quite like the majority of the switch clients.
 *   innetgr() more-or-less fits the getXXXbyYYY mould, but for the others:
 *	- setnetgrent("netgroup") is really a getXXXbyYYY routine, i.e. it
 *	  searches the sources until it finds an entry with the given name.
 *	  Rather than returning the (potentially large) entry, it simply
 *	  initializes a cursor, and then...
 *	- getnetgrent(...) is repeatedly invoked by the user to extract the
 *	  contents of the entry found by setnetgrent().
 *	- endnetgrent() is almost like a real endXXXent routine.
 * The behaviour in NSS was:
 *  If we were certain that all the backends could provide netgroup information
 *  in a common form, we could make the setnetgrent() backend return the entire
 *  entry to the frontend, then implement getnetgrent() and endnetgrent()
 *  strictly in the frontend (aka here).  But we're not certain, so we won't.
 * In NSS2:
 *  Since it is nscd that both accumulates and returns the results, we can
 *  return the entire result on the setnetgrent() call.
 *
 * NOTE:
 *	In the SunOS 4.x (YP) version of this code, innetgr() did not
 *	affect the state of {set,get,end}netgrent().  Somewhere out
 *	there probably lurks a program that depends on this behaviour,
 *	so this version (both frontend and backends) had better
 *	behave the same way.
 */
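/*
 * Illustrative sketch only (not part of this file): the usual calling
 * sequence for the enumeration interface.  The netgroup name is
 * hypothetical.
 *
 *	char *machine, *user, *domain;
 *
 *	(void) setnetgrent("eng");
 *	while (getnetgrent(&machine, &user, &domain) == 1)
 *		... use the (machine, user, domain) triple ...
 *	(void) endnetgrent();
 */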

/* ===> ?? fix "__" name */
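/*
 * Packs the counted (group, machine, user, domain) vectors into a
 * struct nss_innetgr_args and hands it to the switch engine as an
 * NSS_DBOP_NETGROUP_IN lookup.  Returns 1 if some backend reports a
 * match, 0 otherwise.
 */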
int
__multi_innetgr(ngroup,	pgroup,
		nhost,	phost,
		nuser,	puser,
		ndomain, pdomain)
	nss_innetgr_argc	ngroup, nhost, nuser, ndomain;
	nss_innetgr_argv	pgroup, phost, puser, pdomain;
{
	struct nss_innetgr_args	ia;

	if (ngroup == 0) {
		/* One thing fewer for the backends to worry about */
		return (0);
	}

	ia.groups.argc			= ngroup;
	ia.groups.argv			= pgroup;
	ia.arg[NSS_NETGR_MACHINE].argc	= nhost;
	ia.arg[NSS_NETGR_MACHINE].argv	= phost;
	ia.arg[NSS_NETGR_USER].argc	= nuser;
	ia.arg[NSS_NETGR_USER].argv	= puser;
	ia.arg[NSS_NETGR_DOMAIN].argc	= ndomain;
	ia.arg[NSS_NETGR_DOMAIN].argv	= pdomain;
	ia.status			= NSS_NETGR_NO;

	(void) nss_search(&db_root, _nss_initf_netgroup,
	    NSS_DBOP_NETGROUP_IN, &ia);
	return (ia.status == NSS_NETGR_FOUND);
}

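/*
 * innetgr() is the classical single-tuple interface.  The IA() macro
 * below converts each (possibly NULL) string argument into the counted
 * vector form that __multi_innetgr() expects: a non-NULL argument
 * becomes a one-element vector, a NULL argument becomes a count of
 * zero, i.e. a wildcard.
 */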
int
innetgr(group, host, user, domain)
	const char *group, *host, *user, *domain;
{
#define	IA(charp)	\
	(nss_innetgr_argc)((charp) != 0), (nss_innetgr_argv)(&(charp))

	return (__multi_innetgr(IA(group), IA(host), IA(user), IA(domain)));
}

/*
 * Context for setnetgrent()/getnetgrent().  If the user is being sensible
 * the requests will be serialized anyway, but let's play safe and
 * serialize them ourselves (anything to prevent a coredump)...
 * We can't use lmutex_lock() here because we don't know what the backends
 * that we call may call in turn.  They might call malloc()/free().
 * So we use the brute-force fork_lock_enter() instead.
 */
static nss_backend_t	*getnetgrent_backend;

int
setnetgrent(const char *netgroup)
{
	nss_backend_t	*be;

	if (netgroup == NULL) {
		/* Prevent coredump, otherwise don't do anything profound */
		netgroup = "";
	}

	(void) fork_lock_enter(NULL);
	be = getnetgrent_backend;
	if (be != NULL && NSS_INVOKE_DBOP(be, NSS_DBOP_SETENT,
	    (void *)netgroup) != NSS_SUCCESS) {
		(void) NSS_INVOKE_DBOP(be, NSS_DBOP_DESTRUCTOR, 0);
		be = NULL;
	}
	if (be == NULL) {
		struct nss_setnetgrent_args	args;

		args.netgroup	= netgroup;
		args.iterator	= 0;
		(void) nss_search(&db_root, _nss_initf_netgroup,
		    NSS_DBOP_NETGROUP_SET, &args);
		be = args.iterator;
	}
	getnetgrent_backend = be;
	fork_lock_exit();
	return (0);
}

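/*
 * Reentrant enumeration routine: asks the backend selected by
 * setnetgrent() for the next (machine, user, domain) triple, using the
 * caller-supplied buffer for string storage.  Returns 1 and fills in
 * the three pointers on success, 0 once the entry is exhausted or if no
 * setnetgrent() has been done.
 */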
int
getnetgrent_r(machinep, namep, domainp, buffer, buflen)
	char		**machinep;
	char		**namep;
	char		**domainp;
	char		*buffer;
	int		buflen;
{
	struct nss_getnetgrent_args	args;

	args.buffer	= buffer;
	args.buflen	= buflen;
	args.status	= NSS_NETGR_NO;

	(void) fork_lock_enter(NULL);
	if (getnetgrent_backend != 0) {
		(void) NSS_INVOKE_DBOP(getnetgrent_backend,
			NSS_DBOP_GETENT, &args);
	}
	fork_lock_exit();

	if (args.status == NSS_NETGR_FOUND) {
		*machinep = args.retp[NSS_NETGR_MACHINE];
		*namep	  = args.retp[NSS_NETGR_USER];
		*domainp  = args.retp[NSS_NETGR_DOMAIN];
		return (1);
	} else {
		return (0);
	}
}

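/*
 * Non-reentrant getnetgrent(): the returned strings live in a
 * library-managed buffer that endnetgrent() releases, so each call may
 * overwrite the results of the previous one.
 */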
static nss_XbyY_buf_t *buf;

int
getnetgrent(machinep, namep, domainp)
	char		**machinep;
	char		**namep;
	char		**domainp;
{
	(void) NSS_XbyY_ALLOC(&buf, 0, NSS_BUFLEN_NETGROUP);
	return (getnetgrent_r(machinep, namep, domainp,
	    buf->buffer, buf->buflen));
}

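/*
 * endnetgrent() releases the enumeration state: it destroys the backend
 * left behind by setnetgrent(), tears down the switch state for the
 * netgroup database, and frees the buffer used by the non-reentrant
 * getnetgrent().
 */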
int
endnetgrent()
{
	(void) fork_lock_enter(NULL);
	if (getnetgrent_backend != 0) {
		(void) NSS_INVOKE_DBOP(getnetgrent_backend,
			NSS_DBOP_DESTRUCTOR, 0);
		getnetgrent_backend = 0;
	}
	fork_lock_exit();
	nss_delete(&db_root);	/* === ? */
	NSS_XbyY_FREE(&buf);
	return (0);
}