vio_util.c revision 12139:9b80e92356c3
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/ksynch.h>
#include <sys/stream.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/vio_util.h>

static int vio_pool_cleanup_retries = 10;	/* Max retries to free pool */
static int vio_pool_cleanup_delay = 10000;	/* 10ms */

/*
 * Create a pool of mblks from which future vio_allocb() requests
 * will be serviced.
 *
 * NOTE: num_mblks has to be non-zero and a power of 2.
 *
 * Returns
 *	0 on success
 *	EINVAL if num_mblks is zero or not a power of 2.
 *	ENOSPC if the pool could not be created due to alloc failures.
 */
int
vio_create_mblks(uint64_t num_mblks, size_t mblk_size, uint8_t *mblk_datap,
    vio_mblk_pool_t **poolp)
{
	vio_mblk_pool_t		*vmplp;
	vio_mblk_t		*vmp;
	uint8_t			*datap;
	int			i;
	int			rv;

	if ((num_mblks == 0) || !ISP2(num_mblks)) {
		*poolp = NULL;
		return (EINVAL);
	}

	vmplp = kmem_zalloc(sizeof (*vmplp), KM_SLEEP);
	vmplp->quelen = num_mblks;
	vmplp->quemask = num_mblks - 1; /* expects quelen is a power of 2 */
	vmplp->mblk_size = mblk_size;

	mutex_init(&vmplp->hlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(DDI_INTR_SOFTPRI_DEFAULT));
	mutex_init(&vmplp->tlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(DDI_INTR_SOFTPRI_DEFAULT));

	vmplp->basep = kmem_zalloc(num_mblks * sizeof (vio_mblk_t), KM_SLEEP);
	if (mblk_datap == NULL) {
		vmplp->datap = kmem_zalloc(num_mblks * mblk_size, KM_SLEEP);
	} else {
		vmplp->datap = mblk_datap;
		vmplp->flag |= VMPL_FLAG_CLIENT_DATA;
	}
	vmplp->nextp = NULL;

	/* create a queue of pointers to free vio_mblk_t's */
	vmplp->quep = kmem_zalloc(vmplp->quelen *
	    sizeof (vio_mblk_t *), KM_SLEEP);
	vmplp->head = 0;
	vmplp->tail = 0;

	for (i = 0, datap = vmplp->datap; i < num_mblks; i++) {

		vmp = &(vmplp->basep[i]);
		vmp->vmplp = vmplp;
		vmp->datap = datap;
		vmp->reclaim.free_func = vio_freeb;
		vmp->reclaim.free_arg = (caddr_t)vmp;
		vmp->mp = desballoc(vmp->datap, mblk_size, BPRI_MED,
		    &vmp->reclaim);

		if (vmp->mp == NULL) {
			/* reset tail */
			vmplp->tail = vmplp->head;

			/*
			 * vio_destroy_mblks() frees mblks that have been
			 * allocated so far and then destroys the pool.
			 */
			rv = vio_destroy_mblks(vmplp);
			ASSERT(rv == 0);

			*poolp = NULL;
			return (ENOSPC);
		}

		vmp->index = i;
		vmp->state = VIO_MBLK_FREE;

		/* put this vmp on the free queue */
		vmplp->quep[vmplp->tail] = vmp;
		vmplp->tail = (vmplp->tail + 1) & vmplp->quemask;

		datap += mblk_size;
	}

	*poolp = vmplp;
	return (0);
}
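
/*
 * Example (illustrative sketch, not part of the original source): a client
 * driver would typically size the pool to its receive ring and let the pool
 * allocate the data buffers by passing a NULL mblk_datap. The names below
 * (xx_rx_pool, XX_RING_LEN, XX_BUF_SZ) are hypothetical; XX_RING_LEN must be
 * a non-zero power of 2.
 *
 *	static vio_mblk_pool_t	*xx_rx_pool;
 *
 *	static int
 *	xx_create_rx_pool(void)
 *	{
 *		return (vio_create_mblks(XX_RING_LEN, XX_BUF_SZ, NULL,
 *		    &xx_rx_pool));
 *	}
 */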

/*
 * Destroy the pool of mblks. This can only succeed when
 * all allocated mblks have been returned to the pool.
 *
 * It is up to the caller to ensure that no further mblks are
 * requested from the pool after destroy has been invoked.
 *
 * Returns 0 on success, EINVAL if the handle is invalid, or
 * EBUSY if not all mblks have been reclaimed yet.
 */
int
vio_destroy_mblks(vio_mblk_pool_t *vmplp)
{
	uint64_t	i;
	uint64_t	num_mblks;
	vio_mblk_t	*vmp;
	int		pool_cleanup_retries = 0;

	if (vmplp == NULL)
		return (EINVAL);

	/*
	 * We can only destroy the pool once all the mblks have
	 * been reclaimed.
	 */
	do {
		if (vmplp->head == vmplp->tail) {
			break;
		}

		/* some mblks still in use */
		drv_usecwait(vio_pool_cleanup_delay);
	} while (++pool_cleanup_retries < vio_pool_cleanup_retries);

	if (vmplp->head != vmplp->tail) {
		return (EBUSY);
	}

	num_mblks = vmplp->quelen;

	/*
	 * Set the pool flag to tell vio_freeb(), which is invoked from
	 * freeb(), that it is being called in the context of
	 * vio_destroy_mblks(). This results in freeing only the mblk_t and
	 * dblk_t structures for each mp. The associated data buffers are
	 * freed below as one big chunk through kmem_free(vmplp->datap).
	 */
	vmplp->flag |= VMPL_FLAG_DESTROYING;
	for (i = 0; i < num_mblks; i++) {
		vmp = &(vmplp->basep[i]);
		/*
		 * It is possible that mblks have been allocated only up to
		 * a certain index and the entire quelen has not been
		 * initialized. This might happen due to desballoc() failure
		 * while creating the pool. The check below handles this
		 * condition.
		 */
		if (vmp->mp != NULL)
			freeb(vmp->mp);
	}
	vmplp->flag &= ~(VMPL_FLAG_DESTROYING);

	kmem_free(vmplp->basep, num_mblks * sizeof (vio_mblk_t));
	if ((vmplp->flag & VMPL_FLAG_CLIENT_DATA) == 0) {
		kmem_free(vmplp->datap, num_mblks * vmplp->mblk_size);
	}
	kmem_free(vmplp->quep, num_mblks * sizeof (vio_mblk_t *));

	mutex_destroy(&vmplp->hlock);
	mutex_destroy(&vmplp->tlock);

	kmem_free(vmplp, sizeof (*vmplp));

	return (0);
}
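
/*
 * Example (illustrative sketch, not part of the original source): teardown
 * has to tolerate EBUSY, since mblks loaned to upper layers may not have
 * been returned yet. The xx_destroy_rx_pool and xx_defer_pool_cleanup names
 * and the deferred-retry strategy are hypothetical.
 *
 *	static int
 *	xx_destroy_rx_pool(vio_mblk_pool_t *pool)
 *	{
 *		int	rv;
 *
 *		rv = vio_destroy_mblks(pool);
 *		if (rv == EBUSY) {
 *			// stash the pool and retry the destroy later,
 *			// e.g. at detach time
 *			xx_defer_pool_cleanup(pool);
 *		}
 *		return (rv);
 *	}
 */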

/*
 * Allocate a vio_mblk from the free pool if one is available.
 * Otherwise return NULL.
 */
vio_mblk_t *
vio_allocb(vio_mblk_pool_t *vmplp)
{
	vio_mblk_t	*vmp = NULL;
	uint32_t	head;

	mutex_enter(&vmplp->hlock);
	head = (vmplp->head + 1) & vmplp->quemask;
	if (head != vmplp->tail) {
		/* we have free mblks */
		vmp = vmplp->quep[vmplp->head];
		vmplp->head = head;
		ASSERT(vmp->state == VIO_MBLK_FREE);
		vmp->state = VIO_MBLK_BOUND;
	}
	mutex_exit(&vmplp->hlock);

	return (vmp);
}
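
/*
 * Example (illustrative sketch, not part of the original source): a receive
 * path typically grabs a free buffer, copies the incoming frame into
 * vmp->datap, sets the mblk write pointer and passes vmp->mp upstream; the
 * buffer returns to the pool automatically when the upper layer frees the
 * message (see vio_freeb() below). The xx_build_rx_mp name is hypothetical,
 * nbytes is assumed not to exceed the pool's mblk_size, and error handling
 * is omitted.
 *
 *	static mblk_t *
 *	xx_build_rx_mp(vio_mblk_pool_t *pool, uint8_t *frame, size_t nbytes)
 *	{
 *		vio_mblk_t	*vmp;
 *
 *		if ((vmp = vio_allocb(pool)) == NULL)
 *			return (NULL);		// pool exhausted
 *
 *		bcopy(frame, vmp->datap, nbytes);
 *		vmp->mp->b_wptr = vmp->mp->b_rptr + nbytes;
 *		return (vmp->mp);		// caller sends this upstream
 *	}
 */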

/*
 * Return an mblk to the free pool. Invoked when the upper IP
 * layers call freemsg() etc. on the mblk they were passed.
 */
void
vio_freeb(void *arg)
{
	vio_mblk_t	*vmp = (vio_mblk_t *)arg;
	vio_mblk_pool_t	*vmplp = vmp->vmplp;

	if (vmplp->flag & VMPL_FLAG_DESTROYING) {
		/*
		 * This flag indicates that freeb() is being called from
		 * vio_destroy_mblks().
		 * We don't need to alloc a new mblk_t/dblk_t pair for
		 * this data buffer; return from here and the data buffer
		 * itself will be freed in vio_destroy_mblks().
		 */
		return;
	}

	vmp->mp = desballoc(vmp->datap, vmplp->mblk_size,
	    BPRI_MED, &vmp->reclaim);
	vmp->state = VIO_MBLK_FREE;

	mutex_enter(&vmplp->tlock);
	vmplp->quep[vmplp->tail] = vmp;
	vmplp->tail = (vmplp->tail + 1) & vmplp->quemask;
	mutex_exit(&vmplp->tlock);
}
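
/*
 * Note (not part of the original source): clients do not call vio_freeb()
 * directly. It was registered as the frtn_t free routine in desballoc()
 * when the pool was created, so an ordinary freemsg()/freeb() on a loaned
 * mblk runs vio_freeb(), which re-arms the buffer with a fresh mblk_t and
 * puts it back on the free queue:
 *
 *	freemsg(vmp->mp);	// invokes vio_freeb(vmp) via the reclaim hook
 */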


/*
 * This function searches the given mblk pool for mblks that are in the
 * BOUND state and moves them to the FREE state. Note that only clients that
 * are operating in RxDringData mode use this function. This allows such
 * clients to reclaim buffers that are provided to the peer as shared memory,
 * before calling vio_destroy_mblks(). We don't need this in other cases
 * as the buffer is locally managed.
 */
void
vio_clobber_pool(vio_mblk_pool_t *vmplp)
{
	uint64_t	num_mblks = vmplp->quelen;
	uint64_t	i;
	vio_mblk_t	*vmp;

	mutex_enter(&vmplp->hlock);
	mutex_enter(&vmplp->tlock);
	for (i = 0; i < num_mblks; i++) {
		vmp = &(vmplp->basep[i]);
		if ((vmp->state & VIO_MBLK_BOUND) != 0) {
			/* put this vmp on the free queue */
			vmp->state = VIO_MBLK_FREE;
			ASSERT(vmplp->tail != vmplp->head);
			vmplp->quep[vmplp->tail] = vmp;
			vmplp->tail = (vmplp->tail + 1) & vmplp->quemask;
		}
	}
	mutex_exit(&vmplp->tlock);
	mutex_exit(&vmplp->hlock);
}
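
/*
 * Example (illustrative sketch, not part of the original source): a client
 * operating in RxDringData mode would reclaim the buffers it exported to
 * the peer before tearing the pool down. The xx_unexport_rx_pool name is
 * hypothetical.
 *
 *	static int
 *	xx_unexport_rx_pool(vio_mblk_pool_t *pool)
 *	{
 *		vio_clobber_pool(pool);		// BOUND buffers -> FREE
 *		return (vio_destroy_mblks(pool));
 *	}
 */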

/*
 * Create multiple pools of mblks from which future vio_allocb()
 * or vio_multipool_allocb() requests will be serviced.
 *
 * Arguments:
 *	vmultip -- A pointer to a vio_multi_pool_t structure.
 *	num_pools -- Number of pools.
 *	... -- Variable arguments consisting of a list of buffer sizes for
 *		each pool, followed by a list of buffer counts for each pool.
 *
 * NOTE: The restrictions of vio_create_mblks() apply to this interface also.
 *
 * Returns 0 on success or an error returned by vio_create_mblks().
 */
int
vio_init_multipools(vio_multi_pool_t *vmultip, int num_pools, ...)
{
	int		i;
	int		status;
	char		*tbuf;
	va_list		vap;
	vio_mblk_pool_t *fvmp = NULL;

	/*
	 * Allocate memory for all of the following in one allocation:
	 *	bufsz_tbl -- sizeof (uint32_t) * num_pools
	 *	nbuf_tbl  -- sizeof (uint32_t) * num_pools
	 *	vmpp	  -- sizeof (vio_mblk_pool_t *) * num_pools
	 */
	vmultip->tbsz = (sizeof (uint32_t) * num_pools) +
	    (sizeof (uint32_t) * num_pools) +
	    (sizeof (vio_mblk_pool_t *) * num_pools);
	tbuf = kmem_zalloc(vmultip->tbsz, KM_SLEEP);
	vmultip->bufsz_tbl = (uint32_t *)tbuf;
	vmultip->nbuf_tbl = (uint32_t *)(tbuf +
	    (sizeof (uint32_t) * num_pools));
	vmultip->vmpp = (vio_mblk_pool_t **)(tbuf +
	    (sizeof (uint32_t) * num_pools * 2));
	vmultip->num_pools = num_pools;

	/* initialize the arrays first */
	va_start(vap, num_pools);
	for (i = 0; i < num_pools; i++) {
		vmultip->bufsz_tbl[i] = va_arg(vap, uint32_t);
	}
	for (i = 0; i < num_pools; i++) {
		vmultip->nbuf_tbl[i] = va_arg(vap, uint32_t);
	}
	va_end(vap);

	for (i = 0; i < vmultip->num_pools; i++) {
		status = vio_create_mblks(vmultip->nbuf_tbl[i],
		    vmultip->bufsz_tbl[i], NULL, &vmultip->vmpp[i]);
		if (status != 0) {
			vio_destroy_multipools(vmultip, &fvmp);
			/* We expect to free the pools without failure here */
			ASSERT(fvmp == NULL);
			return (status);
		}
	}
	return (0);
}
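
/*
 * Example (illustrative sketch, not part of the original source): the
 * variable arguments are consumed as all of the buffer sizes first, then
 * all of the buffer counts, in pool order. The xx_mpool name and the sizes
 * and counts below are hypothetical (each count must be a non-zero power
 * of 2).
 *
 *	vio_multi_pool_t	xx_mpool;
 *	int			rv;
 *
 *	// three pools: 128 x 256-byte, 64 x 1536-byte, 32 x 4096-byte
 *	rv = vio_init_multipools(&xx_mpool, 3,
 *	    256, 1536, 4096,		// buffer sizes
 *	    128, 64, 32);		// buffer counts
 */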

/*
 * Destroy the multiple pools of mblks. This can only succeed when
 * all allocated mblks have been returned to their pools.
 *
 * If a pool of mblks couldn't be destroyed, then the failed vio_mblk_pool_t
 * pointers are returned via the fvmp list. It is the caller's
 * responsibility to check this list and free them later at an appropriate
 * time with vio_destroy_mblks().
 *
 * Arguments:
 *	vmultip -- A pointer to a vio_multi_pool_t structure.
 *	fvmp -- A list in which the pools that couldn't be destroyed are
 *		returned.
 */
void
vio_destroy_multipools(vio_multi_pool_t *vmultip, vio_mblk_pool_t **fvmp)
{
	int i;
	vio_mblk_pool_t *vmp;

	for (i = 0; i < vmultip->num_pools; i++) {
		if ((vmp = vmultip->vmpp[i]) != NULL) {
			if (vio_destroy_mblks(vmp)) {
				/*
				 * If we cannot reclaim all mblks, then
				 * return the pool in the failed vmp
				 * list (fvmp).
				 */
				vmp->nextp = *fvmp;
				*fvmp = vmp;
			}
		}
	}
	if (vmultip->tbsz != 0)
		kmem_free(vmultip->bufsz_tbl, vmultip->tbsz);
	vmultip->bufsz_tbl = NULL;
	vmultip->nbuf_tbl = NULL;
	vmultip->vmpp = NULL;
	vmultip->num_pools = 0;
	vmultip->tbsz = 0;
}
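
/*
 * Example (illustrative sketch, not part of the original source): pools
 * that could not be destroyed are handed back on the fvmp list, chained
 * through their nextp fields, and must be retried later with
 * vio_destroy_mblks(). The xx_mpool name is hypothetical.
 *
 *	vio_mblk_pool_t	*fvmp = NULL;
 *
 *	vio_destroy_multipools(&xx_mpool, &fvmp);
 *	if (fvmp != NULL) {
 *		// stash fvmp (e.g. on a driver-private list) and destroy
 *		// the pools on it later, once their mblks have been returned
 *	}
 */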


/*
 * Allocate a vio_mblk from one of the free pools, trying the pool that
 * best fits the requested size first.
 */
vio_mblk_t *
vio_multipool_allocb(vio_multi_pool_t *vmultip, size_t size)
{
	int i;
	vio_mblk_t *vmp = NULL;

	/* Try allocating any size that fits */
	for (i = 0; i < vmultip->num_pools; i++) {
		if (size > vmultip->bufsz_tbl[i]) {
			continue;
		}
		vmp = vio_allocb(vmultip->vmpp[i]);
		if (vmp != NULL) {
			break;
		}
	}
	return (vmp);
}
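
/*
 * Example (illustrative sketch, not part of the original source): pools are
 * scanned in the order they were passed to vio_init_multipools(), so listing
 * them in increasing buffer-size order gives best-fit behaviour. The
 * xx_mpool and frame_len names are hypothetical.
 *
 *	vio_mblk_t	*vmp;
 *
 *	vmp = vio_multipool_allocb(&xx_mpool, frame_len);
 *	if (vmp == NULL) {
 *		// no buffer large enough is free right now
 *	}
 */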

/*
 * -----------------------------------------------------------------------------
 * LDoms versioning functions
 *
 * Future work: the version negotiating code in the various VIO drivers
 * could be made common and placed here.
 */

/*
 * Description:
 *	This function checks whether the supplied version tuple (major,minor)
 *	is supported by the version 'ver' negotiated during the handshake
 *	between the client and the server.
 *
 * Assumption:
 *	This function assumes that backward compatibility is not broken in
 *	newer minor versions of the protocol (e.g. v1.5 & v1.1 support v1.0).
 *
 * Return Value:
 *	B_TRUE		- The (major,minor) version is supported
 *	B_FALSE		- not supported
 */
boolean_t
vio_ver_is_supported(vio_ver_t ver, uint16_t major, uint16_t minor)
{
	if ((ver.major == major) && (ver.minor >= minor))
		return (B_TRUE);

	return (B_FALSE);
}
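
/*
 * Example (illustrative sketch, not part of the original source): once the
 * handshake has settled on a version, a driver can gate optional features
 * on the protocol level it needs. The vgen_ver and xx_use_feature names are
 * hypothetical.
 *
 *	// feature introduced in protocol v1.5
 *	if (vio_ver_is_supported(vgen_ver, 1, 5))
 *		xx_use_feature = B_TRUE;
 */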