/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * dcopy.c
 *    dcopy misc module
 */

#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/sysmacros.h>
#include <sys/atomic.h>


#include <sys/dcopy.h>
#include <sys/dcopy_device.h>


/* Number of entries per channel to allocate */
uint_t dcopy_channel_size = 1024;
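
/*
 * Illustrative note (not part of the original source): as a global tunable,
 * dcopy_channel_size can in principle be overridden at boot time from
 * /etc/system, e.g.:
 *
 *	set dcopy:dcopy_channel_size = 0x800
 *
 * The default of 1024 descriptors per channel is the value this module
 * passes to the DMA device driver's cb_channel_alloc() callback below; any
 * other value shown here is only a suggested example, not a tested
 * configuration.
 */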


typedef struct dcopy_list_s {
	list_t			dl_list;
	kmutex_t		dl_mutex;
	uint_t			dl_cnt; /* num entries on list */
} dcopy_list_t;

/* device state for register/unregister */
struct dcopy_device_s {
	/* DMA device driver's private pointer */
	void			*dc_device_private;

	/* to track list of channels from this DMA device */
	dcopy_list_t		dc_devchan_list;
	list_node_t		dc_device_list_node;

	/*
	 * dc_removing_cnt tracks how many channels still have to be freed up
	 * before it's safe to allow the DMA device driver to detach.
	 */
	uint_t			dc_removing_cnt;
	dcopy_device_cb_t	*dc_cb;

	dcopy_device_info_t	dc_info;

};

typedef struct dcopy_stats_s {
	kstat_named_t	cs_bytes_xfer;
	kstat_named_t	cs_cmd_alloc;
	kstat_named_t	cs_cmd_post;
	kstat_named_t	cs_cmd_poll;
	kstat_named_t	cs_notify_poll;
	kstat_named_t	cs_notify_pending;
	kstat_named_t	cs_id;
	kstat_named_t	cs_capabilities;
} dcopy_stats_t;

/* DMA channel state */
struct dcopy_channel_s {
	/* DMA driver's channel private pointer */
	void			*ch_channel_private;

	/* shortcut to device callbacks */
	dcopy_device_cb_t	*ch_cb;

	/*
	 * number of outstanding allocs for this channel. used to track when
	 * it's safe to free up this channel so the DMA device driver can
	 * detach.
	 */
	uint64_t		ch_ref_cnt;

	/* set when the channel must be removed once ch_ref_cnt drops to 0 */
	boolean_t		ch_removing;

	list_node_t		ch_devchan_list_node;
	list_node_t		ch_globalchan_list_node;

	/*
	 * per channel list of commands actively blocking, waiting for
	 * completion.
	 */
	dcopy_list_t		ch_poll_list;

	/* pointer back to our device */
	struct dcopy_device_s	*ch_device;

	dcopy_query_channel_t	ch_info;

	kstat_t			*ch_kstat;
	dcopy_stats_t		ch_stat;
};

/*
 * If grabbing both the device_list mutex and the globalchan_list mutex,
 * always grab the globalchan_list mutex before the device_list mutex.
 */
typedef struct dcopy_state_s {
	dcopy_list_t		d_device_list;
	dcopy_list_t		d_globalchan_list;
} dcopy_state_t;
dcopy_state_t *dcopy_statep;


/* Module Driver Info */
static struct modlmisc dcopy_modlmisc = {
	&mod_miscops,
	"dcopy kernel module"
};

/* Module Linkage */
static struct modlinkage dcopy_modlinkage = {
	MODREV_1,
	&dcopy_modlmisc,
	NULL
};

static int dcopy_init();
static void dcopy_fini();

static int dcopy_list_init(dcopy_list_t *list, size_t node_size,
    offset_t link_offset);
static void dcopy_list_fini(dcopy_list_t *list);
static void dcopy_list_push(dcopy_list_t *list, void *list_node);
static void *dcopy_list_pop(dcopy_list_t *list);

static void dcopy_device_cleanup(dcopy_device_handle_t device,
    boolean_t do_callback);

static int dcopy_stats_init(dcopy_handle_t channel);
static void dcopy_stats_fini(dcopy_handle_t channel);


/*
 * _init()
 */
int
_init()
{
	int e;

	e = dcopy_init();
	if (e != 0) {
		return (e);
	}

	return (mod_install(&dcopy_modlinkage));
}


/*
 * _info()
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&dcopy_modlinkage, modinfop));
}


/*
 * _fini()
 */
int
_fini()
{
	int e;

	e = mod_remove(&dcopy_modlinkage);
	if (e != 0) {
		return (e);
	}

	dcopy_fini();

	return (e);
}

/*
 * dcopy_init()
 */
static int
dcopy_init()
{
	int e;


	dcopy_statep = kmem_zalloc(sizeof (*dcopy_statep), KM_SLEEP);

	/* Initialize the list we use to track device register/unregister */
	e = dcopy_list_init(&dcopy_statep->d_device_list,
	    sizeof (struct dcopy_device_s),
	    offsetof(struct dcopy_device_s, dc_device_list_node));
	if (e != DCOPY_SUCCESS) {
		goto dcopyinitfail_device;
	}

	/* Initialize the list we use to track all DMA channels */
	e = dcopy_list_init(&dcopy_statep->d_globalchan_list,
	    sizeof (struct dcopy_channel_s),
	    offsetof(struct dcopy_channel_s, ch_globalchan_list_node));
	if (e != DCOPY_SUCCESS) {
		goto dcopyinitfail_global;
	}

	return (0);

dcopyinitfail_cback:
	dcopy_list_fini(&dcopy_statep->d_globalchan_list);
dcopyinitfail_global:
	dcopy_list_fini(&dcopy_statep->d_device_list);
dcopyinitfail_device:
	kmem_free(dcopy_statep, sizeof (*dcopy_statep));

	return (-1);
}


/*
 * dcopy_fini()
 */
static void
dcopy_fini()
{
	/*
	 * if mod_remove was successful, we shouldn't have any
	 * devices/channels to worry about.
	 */
	ASSERT(list_head(&dcopy_statep->d_globalchan_list.dl_list) == NULL);
	ASSERT(list_head(&dcopy_statep->d_device_list.dl_list) == NULL);

	dcopy_list_fini(&dcopy_statep->d_globalchan_list);
	dcopy_list_fini(&dcopy_statep->d_device_list);
	kmem_free(dcopy_statep, sizeof (*dcopy_statep));
}


/* *** EXTERNAL INTERFACE *** */
/*
 * dcopy_query()
 */
void
dcopy_query(dcopy_query_t *query)
{
	query->dq_version = DCOPY_QUERY_V0;
	query->dq_num_channels = dcopy_statep->d_globalchan_list.dl_cnt;
}
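
/*
 * Illustrative sketch (not part of the original module): how a kernel
 * consumer might use dcopy_query() to decide whether hardware copy offload
 * is worth attempting before falling back to bcopy(). The function name
 * xx_copy_offload_available() is hypothetical; dcopy_query(), dcopy_query_t,
 * DCOPY_QUERY_V0 and dq_num_channels are the KAPI pieces implemented or
 * filled in by this file.
 */
static boolean_t
xx_copy_offload_available(void)
{
	dcopy_query_t query;

	dcopy_query(&query);

	/* no registered DMA devices means no channels to allocate */
	if (query.dq_num_channels == 0) {
		return (B_FALSE);
	}

	return (B_TRUE);
}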


/*
 * dcopy_alloc()
 */
/*ARGSUSED*/
int
dcopy_alloc(int flags, dcopy_handle_t *handle)
{
	dcopy_handle_t channel;
	dcopy_list_t *list;


	/*
	 * we don't use the dcopy_list_* code here because we need to do
	 * some non-standard stuff.
	 */

	list = &dcopy_statep->d_globalchan_list;

	/*
	 * if nothing is on the channel list, return DCOPY_NORESOURCES. This
	 * can happen if there aren't any DMA devices registered.
	 */
	mutex_enter(&list->dl_mutex);
	channel = list_head(&list->dl_list);
	if (channel == NULL) {
		mutex_exit(&list->dl_mutex);
		return (DCOPY_NORESOURCES);
	}

	/*
	 * increment the reference count, and pop the channel off the head and
	 * push it on the tail. This ensures we rotate through the channels.
	 * DMA channels are shared.
	 */
	channel->ch_ref_cnt++;
	list_remove(&list->dl_list, channel);
	list_insert_tail(&list->dl_list, channel);
	mutex_exit(&list->dl_mutex);

	*handle = (dcopy_handle_t)channel;
	return (DCOPY_SUCCESS);
}


/*
 * dcopy_free()
 */
void
dcopy_free(dcopy_handle_t *channel)
{
	dcopy_device_handle_t device;
	dcopy_list_t *list;
	boolean_t cleanup = B_FALSE;


	ASSERT(*channel != NULL);

	/*
	 * we don't need to add the channel back to the list since we never
	 * removed it. decrement the reference count.
	 */
	list = &dcopy_statep->d_globalchan_list;
	mutex_enter(&list->dl_mutex);
	(*channel)->ch_ref_cnt--;

	/*
	 * if we need to remove this channel, and the reference count is down
	 * to 0, decrement the number of channels which still need to be
	 * removed on the device.
	 */
	if ((*channel)->ch_removing && ((*channel)->ch_ref_cnt == 0)) {
		device = (*channel)->ch_device;
		mutex_enter(&device->dc_devchan_list.dl_mutex);
		device->dc_removing_cnt--;
		if (device->dc_removing_cnt == 0) {
			cleanup = B_TRUE;
		}
		mutex_exit(&device->dc_devchan_list.dl_mutex);
	}
	mutex_exit(&list->dl_mutex);

	/*
	 * if this was the last channel which still needed to be removed,
	 * clean up the device state and call back into the DMA device driver
	 * to tell it the device is free to be detached.
	 */
	if (cleanup) {
		dcopy_device_cleanup(device, B_TRUE);
	}

	*channel = NULL;
}


/*
 * dcopy_query_channel()
 */
void
dcopy_query_channel(dcopy_handle_t channel, dcopy_query_channel_t *query)
{
	*query = channel->ch_info;
}


/*
 * dcopy_cmd_alloc()
 */
int
dcopy_cmd_alloc(dcopy_handle_t handle, int flags, dcopy_cmd_t *cmd)
{
	dcopy_handle_t channel;
	dcopy_cmd_priv_t priv;
	int e;


	channel = handle;

	atomic_inc_64(&channel->ch_stat.cs_cmd_alloc.value.ui64);
	e = channel->ch_cb->cb_cmd_alloc(channel->ch_channel_private, flags,
	    cmd);
	if (e == DCOPY_SUCCESS) {
		priv = (*cmd)->dp_private;
		priv->pr_channel = channel;
		/*
		 * we won't initialize the blocking state until we actually
		 * need to block.
		 */
		priv->pr_block_init = B_FALSE;
	}

	return (e);
}


/*
 * dcopy_cmd_free()
 */
void
dcopy_cmd_free(dcopy_cmd_t *cmd)
{
	dcopy_handle_t channel;
	dcopy_cmd_priv_t priv;


	ASSERT(*cmd != NULL);

	priv = (*cmd)->dp_private;
	channel = priv->pr_channel;

	/* if we initialized the blocking state, clean it up too */
	if (priv->pr_block_init) {
		cv_destroy(&priv->pr_cv);
		mutex_destroy(&priv->pr_mutex);
	}

	channel->ch_cb->cb_cmd_free(channel->ch_channel_private, cmd);
}


/*
 * dcopy_cmd_post()
 */
int
dcopy_cmd_post(dcopy_cmd_t cmd)
{
	dcopy_handle_t channel;
	int e;


	channel = cmd->dp_private->pr_channel;

	atomic_inc_64(&channel->ch_stat.cs_cmd_post.value.ui64);
	if (cmd->dp_cmd == DCOPY_CMD_COPY) {
		atomic_add_64(&channel->ch_stat.cs_bytes_xfer.value.ui64,
		    cmd->dp.copy.cc_size);
	}
	e = channel->ch_cb->cb_cmd_post(channel->ch_channel_private, cmd);
	if (e != DCOPY_SUCCESS) {
		return (e);
	}

	return (DCOPY_SUCCESS);
}


/*
 * dcopy_cmd_poll()
 */
int
dcopy_cmd_poll(dcopy_cmd_t cmd, int flags)
{
	dcopy_handle_t channel;
	dcopy_cmd_priv_t priv;
	int e;


	priv = cmd->dp_private;
	channel = priv->pr_channel;

	/*
	 * if the caller is trying to block, they need to have posted the
	 * command with DCOPY_CMD_INTR set.
	 */
	if ((flags & DCOPY_POLL_BLOCK) && !(cmd->dp_flags & DCOPY_CMD_INTR)) {
		return (DCOPY_FAILURE);
	}

	atomic_inc_64(&channel->ch_stat.cs_cmd_poll.value.ui64);

repoll:
	e = channel->ch_cb->cb_cmd_poll(channel->ch_channel_private, cmd);
	if (e == DCOPY_PENDING) {
		/*
		 * the command is still active; if the blocking flag is set,
		 * wait for the completion interrupt to wake us up.
		 */
		if (flags & DCOPY_POLL_BLOCK) {

			/*
			 * if we haven't initialized the state, do it now. A
			 * command can be re-used, so it's possible it's
			 * already been initialized.
			 */
			if (!priv->pr_block_init) {
				priv->pr_block_init = B_TRUE;
				mutex_init(&priv->pr_mutex, NULL, MUTEX_DRIVER,
				    NULL);
				cv_init(&priv->pr_cv, NULL, CV_DRIVER, NULL);
				priv->pr_cmd = cmd;
			}

			/* push it on the list for blocking commands */
			priv->pr_wait = B_TRUE;
			dcopy_list_push(&channel->ch_poll_list, priv);

			mutex_enter(&priv->pr_mutex);
			/*
			 * it's possible we already cleared pr_wait before we
			 * grabbed the mutex.
			 */
			if (priv->pr_wait) {
				cv_wait(&priv->pr_cv, &priv->pr_mutex);
			}
			mutex_exit(&priv->pr_mutex);

			/*
			 * the command has completed, go back and poll so we
			 * get the status.
			 */
			goto repoll;
		}
	}

	return (e);
}
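
/*
 * Illustrative sketch (not part of the original module): one plausible
 * consumer of the external interface above, copying a single physically
 * contiguous buffer and blocking until the DMA engine finishes. The function
 * name xx_dma_copy() is hypothetical, and the dp_version/DCOPY_CMD_V0 and
 * cc_source/cc_dest field names are taken from sys/dcopy.h as the editor
 * understands it, so treat them as assumptions. dcopy_alloc(),
 * dcopy_cmd_alloc(), dcopy_cmd_post(), dcopy_cmd_poll(), dcopy_cmd_free()
 * and dcopy_free() are the routines implemented in this file.
 */
static int
xx_dma_copy(uint64_t src_pa, uint64_t dst_pa, size_t size)
{
	dcopy_handle_t chan;
	dcopy_cmd_t cmd;
	int e;

	/* round-robin grab of a shared DMA channel */
	e = dcopy_alloc(DCOPY_SLEEP, &chan);
	if (e != DCOPY_SUCCESS) {
		return (e); /* e.g. DCOPY_NORESOURCES: no DMA device */
	}

	e = dcopy_cmd_alloc(chan, DCOPY_SLEEP, &cmd);
	if (e != DCOPY_SUCCESS) {
		dcopy_free(&chan);
		return (e);
	}

	/*
	 * describe the copy; DCOPY_CMD_INTR is required so that
	 * dcopy_cmd_poll(..., DCOPY_POLL_BLOCK) is allowed to block.
	 */
	cmd->dp_version = DCOPY_CMD_V0;		/* assumed version define */
	cmd->dp_cmd = DCOPY_CMD_COPY;
	cmd->dp_flags = DCOPY_CMD_INTR;
	cmd->dp.copy.cc_source = src_pa;	/* assumed field names */
	cmd->dp.copy.cc_dest = dst_pa;
	cmd->dp.copy.cc_size = size;

	e = dcopy_cmd_post(cmd);
	if (e == DCOPY_SUCCESS) {
		/* block until dcopy_device_channel_notify() wakes us */
		e = dcopy_cmd_poll(cmd, DCOPY_POLL_BLOCK);
	}

	dcopy_cmd_free(&cmd);
	dcopy_free(&chan);

	return (e);
}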

/* *** END OF EXTERNAL INTERFACE *** */

/*
 * dcopy_list_init()
 */
static int
dcopy_list_init(dcopy_list_t *list, size_t node_size, offset_t link_offset)
{
	mutex_init(&list->dl_mutex, NULL, MUTEX_DRIVER, NULL);
	list_create(&list->dl_list, node_size, link_offset);
	list->dl_cnt = 0;

	return (DCOPY_SUCCESS);
}


/*
 * dcopy_list_fini()
 */
static void
dcopy_list_fini(dcopy_list_t *list)
{
	list_destroy(&list->dl_list);
	mutex_destroy(&list->dl_mutex);
}


/*
 * dcopy_list_push()
 */
static void
dcopy_list_push(dcopy_list_t *list, void *list_node)
{
	mutex_enter(&list->dl_mutex);
	list_insert_tail(&list->dl_list, list_node);
	list->dl_cnt++;
	mutex_exit(&list->dl_mutex);
}


/*
 * dcopy_list_pop()
 */
static void *
dcopy_list_pop(dcopy_list_t *list)
{
	list_node_t *list_node;

	mutex_enter(&list->dl_mutex);
	list_node = list_head(&list->dl_list);
	if (list_node == NULL) {
		mutex_exit(&list->dl_mutex);
		return (list_node);
	}
	list->dl_cnt--;
	list_remove(&list->dl_list, list_node);
	mutex_exit(&list->dl_mutex);

	return (list_node);
}
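
/*
 * Illustrative sketch (not part of the original module): the helpers above
 * take the embedded list_node_t offset at dcopy_list_init() time, so a new
 * counted list of some hypothetical xx_job_t nodes would be set up as shown
 * below. xx_job_t, xx_job_list and xx_job_list_setup() are invented names
 * for illustration only.
 */
typedef struct xx_job_s {
	list_node_t	xj_node;	/* linkage used by dcopy_list_t */
	uint64_t	xj_id;
} xx_job_t;

static dcopy_list_t xx_job_list;

static void
xx_job_list_setup(void)
{
	(void) dcopy_list_init(&xx_job_list, sizeof (xx_job_t),
	    offsetof(xx_job_t, xj_node));
}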


/* *** DEVICE INTERFACE *** */
/*
 * dcopy_device_register()
 */
int
dcopy_device_register(void *device_private, dcopy_device_info_t *info,
    dcopy_device_handle_t *handle)
{
	struct dcopy_channel_s *channel;
	struct dcopy_device_s *device;
	int e;
	int i;


	/* initialize the per device state */
	device = kmem_zalloc(sizeof (*device), KM_SLEEP);
	device->dc_device_private = device_private;
	device->dc_info = *info;
	device->dc_removing_cnt = 0;
	device->dc_cb = info->di_cb;

	/*
	 * we have a per device channel list so we can remove a device in the
	 * future.
	 */
	e = dcopy_list_init(&device->dc_devchan_list,
	    sizeof (struct dcopy_channel_s),
	    offsetof(struct dcopy_channel_s, ch_devchan_list_node));
	if (e != DCOPY_SUCCESS) {
		goto registerfail_devchan;
	}

	/*
	 * allocate state for each channel, allocate the channel, and then add
	 * the device's DMA channels to the device's channel list.
	 */
	for (i = 0; i < info->di_num_dma; i++) {
		channel = kmem_zalloc(sizeof (*channel), KM_SLEEP);
		channel->ch_device = device;
		channel->ch_removing = B_FALSE;
		channel->ch_ref_cnt = 0;
		channel->ch_cb = info->di_cb;

		e = info->di_cb->cb_channel_alloc(device_private, channel,
		    DCOPY_SLEEP, dcopy_channel_size, &channel->ch_info,
		    &channel->ch_channel_private);
		if (e != DCOPY_SUCCESS) {
			kmem_free(channel, sizeof (*channel));
			goto registerfail_alloc;
		}

		e = dcopy_stats_init(channel);
		if (e != DCOPY_SUCCESS) {
			info->di_cb->cb_channel_free(
			    &channel->ch_channel_private);
			kmem_free(channel, sizeof (*channel));
			goto registerfail_alloc;
		}

		e = dcopy_list_init(&channel->ch_poll_list,
		    sizeof (struct dcopy_cmd_priv_s),
		    offsetof(struct dcopy_cmd_priv_s, pr_poll_list_node));
		if (e != DCOPY_SUCCESS) {
			dcopy_stats_fini(channel);
			info->di_cb->cb_channel_free(
			    &channel->ch_channel_private);
			kmem_free(channel, sizeof (*channel));
			goto registerfail_alloc;
		}

		dcopy_list_push(&device->dc_devchan_list, channel);
	}

	/* add the device to device list */
	dcopy_list_push(&dcopy_statep->d_device_list, device);

	/*
	 * add the device's DMA channels to the global channel list (where
	 * dcopy_alloc() allocations come from)
	 */
	mutex_enter(&dcopy_statep->d_globalchan_list.dl_mutex);
	mutex_enter(&dcopy_statep->d_device_list.dl_mutex);
	channel = list_head(&device->dc_devchan_list.dl_list);
	while (channel != NULL) {
		list_insert_tail(&dcopy_statep->d_globalchan_list.dl_list,
		    channel);
		dcopy_statep->d_globalchan_list.dl_cnt++;
		channel = list_next(&device->dc_devchan_list.dl_list, channel);
	}
	mutex_exit(&dcopy_statep->d_device_list.dl_mutex);
	mutex_exit(&dcopy_statep->d_globalchan_list.dl_mutex);

	*handle = device;

	/* lastly, enable use of the dcopy KAPI by the kernel (uioa) */
	uioa_dcopy_enable();

	return (DCOPY_SUCCESS);

registerfail_alloc:
	channel = list_head(&device->dc_devchan_list.dl_list);
	while (channel != NULL) {
		/* remove from the list */
		channel = dcopy_list_pop(&device->dc_devchan_list);
		ASSERT(channel != NULL);

		dcopy_list_fini(&channel->ch_poll_list);
		dcopy_stats_fini(channel);
		info->di_cb->cb_channel_free(&channel->ch_channel_private);
		kmem_free(channel, sizeof (*channel));
	}

	dcopy_list_fini(&device->dc_devchan_list);
registerfail_devchan:
	kmem_free(device, sizeof (*device));

	return (DCOPY_FAILURE);
}
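
/*
 * Illustrative sketch (not part of the original module): roughly how a DMA
 * engine driver's attach(9E) path might register itself with this module.
 * xxdma_register() and its arguments are hypothetical; the driver is assumed
 * to have built a dcopy_device_cb_t table elsewhere containing the
 * cb_channel_alloc/cb_channel_free/cb_cmd_* and cb_unregister_complete entry
 * points that this file invokes. Only the di_dip, di_num_dma and di_cb
 * fields used in this file are shown; any other dcopy_device_info_t fields
 * (e.g. a version) are deliberately left out, and bzero() is assumed to be
 * available via <sys/systm.h>.
 */
static int
xxdma_register(dev_info_t *dip, void *device_private,
    dcopy_device_cb_t *cb_table, uint_t num_channels,
    dcopy_device_handle_t *handlep)
{
	dcopy_device_info_t info;
	int e;

	bzero(&info, sizeof (info));
	info.di_dip = dip;		/* used for the per-channel kstats */
	info.di_cb = cb_table;		/* driver's callback vector */
	info.di_num_dma = num_channels;	/* one cb_channel_alloc() call each */

	e = dcopy_device_register(device_private, &info, handlep);
	if (e != DCOPY_SUCCESS) {
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}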


/*
 * dcopy_device_unregister()
 */
/*ARGSUSED*/
int
dcopy_device_unregister(dcopy_device_handle_t *handle)
{
	struct dcopy_channel_s *channel;
	dcopy_device_handle_t device;
	boolean_t device_busy;

	/* first, disable use of the dcopy KAPI by the kernel (uioa) */
	uioa_dcopy_disable();

	device = *handle;
	device_busy = B_FALSE;

	/*
	 * remove the device's DMA channels from the global channel list
	 * (where dcopy_alloc() allocations come from)
	 */
	mutex_enter(&dcopy_statep->d_globalchan_list.dl_mutex);
	mutex_enter(&device->dc_devchan_list.dl_mutex);
	channel = list_head(&device->dc_devchan_list.dl_list);
	while (channel != NULL) {
		/*
		 * if the channel has outstanding allocs, mark it as having
		 * to be removed and increment the number of channels which
		 * need to be removed in the device state too.
		 */
		if (channel->ch_ref_cnt != 0) {
			channel->ch_removing = B_TRUE;
			device_busy = B_TRUE;
			device->dc_removing_cnt++;
		}
		dcopy_statep->d_globalchan_list.dl_cnt--;
		list_remove(&dcopy_statep->d_globalchan_list.dl_list, channel);
		channel = list_next(&device->dc_devchan_list.dl_list, channel);
	}
	mutex_exit(&device->dc_devchan_list.dl_mutex);
	mutex_exit(&dcopy_statep->d_globalchan_list.dl_mutex);

	/*
	 * if there are channels which still need to be removed, we will clean
	 * up the device state after they are freed up.
	 */
	if (device_busy) {
		return (DCOPY_PENDING);
	}

	dcopy_device_cleanup(device, B_FALSE);

	*handle = NULL;
	return (DCOPY_SUCCESS);
}
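
/*
 * Illustrative sketch (not part of the original module): how a DMA engine
 * driver's detach(9E) path might deal with the DCOPY_PENDING return above.
 * xxdma_unregister() is hypothetical. When DCOPY_PENDING is returned, some
 * channels still have outstanding dcopy_alloc() references; the driver must
 * not free its channel resources until this module invokes its
 * cb_unregister_complete() callback (see dcopy_device_cleanup() below), so
 * one simple policy is to just fail the detach attempt. A real driver would
 * also remember that unregister has already been initiated so it is not
 * called twice.
 */
static int
xxdma_unregister(dcopy_device_handle_t *handlep)
{
	int e;

	e = dcopy_device_unregister(handlep);
	if (e == DCOPY_PENDING) {
		/*
		 * channels are still referenced; cb_unregister_complete()
		 * will fire later, once the last dcopy_free() drops the
		 * final reference. Refuse to detach for now.
		 */
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}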


/*
 * dcopy_device_cleanup()
 */
static void
dcopy_device_cleanup(dcopy_device_handle_t device, boolean_t do_callback)
{
	struct dcopy_channel_s *channel;

	/*
	 * remove all the channels in the device list, free them, and clean up
	 * the state.
	 */
	mutex_enter(&dcopy_statep->d_device_list.dl_mutex);
	channel = list_head(&device->dc_devchan_list.dl_list);
	while (channel != NULL) {
		device->dc_devchan_list.dl_cnt--;
		list_remove(&device->dc_devchan_list.dl_list, channel);
		dcopy_list_fini(&channel->ch_poll_list);
		dcopy_stats_fini(channel);
		channel->ch_cb->cb_channel_free(&channel->ch_channel_private);
		kmem_free(channel, sizeof (*channel));
		channel = list_head(&device->dc_devchan_list.dl_list);
	}

	/* remove it from the list of devices */
	list_remove(&dcopy_statep->d_device_list.dl_list, device);

	mutex_exit(&dcopy_statep->d_device_list.dl_mutex);

	/*
	 * notify the DMA device driver that the device is free to be
	 * detached.
	 */
	if (do_callback) {
		device->dc_cb->cb_unregister_complete(
		    device->dc_device_private, DCOPY_SUCCESS);
	}

	dcopy_list_fini(&device->dc_devchan_list);
	kmem_free(device, sizeof (*device));
}


/*
 * dcopy_device_channel_notify()
 */
/*ARGSUSED*/
void
dcopy_device_channel_notify(dcopy_handle_t handle, int status)
{
	struct dcopy_channel_s *channel;
	dcopy_list_t *poll_list;
	dcopy_cmd_priv_t priv;
	int e;


	ASSERT(status == DCOPY_COMPLETION);
	channel = handle;

	poll_list = &channel->ch_poll_list;

	/*
	 * when we get a completion notification from the device, go through
	 * all of the commands blocking on this channel and see if they have
	 * completed. If they have, remove the command and wake up the blocked
	 * thread. Once we hit a command which is still pending, we are done
	 * polling since commands in a channel complete in order.
	 */
	mutex_enter(&poll_list->dl_mutex);
	if (poll_list->dl_cnt != 0) {
		priv = list_head(&poll_list->dl_list);
		while (priv != NULL) {
			atomic_inc_64(&channel->
			    ch_stat.cs_notify_poll.value.ui64);
			e = channel->ch_cb->cb_cmd_poll(
			    channel->ch_channel_private,
			    priv->pr_cmd);
			if (e == DCOPY_PENDING) {
				atomic_inc_64(&channel->
				    ch_stat.cs_notify_pending.value.ui64);
				break;
			}

			poll_list->dl_cnt--;
			list_remove(&poll_list->dl_list, priv);

			mutex_enter(&priv->pr_mutex);
			priv->pr_wait = B_FALSE;
			cv_signal(&priv->pr_cv);
			mutex_exit(&priv->pr_mutex);

			priv = list_head(&poll_list->dl_list);
		}
	}

	mutex_exit(&poll_list->dl_mutex);
}
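
/*
 * Illustrative sketch (not part of the original module): the expected caller
 * of dcopy_device_channel_notify() is the DMA engine driver's completion
 * interrupt handler. xxdma_chan_intr() is hypothetical; it assumes the
 * driver stashed the dcopy_handle_t it was given via cb_channel_alloc() as
 * its interrupt argument, and that DDI_INTR_CLAIMED is visible through the
 * DDI headers already included above.
 */
/*ARGSUSED*/
static uint_t
xxdma_chan_intr(caddr_t arg1, caddr_t arg2)
{
	dcopy_handle_t handle = (dcopy_handle_t)arg1;

	/* wake any threads blocked in dcopy_cmd_poll(..., DCOPY_POLL_BLOCK) */
	dcopy_device_channel_notify(handle, DCOPY_COMPLETION);

	return (DDI_INTR_CLAIMED);
}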


/*
 * dcopy_stats_init()
 */
static int
dcopy_stats_init(dcopy_handle_t channel)
{
#define	CHANSTRSIZE	20
	char chanstr[CHANSTRSIZE];
	dcopy_stats_t *stats;
	int instance;
	char *name;


	stats = &channel->ch_stat;
	name = (char *)ddi_driver_name(channel->ch_device->dc_info.di_dip);
	instance = ddi_get_instance(channel->ch_device->dc_info.di_dip);

	(void) snprintf(chanstr, CHANSTRSIZE, "channel%d",
	    (uint32_t)channel->ch_info.qc_chan_num);

	channel->ch_kstat = kstat_create(name, instance, chanstr, "misc",
	    KSTAT_TYPE_NAMED, sizeof (dcopy_stats_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (channel->ch_kstat == NULL) {
		return (DCOPY_FAILURE);
	}
	channel->ch_kstat->ks_data = stats;

	kstat_named_init(&stats->cs_bytes_xfer, "bytes_xfer",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_cmd_alloc, "cmd_alloc",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_cmd_post, "cmd_post",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_cmd_poll, "cmd_poll",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_notify_poll, "notify_poll",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_notify_pending, "notify_pending",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_id, "id",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_capabilities, "capabilities",
	    KSTAT_DATA_UINT64);

	kstat_install(channel->ch_kstat);

	channel->ch_stat.cs_id.value.ui64 = channel->ch_info.qc_id;
	channel->ch_stat.cs_capabilities.value.ui64 =
	    channel->ch_info.qc_capabilities;

	return (DCOPY_SUCCESS);
}
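
/*
 * Illustrative note (not part of the original module): the kstats created
 * above end up named <drivername>:<instance>:channel<N>, where the driver
 * name and instance come from the registering DMA device's dev_info node.
 * For example, if the provider were a hypothetical "ioat" driver, instance
 * 0, channel 0, the counters could be read from userland with something
 * like:
 *
 *	# kstat -m ioat -i 0 -n channel0
 *
 * showing bytes_xfer, cmd_alloc, cmd_post, cmd_poll, notify_poll,
 * notify_pending, id and capabilities.
 */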


/*
 * dcopy_stats_fini()
 */
static void
dcopy_stats_fini(dcopy_handle_t channel)
{
	kstat_delete(channel->ch_kstat);
}
/* *** END OF DEVICE INTERFACE *** */
