• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/staging/tidspbridge/core/
1/*
2 * chnl_sm.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Implements upper edge functions for Bridge driver channel module.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19/*
20 *      The lower edge functions must be implemented by the Bridge driver
21 *      writer, and are declared in chnl_sm.h.
22 *
 23 *      Care is taken in this code to prevent simultaneous access to channel
24 *      queues from
25 *      1. Threads.
26 *      2. io_dpc(), scheduled from the io_isr() as an event.
27 *
28 *      This is done primarily by:
29 *      - Semaphores.
30 *      - state flags in the channel object; and
31 *      - ensuring the IO_Dispatch() routine, which is called from both
32 *        CHNL_AddIOReq() and the DPC(if implemented), is not re-entered.
33 *
34 *  Channel Invariant:
35 *      There is an important invariant condition which must be maintained per
36 *      channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
 37 *      which may cause timeouts and/or failure of function sync_wait_on_event.
38 *      This invariant condition is:
39 *
40 *          LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is reset
41 *      and
42 *          !LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is set.
43 */
44
45#include <linux/types.h>
46
47/*  ----------------------------------- OS */
48#include <dspbridge/host_os.h>
49
50/*  ----------------------------------- DSP/BIOS Bridge */
51#include <dspbridge/dbdefs.h>
52
53/*  ----------------------------------- Trace & Debug */
54#include <dspbridge/dbc.h>
55
56/*  ----------------------------------- OS Adaptation Layer */
57#include <dspbridge/cfg.h>
58#include <dspbridge/sync.h>
59
60/*  ----------------------------------- Bridge Driver */
61#include <dspbridge/dspdefs.h>
62#include <dspbridge/dspchnl.h>
63#include "_tiomap.h"
64
65/*  ----------------------------------- Platform Manager */
66#include <dspbridge/dev.h>
67
68/*  ----------------------------------- Others */
69#include <dspbridge/io_sm.h>
70
71/*  ----------------------------------- Define for This */
72#define USERMODE_ADDR   PAGE_OFFSET
73
74#define MAILBOX_IRQ INT_MAIL_MPU_IRQ
75
76/*  ----------------------------------- Function Prototypes */
77static struct lst_list *create_chirp_list(u32 chirps);
78
79static void free_chirp_list(struct lst_list *chirp_list);
80
81static struct chnl_irp *make_new_chirp(void);
82
83static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
84				      u32 *chnl);
85
86/*
87 *  ======== bridge_chnl_add_io_req ========
88 *      Enqueue an I/O request for data transfer on a channel to the DSP.
89 *      The direction (mode) is specified in the channel object. Note the DSP
90 *      address is specified for channels opened in direct I/O mode.
91 */
92int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
93			       u32 byte_size, u32 buf_size,
94			       u32 dw_dsp_addr, u32 dw_arg)
95{
96	int status = 0;
97	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
98	struct chnl_irp *chnl_packet_obj = NULL;
99	struct bridge_dev_context *dev_ctxt;
100	struct dev_object *dev_obj;
101	u8 dw_state;
102	bool is_eos;
103	struct chnl_mgr *chnl_mgr_obj = pchnl->chnl_mgr_obj;
104	u8 *host_sys_buf = NULL;
105	bool sched_dpc = false;
106	u16 mb_val = 0;
107
108	is_eos = (byte_size == 0);
109
110	/* Validate args */
111	if (!host_buf || !pchnl) {
112		status = -EFAULT;
113	} else if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode)) {
114		status = -EPERM;
115	} else {
116		/*
117		 * Check the channel state: only queue chirp if channel state
118		 * allows it.
119		 */
120		dw_state = pchnl->dw_state;
121		if (dw_state != CHNL_STATEREADY) {
122			if (dw_state & CHNL_STATECANCEL)
123				status = -ECANCELED;
124			else if ((dw_state & CHNL_STATEEOS) &&
125				 CHNL_IS_OUTPUT(pchnl->chnl_mode))
126				status = -EPIPE;
127			else
128				/* No other possible states left */
129				DBC_ASSERT(0);
130		}
131	}
132
133	dev_obj = dev_get_first();
134	dev_get_bridge_context(dev_obj, &dev_ctxt);
135	if (!dev_ctxt)
136		status = -EFAULT;
137
138	if (status)
139		goto func_end;
140
141	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) {
142		if (!(host_buf < (void *)USERMODE_ADDR)) {
143			host_sys_buf = host_buf;
144			goto func_cont;
145		}
146		/* if addr in user mode, then copy to kernel space */
147		host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
148		if (host_sys_buf == NULL) {
149			status = -ENOMEM;
150			goto func_end;
151		}
152		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
153			status = copy_from_user(host_sys_buf, host_buf,
154						buf_size);
155			if (status) {
156				kfree(host_sys_buf);
157				host_sys_buf = NULL;
158				status = -EFAULT;
159				goto func_end;
160			}
161		}
162	}
163func_cont:
164	/* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
165	 * channels. DPCCS is held to avoid race conditions with PCPY channels.
166	 * If DPC is scheduled in process context (iosm_schedule) and any
167	 * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
168	 * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
169	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
170	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
171	if (pchnl->chnl_type == CHNL_PCPY) {
172		/* This is a processor-copy channel. */
173		if (!status && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
174			/* Check buffer size on output channels for fit. */
175			if (byte_size >
176			    io_buf_size(pchnl->chnl_mgr_obj->hio_mgr))
177				status = -EINVAL;
178
179		}
180	}
181	if (!status) {
182		/* Get a free chirp: */
183		chnl_packet_obj =
184		    (struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
185		if (chnl_packet_obj == NULL)
186			status = -EIO;
187
188	}
189	if (!status) {
190		/* Enqueue the chirp on the chnl's IORequest queue: */
191		chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
192		    host_buf;
193		if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
194			chnl_packet_obj->host_sys_buf = host_sys_buf;
195
196		/*
197		 * Note: for dma chans dw_dsp_addr contains dsp address
198		 * of SM buffer.
199		 */
200		DBC_ASSERT(chnl_mgr_obj->word_size != 0);
201		/* DSP address */
202		chnl_packet_obj->dsp_tx_addr =
203		    dw_dsp_addr / chnl_mgr_obj->word_size;
204		chnl_packet_obj->byte_size = byte_size;
205		chnl_packet_obj->buf_size = buf_size;
206		/* Only valid for output channel */
207		chnl_packet_obj->dw_arg = dw_arg;
208		chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
209					   CHNL_IOCSTATCOMPLETE);
210		lst_put_tail(pchnl->pio_requests,
211			     (struct list_head *)chnl_packet_obj);
212		pchnl->cio_reqs++;
213		DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
214		/*
215		 * If end of stream, update the channel state to prevent
216		 * more IOR's.
217		 */
218		if (is_eos)
219			pchnl->dw_state |= CHNL_STATEEOS;
220
221		/* Legacy DSM Processor-Copy */
222		DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
223		/* Request IO from the DSP */
224		io_request_chnl(chnl_mgr_obj->hio_mgr, pchnl,
225				(CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
226				 IO_OUTPUT), &mb_val);
227		sched_dpc = true;
228
229	}
230	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
231	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
232	if (mb_val != 0)
233		sm_interrupt_dsp(dev_ctxt, mb_val);
234
235	/* Schedule a DPC, to do the actual data transfer */
236	if (sched_dpc)
237		iosm_schedule(chnl_mgr_obj->hio_mgr);
238
239func_end:
240	return status;
241}
242
243/*
244 *  ======== bridge_chnl_cancel_io ========
245 *      Return all I/O requests to the client which have not yet been
246 *      transferred.  The channel's I/O completion object is
247 *      signalled, and all the I/O requests are queued as IOC's, with the
248 *      status field set to CHNL_IOCSTATCANCEL.
249 *      This call is typically used in abort situations, and is a prelude to
250 *      chnl_close();
251 */
int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	u32 chnl_id = -1;
	s8 chnl_mode;	/* NOTE(review): captured but never used below */
	struct chnl_irp *chnl_packet_obj;
	struct chnl_mgr *chnl_mgr_obj = NULL;

	/* Check args: both the channel and its manager must exist. */
	if (pchnl && pchnl->chnl_mgr_obj) {
		chnl_id = pchnl->chnl_id;
		chnl_mode = pchnl->chnl_mode;
		chnl_mgr_obj = pchnl->chnl_mgr_obj;
	} else {
		status = -EFAULT;
	}
	if (status)
		goto func_end;

	/*  Mark this channel as cancelled, to prevent further IORequests
	 *  or dispatching. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	pchnl->dw_state |= CHNL_STATECANCEL;
	/* Nothing queued: state flag alone is enough; skip the drain. */
	if (LST_IS_EMPTY(pchnl->pio_requests))
		goto func_cont;

	if (pchnl->chnl_type == CHNL_PCPY) {
		/* Indicate we have no more buffers available for transfer: */
		if (CHNL_IS_INPUT(pchnl->chnl_mode)) {
			io_cancel_chnl(chnl_mgr_obj->hio_mgr, chnl_id);
		} else {
			/* Record that we no longer have output buffers
			 * available: */
			chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
		}
	}
	/* Move all IOR's to IOC queue: each pending request is completed
	 * with zero bytes and the CANCEL status bit set, so the client can
	 * reclaim its buffers via bridge_chnl_get_ioc(). */
	while (!LST_IS_EMPTY(pchnl->pio_requests)) {
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->pio_requests);
		if (chnl_packet_obj) {
			chnl_packet_obj->byte_size = 0;
			chnl_packet_obj->status |= CHNL_IOCSTATCANCEL;
			lst_put_tail(pchnl->pio_completions,
				     (struct list_head *)chnl_packet_obj);
			pchnl->cio_cs++;
			pchnl->cio_reqs--;
			DBC_ASSERT(pchnl->cio_reqs >= 0);
		}
	}
func_cont:
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
func_end:
	return status;
}
308
309/*
310 *  ======== bridge_chnl_close ========
311 *  Purpose:
312 *      Ensures all pending I/O on this channel is cancelled, discards all
313 *      queued I/O completion notifications, then frees the resources allocated
314 *      for this channel, and makes the corresponding logical channel id
315 *      available for subsequent use.
316 */
int bridge_chnl_close(struct chnl_object *chnl_obj)
{
	int status;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;

	/* Check args: */
	if (!pchnl) {
		status = -EFAULT;
		goto func_cont;
	}
	{
		/* Cancel IO: this ensures no further IO requests or
		 * notifications. */
		status = bridge_chnl_cancel_io(chnl_obj);
	}
func_cont:
	if (!status) {
		/* Assert I/O on this channel is now cancelled: Protects
		 * from io_dpc. */
		DBC_ASSERT((pchnl->dw_state & CHNL_STATECANCEL));
		/* Invalidate channel object: Protects from
		 * CHNL_GetIOCompletion(). */
		/* Free the slot in the channel manager: */
		pchnl->chnl_mgr_obj->ap_channel[pchnl->chnl_id] = NULL;
		/* Only the open-channel count needs the manager lock;
		 * the slot above was already invalidated. */
		spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
		pchnl->chnl_mgr_obj->open_channels -= 1;
		spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
		if (pchnl->ntfy_obj) {
			ntfy_delete(pchnl->ntfy_obj);
			kfree(pchnl->ntfy_obj);
			pchnl->ntfy_obj = NULL;
		}
		/* Reset channel event: (NOTE: user_event freed in user
		 * context.). */
		if (pchnl->sync_event) {
			sync_reset_event(pchnl->sync_event);
			kfree(pchnl->sync_event);
			pchnl->sync_event = NULL;
		}
		/* Free I/O request and I/O completion queues: cancel_io
		 * above has already drained pio_requests into
		 * pio_completions, so only free lists remain here. */
		if (pchnl->pio_completions) {
			free_chirp_list(pchnl->pio_completions);
			pchnl->pio_completions = NULL;
			pchnl->cio_cs = 0;
		}
		if (pchnl->pio_requests) {
			free_chirp_list(pchnl->pio_requests);
			pchnl->pio_requests = NULL;
			pchnl->cio_reqs = 0;
		}
		if (pchnl->free_packets_list) {
			free_chirp_list(pchnl->free_packets_list);
			pchnl->free_packets_list = NULL;
		}
		/* Release channel object. */
		kfree(pchnl);
		pchnl = NULL;
	}
	DBC_ENSURE(status || !pchnl);
	return status;
}
378
379/*
380 *  ======== bridge_chnl_create ========
381 *      Create a channel manager object, responsible for opening new channels
382 *      and closing old ones for a given board.
383 */
384int bridge_chnl_create(struct chnl_mgr **channel_mgr,
385			      struct dev_object *hdev_obj,
386			      const struct chnl_mgrattrs *mgr_attrts)
387{
388	int status = 0;
389	struct chnl_mgr *chnl_mgr_obj = NULL;
390	u8 max_channels;
391
392	/* Check DBC requirements: */
393	DBC_REQUIRE(channel_mgr != NULL);
394	DBC_REQUIRE(mgr_attrts != NULL);
395	DBC_REQUIRE(mgr_attrts->max_channels > 0);
396	DBC_REQUIRE(mgr_attrts->max_channels <= CHNL_MAXCHANNELS);
397	DBC_REQUIRE(mgr_attrts->word_size != 0);
398
399	/* Allocate channel manager object */
400	chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);
401	if (chnl_mgr_obj) {
402		/*
403		 * The max_channels attr must equal the # of supported chnls for
404		 * each transport(# chnls for PCPY = DDMA = ZCPY): i.e.
405		 *      mgr_attrts->max_channels = CHNL_MAXCHANNELS =
406		 *                       DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
407		 */
408		DBC_ASSERT(mgr_attrts->max_channels == CHNL_MAXCHANNELS);
409		max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
410		/* Create array of channels */
411		chnl_mgr_obj->ap_channel = kzalloc(sizeof(struct chnl_object *)
412						* max_channels, GFP_KERNEL);
413		if (chnl_mgr_obj->ap_channel) {
414			/* Initialize chnl_mgr object */
415			chnl_mgr_obj->dw_type = CHNL_TYPESM;
416			chnl_mgr_obj->word_size = mgr_attrts->word_size;
417			/* Total # chnls supported */
418			chnl_mgr_obj->max_channels = max_channels;
419			chnl_mgr_obj->open_channels = 0;
420			chnl_mgr_obj->dw_output_mask = 0;
421			chnl_mgr_obj->dw_last_output = 0;
422			chnl_mgr_obj->hdev_obj = hdev_obj;
423			spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
424		} else {
425			status = -ENOMEM;
426		}
427	} else {
428		status = -ENOMEM;
429	}
430
431	if (status) {
432		bridge_chnl_destroy(chnl_mgr_obj);
433		*channel_mgr = NULL;
434	} else {
435		/* Return channel manager object to caller... */
436		*channel_mgr = chnl_mgr_obj;
437	}
438	return status;
439}
440
441/*
442 *  ======== bridge_chnl_destroy ========
443 *  Purpose:
444 *      Close all open channels, and destroy the channel manager.
445 */
446int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr)
447{
448	int status = 0;
449	struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
450	u32 chnl_id;
451
452	if (hchnl_mgr) {
453		/* Close all open channels: */
454		for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels;
455		     chnl_id++) {
456			status =
457			    bridge_chnl_close(chnl_mgr_obj->ap_channel
458					      [chnl_id]);
459			if (status)
460				dev_dbg(bridge, "%s: Error status 0x%x\n",
461					__func__, status);
462		}
463
464		/* Free channel manager object: */
465		kfree(chnl_mgr_obj->ap_channel);
466
467		/* Set hchnl_mgr to NULL in device object. */
468		dev_set_chnl_mgr(chnl_mgr_obj->hdev_obj, NULL);
469		/* Free this Chnl Mgr object: */
470		kfree(hchnl_mgr);
471	} else {
472		status = -EFAULT;
473	}
474	return status;
475}
476
477/*
478 *  ======== bridge_chnl_flush_io ========
479 *  purpose:
480 *      Flushes all the outstanding data requests on a channel.
481 */
482int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
483{
484	int status = 0;
485	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
486	s8 chnl_mode = -1;
487	struct chnl_mgr *chnl_mgr_obj;
488	struct chnl_ioc chnl_ioc_obj;
489	/* Check args: */
490	if (pchnl) {
491		if ((timeout == CHNL_IOCNOWAIT)
492		    && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
493			status = -EINVAL;
494		} else {
495			chnl_mode = pchnl->chnl_mode;
496			chnl_mgr_obj = pchnl->chnl_mgr_obj;
497		}
498	} else {
499		status = -EFAULT;
500	}
501	if (!status) {
502		/* Note: Currently, if another thread continues to add IO
503		 * requests to this channel, this function will continue to
504		 * flush all such queued IO requests. */
505		if (CHNL_IS_OUTPUT(chnl_mode)
506		    && (pchnl->chnl_type == CHNL_PCPY)) {
507			/* Wait for IO completions, up to the specified
508			 * timeout: */
509			while (!LST_IS_EMPTY(pchnl->pio_requests) && !status) {
510				status = bridge_chnl_get_ioc(chnl_obj,
511						timeout, &chnl_ioc_obj);
512				if (status)
513					continue;
514
515				if (chnl_ioc_obj.status & CHNL_IOCSTATTIMEOUT)
516					status = -ETIMEDOUT;
517
518			}
519		} else {
520			status = bridge_chnl_cancel_io(chnl_obj);
521			/* Now, leave the channel in the ready state: */
522			pchnl->dw_state &= ~CHNL_STATECANCEL;
523		}
524	}
525	DBC_ENSURE(status || LST_IS_EMPTY(pchnl->pio_requests));
526	return status;
527}
528
529/*
530 *  ======== bridge_chnl_get_info ========
531 *  Purpose:
532 *      Retrieve information related to a channel.
533 */
534int bridge_chnl_get_info(struct chnl_object *chnl_obj,
535			     struct chnl_info *channel_info)
536{
537	int status = 0;
538	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
539	if (channel_info != NULL) {
540		if (pchnl) {
541			/* Return the requested information: */
542			channel_info->hchnl_mgr = pchnl->chnl_mgr_obj;
543			channel_info->event_obj = pchnl->user_event;
544			channel_info->cnhl_id = pchnl->chnl_id;
545			channel_info->dw_mode = pchnl->chnl_mode;
546			channel_info->bytes_tx = pchnl->bytes_moved;
547			channel_info->process = pchnl->process;
548			channel_info->sync_event = pchnl->sync_event;
549			channel_info->cio_cs = pchnl->cio_cs;
550			channel_info->cio_reqs = pchnl->cio_reqs;
551			channel_info->dw_state = pchnl->dw_state;
552		} else {
553			status = -EFAULT;
554		}
555	} else {
556		status = -EFAULT;
557	}
558	return status;
559}
560
561/*
562 *  ======== bridge_chnl_get_ioc ========
563 *      Optionally wait for I/O completion on a channel.  Dequeue an I/O
564 *      completion record, which contains information about the completed
565 *      I/O request.
566 *      Note: Ensures Channel Invariant (see notes above).
567 */
int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
			    struct chnl_ioc *chan_ioc)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj;
	int stat_sync;
	bool dequeue_ioc = true;
	struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
	u8 *host_sys_buf = NULL;
	struct bridge_dev_context *dev_ctxt;
	struct dev_object *dev_obj;

	/* Check args: */
	if (!chan_ioc || !pchnl) {
		status = -EFAULT;
	} else if (timeout == CHNL_IOCNOWAIT) {
		/* Non-blocking call with nothing completed yet. */
		if (LST_IS_EMPTY(pchnl->pio_completions))
			status = -EREMOTEIO;

	}

	dev_obj = dev_get_first();
	dev_get_bridge_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = -EFAULT;

	if (status)
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	/* Blocking path: wait for a completion to arrive. */
	if (timeout !=
	    CHNL_IOCNOWAIT && LST_IS_EMPTY(pchnl->pio_completions)) {
		if (timeout == CHNL_IOCINFINITE)
			timeout = SYNC_INFINITE;

		stat_sync = sync_wait_on_event(pchnl->sync_event, timeout);
		if (stat_sync == -ETIME) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			dequeue_ioc = false;
		} else if (stat_sync == -EPERM) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes. */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (LST_IS_EMPTY(pchnl->pio_completions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				dequeue_ioc = false;
			}
		}
	}
	/* See comment in AddIOReq: mailbox IRQ disabled + manager lock held
	 * while the completion queue is touched. */
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (dequeue_ioc) {
		/* Dequeue IOC and set chan_ioc; */
		DBC_ASSERT(!LST_IS_EMPTY(pchnl->pio_completions));
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->pio_completions);
		/* Update chan_ioc from channel state and chirp: */
		if (chnl_packet_obj) {
			pchnl->cio_cs--;
			/*  If this is a zero-copy channel, then set IOC's pbuf
			 *  to the DSP's address. This DSP address will get
			 *  translated to user's virtual addr later. */
			{
				host_sys_buf = chnl_packet_obj->host_sys_buf;
				ioc.pbuf = chnl_packet_obj->host_user_buf;
			}
			ioc.byte_size = chnl_packet_obj->byte_size;
			ioc.buf_size = chnl_packet_obj->buf_size;
			ioc.dw_arg = chnl_packet_obj->dw_arg;
			ioc.status |= chnl_packet_obj->status;
			/* Place the used chirp on the free list: */
			lst_put_tail(pchnl->free_packets_list,
				     (struct list_head *)chnl_packet_obj);
		} else {
			ioc.pbuf = NULL;
			ioc.byte_size = 0;
		}
	} else {
		/* Timeout/cancel: report an empty completion. */
		ioc.pbuf = NULL;
		ioc.byte_size = 0;
		ioc.dw_arg = 0;
		ioc.buf_size = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!LST_IS_EMPTY(pchnl->pio_completions)) {
		/*  Since DSPStream_Reclaim() does not take a timeout
		 *  parameter, we pass the stream's timeout value to
		 *  bridge_chnl_get_ioc. We cannot determine whether or not
		 *  we have waited in User mode. Since the stream's timeout
		 *  value may be non-zero, we still have to set the event.
		 *  Therefore, this optimization is taken out.
		 *
		 *  if (timeout == CHNL_IOCNOWAIT) {
		 *    ... ensure event is set..
		 *      sync_set_event(pchnl->sync_event);
		 *  } */
		sync_set_event(pchnl->sync_event);
	} else {
		/* else, if list is empty, ensure event is reset. */
		sync_reset_event(pchnl->sync_event);
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	/* Copy-back path for PCPY channels whose user buffer lives in
	 * user space (the bounce buffer was allocated in AddIOReq). */
	if (dequeue_ioc
	    && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
		if (!(ioc.pbuf < (void *)USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!host_sys_buf || !ioc.pbuf) {
			status = -EFAULT;
			goto func_cont;
		}
		if (!CHNL_IS_INPUT(pchnl->chnl_mode))
			goto func_cont1;

		/*host_user_buf */
		status = copy_to_user(ioc.pbuf, host_sys_buf, ioc.byte_size);
		if (status) {
			/* Ignore copy failures from a task that is exiting:
			 * its address space is already being torn down. */
			if (current->flags & PF_EXITING)
				status = 0;
		}
		if (status)
			status = -EFAULT;
func_cont1:
		kfree(host_sys_buf);
	}
func_cont:
	/* Update User's IOC block: */
	*chan_ioc = ioc;
func_end:
	return status;
}
706
707/*
708 *  ======== bridge_chnl_get_mgr_info ========
709 *      Retrieve information related to the channel manager.
710 */
711int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id,
712				 struct chnl_mgrinfo *mgr_info)
713{
714	int status = 0;
715	struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr;
716
717	if (mgr_info != NULL) {
718		if (ch_id <= CHNL_MAXCHANNELS) {
719			if (hchnl_mgr) {
720				/* Return the requested information: */
721				mgr_info->chnl_obj =
722				    chnl_mgr_obj->ap_channel[ch_id];
723				mgr_info->open_channels =
724				    chnl_mgr_obj->open_channels;
725				mgr_info->dw_type = chnl_mgr_obj->dw_type;
726				/* total # of chnls */
727				mgr_info->max_channels =
728				    chnl_mgr_obj->max_channels;
729			} else {
730				status = -EFAULT;
731			}
732		} else {
733			status = -ECHRNG;
734		}
735	} else {
736		status = -EFAULT;
737	}
738
739	return status;
740}
741
742/*
743 *  ======== bridge_chnl_idle ========
744 *      Idles a particular channel.
745 */
746int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
747			    bool flush_data)
748{
749	s8 chnl_mode;
750	struct chnl_mgr *chnl_mgr_obj;
751	int status = 0;
752
753	DBC_REQUIRE(chnl_obj);
754
755	chnl_mode = chnl_obj->chnl_mode;
756	chnl_mgr_obj = chnl_obj->chnl_mgr_obj;
757
758	if (CHNL_IS_OUTPUT(chnl_mode) && !flush_data) {
759		/* Wait for IO completions, up to the specified timeout: */
760		status = bridge_chnl_flush_io(chnl_obj, timeout);
761	} else {
762		status = bridge_chnl_cancel_io(chnl_obj);
763
764		/* Reset the byte count and put channel back in ready state. */
765		chnl_obj->bytes_moved = 0;
766		chnl_obj->dw_state &= ~CHNL_STATECANCEL;
767	}
768
769	return status;
770}
771
772/*
773 *  ======== bridge_chnl_open ========
774 *      Open a new half-duplex channel to the DSP board.
775 */
int bridge_chnl_open(struct chnl_object **chnl,
			    struct chnl_mgr *hchnl_mgr, s8 chnl_mode,
			    u32 ch_id, const struct chnl_attr *pattrs)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
	struct chnl_object *pchnl = NULL;
	struct sync_object *sync_event = NULL;
	/* Ensure DBC requirements: */
	DBC_REQUIRE(chnl != NULL);
	DBC_REQUIRE(pattrs != NULL);
	DBC_REQUIRE(hchnl_mgr != NULL);
	*chnl = NULL;
	/* Validate Args: at least one IO request packet must be asked for. */
	if (pattrs->uio_reqs == 0) {
		status = -EINVAL;
	} else {
		if (!hchnl_mgr) {
			status = -EFAULT;
		} else {
			if (ch_id != CHNL_PICKFREE) {
				/* Caller asked for a specific slot: it must
				 * be in range and not already in use. */
				if (ch_id >= chnl_mgr_obj->max_channels)
					status = -ECHRNG;
				else if (chnl_mgr_obj->ap_channel[ch_id] !=
					 NULL)
					status = -EALREADY;
			} else {
				/* Check for free channel */
				status =
				    search_free_channel(chnl_mgr_obj, &ch_id);
			}
		}
	}
	if (status)
		goto func_end;

	DBC_ASSERT(ch_id < chnl_mgr_obj->max_channels);
	/* Create channel object: */
	pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
	if (!pchnl) {
		status = -ENOMEM;
		goto func_end;
	}
	/* Protect queues from io_dpc: the channel stays in CANCEL state
	 * until fully constructed (set READY at the very end). */
	pchnl->dw_state = CHNL_STATECANCEL;
	/* Allocate initial IOR and IOC queues: */
	pchnl->free_packets_list = create_chirp_list(pattrs->uio_reqs);
	pchnl->pio_requests = create_chirp_list(0);
	pchnl->pio_completions = create_chirp_list(0);
	pchnl->chnl_packets = pattrs->uio_reqs;
	pchnl->cio_cs = 0;
	pchnl->cio_reqs = 0;
	/* Event used both for user notification and internal sync. */
	sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (sync_event)
		sync_init_event(sync_event);
	else
		status = -ENOMEM;

	if (!status) {
		pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
		if (pchnl->ntfy_obj)
			ntfy_init(pchnl->ntfy_obj);
		else
			status = -ENOMEM;
	}

	if (!status) {
		/* All three chirp lists must have been created above. */
		if (pchnl->pio_completions && pchnl->pio_requests &&
		    pchnl->free_packets_list) {
			/* Initialize CHNL object fields: */
			pchnl->chnl_mgr_obj = chnl_mgr_obj;
			pchnl->chnl_id = ch_id;
			pchnl->chnl_mode = chnl_mode;
			pchnl->user_event = sync_event;
			pchnl->sync_event = sync_event;
			/* Get the process handle */
			pchnl->process = current->tgid;
			pchnl->pcb_arg = 0;
			pchnl->bytes_moved = 0;
			/* Default to proc-copy */
			pchnl->chnl_type = CHNL_PCPY;
		} else {
			status = -ENOMEM;
		}
	}

	if (status) {
		/* Free memory: unwind everything allocated above. */
		if (pchnl->pio_completions) {
			free_chirp_list(pchnl->pio_completions);
			pchnl->pio_completions = NULL;
			pchnl->cio_cs = 0;
		}
		if (pchnl->pio_requests) {
			free_chirp_list(pchnl->pio_requests);
			pchnl->pio_requests = NULL;
		}
		if (pchnl->free_packets_list) {
			free_chirp_list(pchnl->free_packets_list);
			pchnl->free_packets_list = NULL;
		}
		kfree(sync_event);
		sync_event = NULL;

		if (pchnl->ntfy_obj) {
			ntfy_delete(pchnl->ntfy_obj);
			kfree(pchnl->ntfy_obj);
			pchnl->ntfy_obj = NULL;
		}
		kfree(pchnl);
	} else {
		/* Insert channel object in channel manager: */
		chnl_mgr_obj->ap_channel[pchnl->chnl_id] = pchnl;
		spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
		chnl_mgr_obj->open_channels++;
		spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
		/* Return result: channel becomes visible to I/O now. */
		pchnl->dw_state = CHNL_STATEREADY;
		*chnl = pchnl;
	}
func_end:
	DBC_ENSURE((!status && pchnl) || (*chnl == NULL));
	return status;
}
901
902/*
903 *  ======== bridge_chnl_register_notify ========
904 *      Registers for events on a particular channel.
905 */
906int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
907				    u32 event_mask, u32 notify_type,
908				    struct dsp_notification *hnotification)
909{
910	int status = 0;
911
912	DBC_ASSERT(!(event_mask & ~(DSP_STREAMDONE | DSP_STREAMIOCOMPLETION)));
913
914	if (event_mask)
915		status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
916						event_mask, notify_type);
917	else
918		status = ntfy_unregister(chnl_obj->ntfy_obj, hnotification);
919
920	return status;
921}
922
923/*
924 *  ======== create_chirp_list ========
925 *  Purpose:
926 *      Initialize a queue of channel I/O Request/Completion packets.
927 *  Parameters:
928 *      chirps:     Number of Chirps to allocate.
929 *  Returns:
930 *      Pointer to queue of IRPs, or NULL.
931 *  Requires:
932 *  Ensures:
933 */
934static struct lst_list *create_chirp_list(u32 chirps)
935{
936	struct lst_list *chirp_list;
937	struct chnl_irp *chnl_packet_obj;
938	u32 i;
939
940	chirp_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
941
942	if (chirp_list) {
943		INIT_LIST_HEAD(&chirp_list->head);
944		/* Make N chirps and place on queue. */
945		for (i = 0; (i < chirps)
946		     && ((chnl_packet_obj = make_new_chirp()) != NULL); i++) {
947			lst_put_tail(chirp_list,
948				     (struct list_head *)chnl_packet_obj);
949		}
950
951		/* If we couldn't allocate all chirps, free those allocated: */
952		if (i != chirps) {
953			free_chirp_list(chirp_list);
954			chirp_list = NULL;
955		}
956	}
957
958	return chirp_list;
959}
960
961/*
962 *  ======== free_chirp_list ========
963 *  Purpose:
964 *      Free the queue of Chirps.
965 */
966static void free_chirp_list(struct lst_list *chirp_list)
967{
968	DBC_REQUIRE(chirp_list != NULL);
969
970	while (!LST_IS_EMPTY(chirp_list))
971		kfree(lst_get_head(chirp_list));
972
973	kfree(chirp_list);
974}
975
976/*
977 *  ======== make_new_chirp ========
978 *      Allocate the memory for a new channel IRP.
979 */
980static struct chnl_irp *make_new_chirp(void)
981{
982	struct chnl_irp *chnl_packet_obj;
983
984	chnl_packet_obj = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
985	if (chnl_packet_obj != NULL) {
986		/* lst_init_elem only resets the list's member values. */
987		lst_init_elem(&chnl_packet_obj->link);
988	}
989
990	return chnl_packet_obj;
991}
992
993/*
994 *  ======== search_free_channel ========
995 *      Search for a free channel slot in the array of channel pointers.
996 */
997static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
998				      u32 *chnl)
999{
1000	int status = -ENOSR;
1001	u32 i;
1002
1003	DBC_REQUIRE(chnl_mgr_obj);
1004
1005	for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
1006		if (chnl_mgr_obj->ap_channel[i] == NULL) {
1007			status = 0;
1008			*chnl = i;
1009			break;
1010		}
1011	}
1012
1013	return status;
1014}
1015