• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/staging/tidspbridge/core/
1/*
2 * msg_sm.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Implements upper edge functions for Bridge message module.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18#include <linux/types.h>
19
20/*  ----------------------------------- DSP/BIOS Bridge */
21#include <dspbridge/dbdefs.h>
22
23/*  ----------------------------------- Trace & Debug */
24#include <dspbridge/dbc.h>
25
26/*  ----------------------------------- OS Adaptation Layer */
27#include <dspbridge/list.h>
28#include <dspbridge/sync.h>
29
30/*  ----------------------------------- Platform Manager */
31#include <dspbridge/dev.h>
32
33/*  ----------------------------------- Others */
34#include <dspbridge/io_sm.h>
35
36/*  ----------------------------------- This */
37#include <_msg_sm.h>
38#include <dspbridge/dspmsg.h>
39
40/*  ----------------------------------- Function Prototypes */
41static int add_new_msg(struct lst_list *msg_list);
42static void delete_msg_mgr(struct msg_mgr *hmsg_mgr);
43static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp);
44static void free_msg_list(struct lst_list *msg_list);
45
46/*
47 *  ======== bridge_msg_create ========
48 *      Create an object to manage message queues. Only one of these objects
49 *      can exist per device object.
50 */
51int bridge_msg_create(struct msg_mgr **msg_man,
52			     struct dev_object *hdev_obj,
53			     msg_onexit msg_callback)
54{
55	struct msg_mgr *msg_mgr_obj;
56	struct io_mgr *hio_mgr;
57	int status = 0;
58
59	if (!msg_man || !msg_callback || !hdev_obj) {
60		status = -EFAULT;
61		goto func_end;
62	}
63	dev_get_io_mgr(hdev_obj, &hio_mgr);
64	if (!hio_mgr) {
65		status = -EFAULT;
66		goto func_end;
67	}
68	*msg_man = NULL;
69	/* Allocate msg_ctrl manager object */
70	msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL);
71
72	if (msg_mgr_obj) {
73		msg_mgr_obj->on_exit = msg_callback;
74		msg_mgr_obj->hio_mgr = hio_mgr;
75		/* List of MSG_QUEUEs */
76		msg_mgr_obj->queue_list = kzalloc(sizeof(struct lst_list),
77							GFP_KERNEL);
78		/*  Queues of message frames for messages to the DSP. Message
79		 * frames will only be added to the free queue when a
80		 * msg_queue object is created. */
81		msg_mgr_obj->msg_free_list = kzalloc(sizeof(struct lst_list),
82							GFP_KERNEL);
83		msg_mgr_obj->msg_used_list = kzalloc(sizeof(struct lst_list),
84							GFP_KERNEL);
85		if (msg_mgr_obj->queue_list == NULL ||
86		    msg_mgr_obj->msg_free_list == NULL ||
87		    msg_mgr_obj->msg_used_list == NULL) {
88			status = -ENOMEM;
89		} else {
90			INIT_LIST_HEAD(&msg_mgr_obj->queue_list->head);
91			INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list->head);
92			INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list->head);
93			spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
94		}
95
96		/*  Create an event to be used by bridge_msg_put() in waiting
97		 *  for an available free frame from the message manager. */
98		msg_mgr_obj->sync_event =
99				kzalloc(sizeof(struct sync_object), GFP_KERNEL);
100		if (!msg_mgr_obj->sync_event)
101			status = -ENOMEM;
102		else
103			sync_init_event(msg_mgr_obj->sync_event);
104
105		if (!status)
106			*msg_man = msg_mgr_obj;
107		else
108			delete_msg_mgr(msg_mgr_obj);
109
110	} else {
111		status = -ENOMEM;
112	}
113func_end:
114	return status;
115}
116
117/*
118 *  ======== bridge_msg_create_queue ========
119 *      Create a msg_queue for sending/receiving messages to/from a node
120 *      on the DSP.
121 */
122int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
123				struct msg_queue **msgq,
124				u32 msgq_id, u32 max_msgs, void *arg)
125{
126	u32 i;
127	u32 num_allocated = 0;
128	struct msg_queue *msg_q;
129	int status = 0;
130
131	if (!hmsg_mgr || msgq == NULL || !hmsg_mgr->msg_free_list) {
132		status = -EFAULT;
133		goto func_end;
134	}
135
136	*msgq = NULL;
137	/* Allocate msg_queue object */
138	msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
139	if (!msg_q) {
140		status = -ENOMEM;
141		goto func_end;
142	}
143	lst_init_elem((struct list_head *)msg_q);
144	msg_q->max_msgs = max_msgs;
145	msg_q->hmsg_mgr = hmsg_mgr;
146	msg_q->arg = arg;	/* Node handle */
147	msg_q->msgq_id = msgq_id;	/* Node env (not valid yet) */
148	/* Queues of Message frames for messages from the DSP */
149	msg_q->msg_free_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
150	msg_q->msg_used_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
151	if (msg_q->msg_free_list == NULL || msg_q->msg_used_list == NULL)
152		status = -ENOMEM;
153	else {
154		INIT_LIST_HEAD(&msg_q->msg_free_list->head);
155		INIT_LIST_HEAD(&msg_q->msg_used_list->head);
156	}
157
158	/*  Create event that will be signalled when a message from
159	 *  the DSP is available. */
160	if (!status) {
161		msg_q->sync_event = kzalloc(sizeof(struct sync_object),
162							GFP_KERNEL);
163		if (msg_q->sync_event)
164			sync_init_event(msg_q->sync_event);
165		else
166			status = -ENOMEM;
167	}
168
169	/* Create a notification list for message ready notification. */
170	if (!status) {
171		msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
172							GFP_KERNEL);
173		if (msg_q->ntfy_obj)
174			ntfy_init(msg_q->ntfy_obj);
175		else
176			status = -ENOMEM;
177	}
178
179	/*  Create events that will be used to synchronize cleanup
180	 *  when the object is deleted. sync_done will be set to
181	 *  unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
182	 *  will be set by the unblocked thread to signal that it
183	 *  is unblocked and will no longer reference the object. */
184	if (!status) {
185		msg_q->sync_done = kzalloc(sizeof(struct sync_object),
186							GFP_KERNEL);
187		if (msg_q->sync_done)
188			sync_init_event(msg_q->sync_done);
189		else
190			status = -ENOMEM;
191	}
192
193	if (!status) {
194		msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object),
195							GFP_KERNEL);
196		if (msg_q->sync_done_ack)
197			sync_init_event(msg_q->sync_done_ack);
198		else
199			status = -ENOMEM;
200	}
201
202	if (!status) {
203		/* Enter critical section */
204		spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
205		/* Initialize message frames and put in appropriate queues */
206		for (i = 0; i < max_msgs && !status; i++) {
207			status = add_new_msg(hmsg_mgr->msg_free_list);
208			if (!status) {
209				num_allocated++;
210				status = add_new_msg(msg_q->msg_free_list);
211			}
212		}
213		if (status) {
214			/*  Stay inside CS to prevent others from taking any
215			 *  of the newly allocated message frames. */
216			delete_msg_queue(msg_q, num_allocated);
217		} else {
218			lst_put_tail(hmsg_mgr->queue_list,
219				     (struct list_head *)msg_q);
220			*msgq = msg_q;
221			/* Signal that free frames are now available */
222			if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
223				sync_set_event(hmsg_mgr->sync_event);
224
225		}
226		/* Exit critical section */
227		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
228	} else {
229		delete_msg_queue(msg_q, 0);
230	}
231func_end:
232	return status;
233}
234
/*
 *  ======== bridge_msg_delete ========
 *      Delete a msg_ctrl manager allocated in bridge_msg_create().
 *      A NULL handle is silently ignored.
 */
void bridge_msg_delete(struct msg_mgr *hmsg_mgr)
{
	if (!hmsg_mgr)
		return;

	delete_msg_mgr(hmsg_mgr);
}
244
245/*
246 *  ======== bridge_msg_delete_queue ========
247 *      Delete a msg_ctrl queue allocated in bridge_msg_create_queue.
248 */
249void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
250{
251	struct msg_mgr *hmsg_mgr;
252	u32 io_msg_pend;
253
254	if (!msg_queue_obj || !msg_queue_obj->hmsg_mgr)
255		goto func_end;
256
257	hmsg_mgr = msg_queue_obj->hmsg_mgr;
258	msg_queue_obj->done = true;
259	/*  Unblock all threads blocked in MSG_Get() or MSG_Put(). */
260	io_msg_pend = msg_queue_obj->io_msg_pend;
261	while (io_msg_pend) {
262		/* Unblock thread */
263		sync_set_event(msg_queue_obj->sync_done);
264		/* Wait for acknowledgement */
265		sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE);
266		io_msg_pend = msg_queue_obj->io_msg_pend;
267	}
268	/* Remove message queue from hmsg_mgr->queue_list */
269	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
270	lst_remove_elem(hmsg_mgr->queue_list,
271			(struct list_head *)msg_queue_obj);
272	/* Free the message queue object */
273	delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
274	if (!hmsg_mgr->msg_free_list)
275		goto func_cont;
276	if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
277		sync_reset_event(hmsg_mgr->sync_event);
278func_cont:
279	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
280func_end:
281	return;
282}
283
/*
 *  ======== bridge_msg_get ========
 *      Get a message from a msg_ctrl queue.
 *
 *      Copies the next message queued for this node into *pmsg.  If no
 *      message is available, blocks for up to utimeout waiting on either
 *      sync_event (message arrived) or sync_done (queue being deleted).
 *      Returns 0 on success, -EPERM if the queue is shutting down, or
 *      the sync-wait status on timeout/error.
 */
int bridge_msg_get(struct msg_queue *msg_queue_obj,
			  struct dsp_msg *pmsg, u32 utimeout)
{
	struct msg_frame *msg_frame_obj;
	struct msg_mgr *hmsg_mgr;
	bool got_msg = false;
	struct sync_object *syncs[2];
	u32 index;
	int status = 0;

	if (!msg_queue_obj || pmsg == NULL) {
		/* NOTE(review): -ENOMEM for NULL arguments is inconsistent
		 * with the -EFAULT used elsewhere in this file — confirm
		 * whether callers depend on this value before changing. */
		status = -ENOMEM;
		goto func_end;
	}

	hmsg_mgr = msg_queue_obj->hmsg_mgr;
	if (!msg_queue_obj->msg_used_list) {
		status = -EFAULT;
		goto func_end;
	}

	/* Enter critical section */
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	/* If a message is already there, get it */
	if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list)) {
		msg_frame_obj = (struct msg_frame *)
		    lst_get_head(msg_queue_obj->msg_used_list);
		if (msg_frame_obj != NULL) {
			/* Copy out the payload and recycle the frame */
			*pmsg = msg_frame_obj->msg_data.msg;
			lst_put_tail(msg_queue_obj->msg_free_list,
				     (struct list_head *)msg_frame_obj);
			/* Last message consumed: clear the "ready" event */
			if (LST_IS_EMPTY(msg_queue_obj->msg_used_list))
				sync_reset_event(msg_queue_obj->sync_event);

			got_msg = true;
		}
	} else {
		/* Nothing queued: either fail (queue being deleted) or
		 * register ourselves as a pending waiter. */
		if (msg_queue_obj->done)
			status = -EPERM;
		else
			msg_queue_obj->io_msg_pend++;

	}
	/* Exit critical section */
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
	if (!status && !got_msg) {
		/*  Wait til message is available, timeout, or done. We don't
		 *  have to schedule the DPC, since the DSP will send messages
		 *  when they are available. */
		syncs[0] = msg_queue_obj->sync_event;
		syncs[1] = msg_queue_obj->sync_done;
		status = sync_wait_on_multiple_events(syncs, 2, utimeout,
						      &index);
		/* Enter critical section */
		spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
		if (msg_queue_obj->done) {
			/* Queue is being torn down: balance the pending
			 * count and ack so the deleter can proceed. */
			msg_queue_obj->io_msg_pend--;
			/* Exit critical section */
			spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
			/*  Signal that we're not going to access msg_queue_obj
			 *  anymore, so it can be deleted. */
			(void)sync_set_event(msg_queue_obj->sync_done_ack);
			status = -EPERM;
		} else {
			if (!status) {
				DBC_ASSERT(!LST_IS_EMPTY
					   (msg_queue_obj->msg_used_list));
				/* Get msg from used list */
				msg_frame_obj = (struct msg_frame *)
				    lst_get_head(msg_queue_obj->msg_used_list);
				/* Copy message into pmsg and put frame on the
				 * free list */
				if (msg_frame_obj != NULL) {
					*pmsg = msg_frame_obj->msg_data.msg;
					lst_put_tail
					    (msg_queue_obj->msg_free_list,
					     (struct list_head *)
					     msg_frame_obj);
				}
			}
			/* Decrement pending count on both success and
			 * timeout paths so the delete handshake balances. */
			msg_queue_obj->io_msg_pend--;
			/* Re-signal the event if messages remain queued so
			 * the next blocked reader is woken (the wait above
			 * consumed the event). */
			if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list))
				sync_set_event(msg_queue_obj->sync_event);

			/* Exit critical section */
			spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		}
	}
func_end:
	return status;
}
380
381/*
382 *  ======== bridge_msg_put ========
383 *      Put a message onto a msg_ctrl queue.
384 */
385int bridge_msg_put(struct msg_queue *msg_queue_obj,
386			  const struct dsp_msg *pmsg, u32 utimeout)
387{
388	struct msg_frame *msg_frame_obj;
389	struct msg_mgr *hmsg_mgr;
390	bool put_msg = false;
391	struct sync_object *syncs[2];
392	u32 index;
393	int status = 0;
394
395	if (!msg_queue_obj || !pmsg || !msg_queue_obj->hmsg_mgr) {
396		status = -ENOMEM;
397		goto func_end;
398	}
399	hmsg_mgr = msg_queue_obj->hmsg_mgr;
400	if (!hmsg_mgr->msg_free_list) {
401		status = -EFAULT;
402		goto func_end;
403	}
404
405	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
406
407	/* If a message frame is available, use it */
408	if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
409		msg_frame_obj =
410		    (struct msg_frame *)lst_get_head(hmsg_mgr->msg_free_list);
411		if (msg_frame_obj != NULL) {
412			msg_frame_obj->msg_data.msg = *pmsg;
413			msg_frame_obj->msg_data.msgq_id =
414			    msg_queue_obj->msgq_id;
415			lst_put_tail(hmsg_mgr->msg_used_list,
416				     (struct list_head *)msg_frame_obj);
417			hmsg_mgr->msgs_pending++;
418			put_msg = true;
419		}
420		if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
421			sync_reset_event(hmsg_mgr->sync_event);
422
423		/* Release critical section before scheduling DPC */
424		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
425		/* Schedule a DPC, to do the actual data transfer: */
426		iosm_schedule(hmsg_mgr->hio_mgr);
427	} else {
428		if (msg_queue_obj->done)
429			status = -EPERM;
430		else
431			msg_queue_obj->io_msg_pend++;
432
433		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
434	}
435	if (!status && !put_msg) {
436		/* Wait til a free message frame is available, timeout,
437		 * or done */
438		syncs[0] = hmsg_mgr->sync_event;
439		syncs[1] = msg_queue_obj->sync_done;
440		status = sync_wait_on_multiple_events(syncs, 2, utimeout,
441						      &index);
442		if (status)
443			goto func_end;
444		/* Enter critical section */
445		spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
446		if (msg_queue_obj->done) {
447			msg_queue_obj->io_msg_pend--;
448			/* Exit critical section */
449			spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
450			/*  Signal that we're not going to access msg_queue_obj
451			 *  anymore, so it can be deleted. */
452			(void)sync_set_event(msg_queue_obj->sync_done_ack);
453			status = -EPERM;
454		} else {
455			if (LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
456				status = -EFAULT;
457				goto func_cont;
458			}
459			/* Get msg from free list */
460			msg_frame_obj = (struct msg_frame *)
461			    lst_get_head(hmsg_mgr->msg_free_list);
462			/*
463			 * Copy message into pmsg and put frame on the
464			 * used list.
465			 */
466			if (msg_frame_obj) {
467				msg_frame_obj->msg_data.msg = *pmsg;
468				msg_frame_obj->msg_data.msgq_id =
469				    msg_queue_obj->msgq_id;
470				lst_put_tail(hmsg_mgr->msg_used_list,
471					     (struct list_head *)msg_frame_obj);
472				hmsg_mgr->msgs_pending++;
473				/*
474				 * Schedule a DPC, to do the actual
475				 * data transfer.
476				 */
477				iosm_schedule(hmsg_mgr->hio_mgr);
478			}
479
480			msg_queue_obj->io_msg_pend--;
481			/* Reset event if there are still frames available */
482			if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
483				sync_set_event(hmsg_mgr->sync_event);
484func_cont:
485			/* Exit critical section */
486			spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
487		}
488	}
489func_end:
490	return status;
491}
492
493/*
494 *  ======== bridge_msg_register_notify ========
495 */
496int bridge_msg_register_notify(struct msg_queue *msg_queue_obj,
497				   u32 event_mask, u32 notify_type,
498				   struct dsp_notification *hnotification)
499{
500	int status = 0;
501
502	if (!msg_queue_obj || !hnotification) {
503		status = -ENOMEM;
504		goto func_end;
505	}
506
507	if (!(event_mask == DSP_NODEMESSAGEREADY || event_mask == 0)) {
508		status = -EPERM;
509		goto func_end;
510	}
511
512	if (notify_type != DSP_SIGNALEVENT) {
513		status = -EBADR;
514		goto func_end;
515	}
516
517	if (event_mask)
518		status = ntfy_register(msg_queue_obj->ntfy_obj, hnotification,
519						event_mask, notify_type);
520	else
521		status = ntfy_unregister(msg_queue_obj->ntfy_obj,
522							hnotification);
523
524	if (status == -EINVAL) {
525		/*  Not registered. Ok, since we couldn't have known. Node
526		 *  notifications are split between node state change handled
527		 *  by NODE, and message ready handled by msg_ctrl. */
528		status = 0;
529	}
530func_end:
531	return status;
532}
533
534/*
535 *  ======== bridge_msg_set_queue_id ========
536 */
537void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id)
538{
539	/*
540	 *  A message queue must be created when a node is allocated,
541	 *  so that node_register_notify() can be called before the node
542	 *  is created. Since we don't know the node environment until the
543	 *  node is created, we need this function to set msg_queue_obj->msgq_id
544	 *  to the node environment, after the node is created.
545	 */
546	if (msg_queue_obj)
547		msg_queue_obj->msgq_id = msgq_id;
548}
549
550/*
551 *  ======== add_new_msg ========
552 *      Must be called in message manager critical section.
553 */
554static int add_new_msg(struct lst_list *msg_list)
555{
556	struct msg_frame *pmsg;
557	int status = 0;
558
559	pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
560	if (pmsg != NULL) {
561		lst_init_elem((struct list_head *)pmsg);
562		lst_put_tail(msg_list, (struct list_head *)pmsg);
563	} else {
564		status = -ENOMEM;
565	}
566
567	return status;
568}
569
570/*
571 *  ======== delete_msg_mgr ========
572 */
573static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
574{
575	if (!hmsg_mgr)
576		goto func_end;
577
578	if (hmsg_mgr->queue_list) {
579		if (LST_IS_EMPTY(hmsg_mgr->queue_list)) {
580			kfree(hmsg_mgr->queue_list);
581			hmsg_mgr->queue_list = NULL;
582		}
583	}
584
585	if (hmsg_mgr->msg_free_list) {
586		free_msg_list(hmsg_mgr->msg_free_list);
587		hmsg_mgr->msg_free_list = NULL;
588	}
589
590	if (hmsg_mgr->msg_used_list) {
591		free_msg_list(hmsg_mgr->msg_used_list);
592		hmsg_mgr->msg_used_list = NULL;
593	}
594
595	kfree(hmsg_mgr->sync_event);
596
597	kfree(hmsg_mgr);
598func_end:
599	return;
600}
601
602/*
603 *  ======== delete_msg_queue ========
604 */
605static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp)
606{
607	struct msg_mgr *hmsg_mgr;
608	struct msg_frame *pmsg;
609	u32 i;
610
611	if (!msg_queue_obj ||
612	    !msg_queue_obj->hmsg_mgr || !msg_queue_obj->hmsg_mgr->msg_free_list)
613		goto func_end;
614
615	hmsg_mgr = msg_queue_obj->hmsg_mgr;
616
617	/* Pull off num_to_dsp message frames from Msg manager and free */
618	for (i = 0; i < num_to_dsp; i++) {
619
620		if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
621			pmsg = (struct msg_frame *)
622			    lst_get_head(hmsg_mgr->msg_free_list);
623			kfree(pmsg);
624		} else {
625			/* Cannot free all of the message frames */
626			break;
627		}
628	}
629
630	if (msg_queue_obj->msg_free_list) {
631		free_msg_list(msg_queue_obj->msg_free_list);
632		msg_queue_obj->msg_free_list = NULL;
633	}
634
635	if (msg_queue_obj->msg_used_list) {
636		free_msg_list(msg_queue_obj->msg_used_list);
637		msg_queue_obj->msg_used_list = NULL;
638	}
639
640	if (msg_queue_obj->ntfy_obj) {
641		ntfy_delete(msg_queue_obj->ntfy_obj);
642		kfree(msg_queue_obj->ntfy_obj);
643	}
644
645	kfree(msg_queue_obj->sync_event);
646	kfree(msg_queue_obj->sync_done);
647	kfree(msg_queue_obj->sync_done_ack);
648
649	kfree(msg_queue_obj);
650func_end:
651	return;
652
653}
654
655/*
656 *  ======== free_msg_list ========
657 */
658static void free_msg_list(struct lst_list *msg_list)
659{
660	struct msg_frame *pmsg;
661
662	if (!msg_list)
663		goto func_end;
664
665	while ((pmsg = (struct msg_frame *)lst_get_head(msg_list)) != NULL)
666		kfree(pmsg);
667
668	DBC_ASSERT(LST_IS_EMPTY(msg_list));
669
670	kfree(msg_list);
671func_end:
672	return;
673}
674