/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Portions of this software have been released under the following terms:
 *
 * (c) Copyright 1989-1993 OPEN SOFTWARE FOUNDATION, INC.
 * (c) Copyright 1989-1993 HEWLETT-PACKARD COMPANY
 * (c) Copyright 1989-1993 DIGITAL EQUIPMENT CORPORATION
 *
 * To anyone who acknowledges that this file is provided "AS IS"
 * without any express or implied warranty:
 * permission to use, copy, modify, and distribute this file for any
 * purpose is hereby granted without fee, provided that the above
 * copyright notices and this notice appears in all source code copies,
 * and that none of the names of Open Software Foundation, Inc., Hewlett-
 * Packard Company or Digital Equipment Corporation be used
 * in advertising or publicity pertaining to distribution of the software
 * without specific, written prior permission.  Neither Open Software
 * Foundation, Inc., Hewlett-Packard Company nor Digital
 * Equipment Corporation makes any representations about the suitability
 * of this software for any purpose.
 *
 * Copyright (c) 2007, Novell, Inc. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Novell Inc. nor the names of its contributors
 *     may be used to endorse or promote products derived from this
 *     software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
**
**  NAME:
**
**      dgslive.c
**
**  FACILITY:
**
**      Remote Procedure Call (RPC)
**
**  ABSTRACT:
**
**  Routines for monitoring liveness of clients.
**
**
*/

#include <dg.h>
#include <dgsct.h>
#include <dgslive.h>

#include <dce/convc.h>
#include <dce/conv.h>

/* ========================================================================= */

INTERNAL void network_monitor_liveness (void);

/* ========================================================================= */

/*
 * Number of seconds before declaring a monitored client dead.
 */

#define LIVE_TIMEOUT_INTERVAL   120
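
/*
 * The liveness monitor thread below wakes up every 60 seconds, so a
 * monitored client is declared dead roughly two scans after its
 * last_update timestamp stops being refreshed.
 */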

/*
 * The client table is a hash table with separate chaining, used by the
 * server runtime to keep track of client processes which it has been
 * asked to monitor.
 *
 * This table is protected by the monitor_mutex lock declared below.
 */

#define CLIENT_TABLE_SIZE 29    /* must be prime */

INTERNAL rpc_dg_client_rep_p_t client_table[CLIENT_TABLE_SIZE];

#define CLIENT_HASH_PROBE(cas_uuid, st) \
    (rpc__dg_uuid_hash(cas_uuid) % CLIENT_TABLE_SIZE)
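
/*
 * Note that the st argument to CLIENT_HASH_PROBE is not examined by the
 * macro body and is left untouched.
 */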

/*
 * static variables associated with running a client monitoring thread.
 *
 * All are protected by the monitor_mutex lock.
 */

INTERNAL rpc_mutex_t    monitor_mutex;
INTERNAL rpc_cond_t     monitor_cond;
INTERNAL dcethread*  monitor_task;
INTERNAL boolean    monitor_running = false;
INTERNAL boolean    monitor_was_running = false;
INTERNAL boolean    stop_monitor = false;
INTERNAL unsigned32 active_monitors = 0;
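
/*
 * The monitor thread is created lazily by rpc__dg_network_mon when the
 * first rundown is registered, exits on its own once active_monitors
 * drops back to zero, and is stopped (and restarted if necessary) across
 * a fork by rpc__dg_monitor_fork_handler.
 */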

/* ========================================================================= */

/*
 * F I N D _ C L I E N T
 *
 * Utility routine for looking up a client handle, by UUID, in the
 * global client_rep table.
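 *
 * The caller is expected to hold monitor_mutex.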
 */

INTERNAL rpc_dg_client_rep_p_t find_client (
        uuid_p_t /*cas_uuid*/
    );

INTERNAL rpc_dg_client_rep_p_t find_client
(
    uuid_p_t cas_uuid
)
{
    rpc_dg_client_rep_p_t client;
    unsigned16 probe;
    unsigned32 st;

    probe = CLIENT_HASH_PROBE(cas_uuid, &st);
    client = client_table[probe];

    while (client != NULL)
    {
        if (uuid_equal(cas_uuid, &client->cas_uuid, &st))
            return(client);
        client = client->next;
    }
    return(NULL);
}

/*
 * R P C _ _ D G _ N E T W O R K _ M O N
 *
 * This routine is called, via the network listener service, by a server
 * stub which needs to maintain context for a particular client.  The
 * client handle is provided, and in the event that the connection to
 * the client is lost, that handle will be presented to the rundown routine
 * specified.
 *
 * The actual client rep structure is created during the call to
 * binding_inq_client and is stored in a global table at that time.  When
 * successful, this routine merely associates a rundown function pointer
 * with the appropriate client rep structure in the table.
 */

PRIVATE void rpc__dg_network_mon
(
    rpc_binding_rep_p_t binding_r ATTRIBUTE_UNUSED,
    rpc_client_handle_t client_h,
    rpc_network_rundown_fn_t rundown,
    unsigned32 *st
)
{
    rpc_dg_client_rep_p_t ptr, client = (rpc_dg_client_rep_p_t) client_h;
    unsigned16 probe;
    uuid_p_t cas_uuid = (uuid_p_t) &client->cas_uuid;

    RPC_MUTEX_LOCK(monitor_mutex);

    /*
     * Hash into the client rep table based on the handle's UUID.
     * Scan the chain to find the client handle.
     */

    probe = CLIENT_HASH_PROBE(cas_uuid, st);
    ptr = client_table[probe];

    while (ptr != NULL)
    {
        if (ptr == client)
            break;
        ptr = ptr->next;
    }

    /*
     * If the handle passed in is not in the table, it must be bogus.
     * Also, make sure that we are not already monitoring this client,
     * indicated by a non-NULL rundown routine pointer.
     */

    if (ptr == NULL || ptr->rundown != NULL)
    {
        *st = -1;         /* !!! Need a real error value */
        RPC_MUTEX_UNLOCK(monitor_mutex);
        return;
    }

    /*
     * (Re)initialize the table entry, and bump the count of active monitors.
     */

    client->rundown  = rundown;
    client->last_update = rpc__clock_stamp();
    active_monitors++;

    /*
     * Last, make sure that the monitor thread is running.
     */

    if (! monitor_running)
    {
        monitor_running = true;
        dcethread_create_throw(&monitor_task, NULL,
            (dcethread_startroutine) network_monitor_liveness,
            NULL);
    }

    *st = rpc_s_ok;
    RPC_MUTEX_UNLOCK(monitor_mutex);
}

/*
 * R P C _ _ D G _ N E T W O R K _ S T O P _ M O N
 *
 * This routine is called, via the network listener service, by a server stub
 * when it wishes to discontinue maintaining context for a particular client.
 * Monitoring is discontinued simply by setting the client's rundown function
 * pointer to NULL.  The actual client handle structure is maintained, with
 * reference from the SCTE, to avoid doing another callback if the client
 * needs to be monitored again.
 */

PRIVATE void rpc__dg_network_stop_mon
(
    rpc_binding_rep_p_t binding_r ATTRIBUTE_UNUSED,
    rpc_client_handle_t client_h,
    unsigned32 *st
)
{
    rpc_dg_client_rep_p_t client = (rpc_dg_client_rep_p_t) client_h;
    rpc_dg_client_rep_p_t ptr;
    uuid_p_t cas_uuid = &client->cas_uuid;
    unsigned16 probe;

    RPC_MUTEX_LOCK(monitor_mutex);

    /*
     * Hash into the client rep table based on the client handle's UUID.
     */

    probe = CLIENT_HASH_PROBE(cas_uuid, st);
    ptr = client_table[probe];

    /*
     * Scan down the hash chain, looking for the reference to the client
     * handle
     */

    while (ptr != NULL)
    {
        if (ptr == client)
        {
            /*
             * To stop monitoring a client handle requires only that
             * the rundown function pointer be set to NULL.
             */

            if (client->rundown != NULL)
            {
                client->rundown = NULL;
                active_monitors--;
            }
            RPC_MUTEX_UNLOCK(monitor_mutex);
            *st = rpc_s_ok;
            return;
        }
        ptr = ptr->next;
    }

    *st = -1;               /* !!! attempt to remove unmonitored client */
    RPC_MUTEX_UNLOCK(monitor_mutex);
}

/*
 * N E T W O R K _ M O N I T O R _ L I V E N E S S
 *
 * This routine runs as the base routine of a thread; it periodically
 * checks for lost client connections.  We can't run this routine from
 * the timer queue (and thread) because it calls out to the application
 * (stub) rundown routines and we can't tie up the timer while we do
 * that.
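 *
 * Note that monitor_mutex is released around each rundown call and
 * re-acquired afterwards, so the client table can change while a rundown
 * routine is executing.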
 */

INTERNAL void network_monitor_liveness(void)
{
    rpc_dg_client_rep_p_t client;
    unsigned32 i;
    struct timespec next_ts;

    RPC_DBG_PRINTF(rpc_e_dbg_conv_thread, 1,
                   ("(network_monitor_liveness) starting up...\n"));

    RPC_MUTEX_LOCK(monitor_mutex);

    while (stop_monitor == false)
    {
        /*
         * Wake up every 60 seconds.
         */
        rpc__clock_timespec(rpc__clock_stamp()+60, &next_ts);

        RPC_COND_TIMED_WAIT(monitor_cond, monitor_mutex, &next_ts);
        if (stop_monitor == true)
            break;

        for (i = 0; i < CLIENT_TABLE_SIZE; i++)
        {
            client = client_table[i];

            while (client != NULL && active_monitors != 0)
            {
                if (client->rundown != NULL &&
                    rpc__clock_aged(client->last_update,
                                    RPC_CLOCK_SEC(LIVE_TIMEOUT_INTERVAL)))
                {
                    /*
                     * If the timer has expired, call the rundown routine.
                     * Stop monitoring the client handle by setting its rundown
                     * routine pointer to NULL.
                     */

                    RPC_DBG_PRINTF(rpc_e_dbg_general, 3,
367                        ("(network_monitor_liveness_timer) Calling rundown function\n"));

                    RPC_MUTEX_UNLOCK(monitor_mutex);
                    (*client->rundown)((rpc_client_handle_t)client);
                    RPC_MUTEX_LOCK(monitor_mutex);

                    /*
                     * The monitor is no longer active.
                     */
                    client->rundown = NULL;
                    active_monitors--;
                }
                client = client->next;
            }

            if (active_monitors == 0)
            {
                /*
                 * While we were executing rundown functions with the mutex
                 * released, the fork handler may have asked us to stop.
                 */
                if (stop_monitor == true)
                    break;
                /*
                 * Nothing left to monitor, so terminate the thread.
                 */
                dcethread_detach_throw(monitor_task);
                monitor_running = false;
                RPC_DBG_PRINTF(rpc_e_dbg_conv_thread, 1,
                    ("(network_monitor_liveness) shutting down (no active)...\n"));
                RPC_MUTEX_UNLOCK(monitor_mutex);
                return;
            }
        }
    }
    RPC_DBG_PRINTF(rpc_e_dbg_conv_thread, 1,
                   ("(network_monitor_liveness) shutting down...\n"));

    RPC_MUTEX_UNLOCK(monitor_mutex);
}

/*
 * R P C _ _ D G _ C O N V C _ I N D Y
 *
 * Server manager routine for client liveness indications (convc_indy).
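 *
 * Receiving an indication refreshes the client entry's last_update
 * timestamp, which keeps the monitor thread from running down that
 * client's context.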
 */

PRIVATE void rpc__dg_convc_indy
(
    idl_uuid_t *cas_uuid
)
{
    rpc_dg_client_rep_p_t client;

    RPC_MUTEX_LOCK(monitor_mutex);

    client = find_client(cas_uuid);

    if (client != NULL)
    {
        client->last_update = rpc__clock_stamp();
    }
    RPC_MUTEX_UNLOCK(monitor_mutex);
}

/*
 * R P C _ _ D G _ B I N D I N G _ I N Q _ C L I E N T
 *
 * Inquire what client address space a binding handle refers to.
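 *
 * If the call's SCTE already carries a client rep, that rep is returned
 * directly.  Otherwise a conv_who_are_you2 callback is made to obtain the
 * client's address space (CAS) UUID, and an existing client rep with that
 * UUID is either reused or a new one is created and entered into the
 * client table.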
 */

PRIVATE void rpc__dg_binding_inq_client
(
    rpc_binding_rep_p_t binding_r,
    rpc_client_handle_t *client_h,
    unsigned32 *st
)
{
    rpc_dg_binding_server_p_t shand = (rpc_dg_binding_server_p_t) binding_r;
    rpc_dg_scall_p_t scall = shand->scall;
    rpc_binding_handle_t h;
    idl_uuid_t cas_uuid;
    rpc_dg_client_rep_p_t client;
    unsigned32 temp_seq, tst;

    *st = rpc_s_ok;

    /*
     * Lock down and make sure we're in an OK state.
     */

    RPC_LOCK(0);
    RPC_DG_CALL_LOCK(&scall->c);

    if (scall->c.state == rpc_e_dg_cs_orphan)
    {
        *st = rpc_s_call_orphaned;
        RPC_DG_CALL_UNLOCK(&scall->c);
        RPC_UNLOCK(0);
        return;
    }

    /*
     * See if there is already a client handle associated with the scte
     * associated with this server binding handle.  If there is, just
     * return it.
     */

    if (scall->scte->client != NULL)
    {
        *client_h = (rpc_client_handle_t) scall->scte->client;
        RPC_DG_CALL_UNLOCK(&scall->c);
        RPC_UNLOCK(0);
        return;
    }

    /*
     * No client handle.  We need to do a callback to obtain a UUID
     * uniquely identifying this particular instance of the client.
     */

    h = rpc__dg_sct_make_way_binding(scall->scte, st);

    RPC_DG_CALL_UNLOCK(&scall->c);
    RPC_UNLOCK(0);

    if (h == NULL)
    {
        return;
    }

    RPC_DBG_PRINTF(rpc_e_dbg_general, 3,
        ("(binding_inq_client) Doing whats-your-proc-id callback\n"));

    DCETHREAD_TRY
    {
        (*conv_v3_0_c_epv.conv_who_are_you2)
            (h, &scall->c.call_actid, rpc_g_dg_server_boot_time,
            &temp_seq, &cas_uuid, st);
    }
    DCETHREAD_CATCH_ALL(THIS_CATCH)
    {
        *st = rpc_s_who_are_you_failed;
    }
    DCETHREAD_ENDTRY

    rpc_binding_free(&h, &tst);

    if (*st != rpc_s_ok)
        return;

    /*
     * Check to see if the UUID returned has already been built into
     * a client handle associated with another scte.  Since we have no
     * way of mapping actids to processes, we can't know that two actids
     * are in the same address space until we get the same address space
     * UUID from both.  In this case it is necessary to use the same
     * client handle for both actids.
     */

    RPC_LOCK(0);
    RPC_DG_CALL_LOCK(&scall->c);

    if (scall->c.state == rpc_e_dg_cs_orphan)
    {
        *st = rpc_s_call_orphaned;
        RPC_DG_CALL_UNLOCK(&scall->c);
        RPC_UNLOCK(0);
        return;
    }

    RPC_MUTEX_LOCK(monitor_mutex);

    client = find_client(&cas_uuid);

    if (client != NULL)
    {
        client->refcnt++;
        scall->scte->client = client;
    }
    else
    {
        /*
         * If not, allocate a client handle structure and thread
         * it onto the table.
         */

        unsigned16 probe;

        probe = CLIENT_HASH_PROBE(&cas_uuid, st);

        RPC_MEM_ALLOC(client, rpc_dg_client_rep_p_t, sizeof *client,
            RPC_C_MEM_DG_CLIENT_REP, RPC_C_MEM_NOWAIT);

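        /*
         * A new entry starts out unmonitored: rundown stays NULL and
         * last_update stays zero until rpc__dg_network_mon installs a
         * rundown function and timestamps the entry.
         */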
        client->next = client_table[probe];
        client->rundown = NULL;
        client->last_update = 0;
        client->cas_uuid = cas_uuid;

        client_table[probe] = client;
        scall->scte->client = client;
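        /*
         * The initial reference count presumably accounts for the
         * client_table entry just made and for the SCTE's reference set
         * above (see rpc__dg_client_free).
         */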
        client->refcnt = 2;
    }

    RPC_MUTEX_UNLOCK(monitor_mutex);
    RPC_DG_CALL_UNLOCK(&scall->c);
    RPC_UNLOCK(0);

    *client_h = (rpc_client_handle_t) client;
}

/*
 * R P C _ _ D G _ M O N I T O R _ I N I T
 *
 * This routine performs any initializations required for the network
 * listener service maintain/monitor functions.
 */

PRIVATE void rpc__dg_monitor_init(void)
{

    /*
     * Initialize the count of handles currently being monitored.
     */

    active_monitors = 0;
    monitor_running = false;
    monitor_was_running = false;
    stop_monitor = false;
    RPC_MUTEX_INIT(monitor_mutex);
    RPC_COND_INIT(monitor_cond, monitor_mutex);
}

#ifdef ATFORK_SUPPORTED
/*
 * R P C _ _ D G _ M O N I T O R _ F O R K _ H A N D L E R
 *
 * Handle fork related processing for this module.
 */

PRIVATE void rpc__dg_monitor_fork_handler
(
    rpc_fork_stage_id_t stage
)
{
    unsigned32 i;
    unsigned32 st;

    switch ((int)stage)
    {
    case RPC_C_PREFORK:
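        /*
         * monitor_mutex is acquired below and left held across the fork;
         * it is released again in the postfork parent and child stages.
         */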
        RPC_MUTEX_LOCK(monitor_mutex);
        monitor_was_running = false;

        if (monitor_running)
        {
            stop_monitor = true;
            RPC_COND_SIGNAL(monitor_cond, monitor_mutex);
            RPC_MUTEX_UNLOCK(monitor_mutex);
            dcethread_join_throw(monitor_task, (void **) &st);
            RPC_MUTEX_LOCK(monitor_mutex);
            /* FIXME: detaching the joined monitor thread is currently disabled:
             *  DCETHREAD_TRY {
             *      dcethread_detach_throw(monitor_task);
             *  }
             *  DCETHREAD_CATCH(dcethread_use_error_e)
             *  {}
             *  DCETHREAD_ENDTRY;
             */
            monitor_running = false;
            /*
             * The monitor thread may have nothing to do.
             */
            if (active_monitors != 0)
                monitor_was_running = true;
            stop_monitor = false;
        }
        break;
    case RPC_C_POSTFORK_PARENT:
        if (monitor_was_running)
        {
            monitor_was_running = false;
            monitor_running = true;
            stop_monitor = false;
            dcethread_create_throw(&monitor_task, NULL,
                           (dcethread_startroutine) network_monitor_liveness,
                           NULL);
        }
        RPC_MUTEX_UNLOCK(monitor_mutex);
        break;
    case RPC_C_POSTFORK_CHILD:
        monitor_was_running = false;
        monitor_running = false;
        stop_monitor = false;

        /*
         * Initialize the count of handles currently being monitored.
         */

        active_monitors = 0;
        for (i = 0; i < CLIENT_TABLE_SIZE; i++)
            client_table[i] = NULL;

        RPC_MUTEX_UNLOCK(monitor_mutex);
        break;
    }
}
#endif /* ATFORK_SUPPORTED */

/*
 * R P C _ _ D G _ C L I E N T _ F R E E
 *
 * This routine frees the memory associated with a client handle (created
 * for the purpose of monitoring client liveness).  It is called by the
 * RPC_DG_CLIENT_RELEASE macro when the last scte which refers to this
 * client handle is freed.  The client handle is also removed from the
 * client table.
 */

PRIVATE void rpc__dg_client_free
(
    rpc_client_handle_t client_h
)
{
    unsigned16 probe;
    rpc_dg_client_rep_p_t client = (rpc_dg_client_rep_p_t) client_h;
    rpc_dg_client_rep_p_t ptr, prev = NULL;

    RPC_MUTEX_LOCK(monitor_mutex);

    /*
     * Hash into the client rep table based on the client handle's UUID.
     */

    probe = CLIENT_HASH_PROBE(&client->cas_uuid, &st);
    ptr = client_table[probe];

    /*
     * Scan down the hash chain, looking for the reference to the client
     * handle
     */

    while (ptr != NULL)
    {
        if (ptr == client)
        {
            if (prev == NULL)
                client_table[probe] = ptr->next;
            else
                prev->next = ptr->next;

            RPC_MEM_FREE(client, RPC_C_MEM_DG_CLIENT_REP);

            RPC_DBG_PRINTF(rpc_e_dbg_general, 3,
                ("(client_free) Freeing client handle\n"));

            RPC_MUTEX_UNLOCK(monitor_mutex);
            return;
        }

        prev = ptr;
        ptr = ptr->next;
    }
    RPC_MUTEX_UNLOCK(monitor_mutex);
}