/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Portions of this software have been released under the following terms:
 *
 * (c) Copyright 1989-1993 OPEN SOFTWARE FOUNDATION, INC.
 * (c) Copyright 1989-1993 HEWLETT-PACKARD COMPANY
 * (c) Copyright 1989-1993 DIGITAL EQUIPMENT CORPORATION
 *
 * To anyone who acknowledges that this file is provided "AS IS"
 * without any express or implied warranty:
 * permission to use, copy, modify, and distribute this file for any
 * purpose is hereby granted without fee, provided that the above
 * copyright notices and this notice appears in all source code copies,
 * and that none of the names of Open Software Foundation, Inc., Hewlett-
 * Packard Company or Digital Equipment Corporation be used
 * in advertising or publicity pertaining to distribution of the software
 * without specific, written prior permission.  Neither Open Software
 * Foundation, Inc., Hewlett-Packard Company nor Digital
 * Equipment Corporation makes any representations about the suitability
 * of this software for any purpose.
 *
 * Copyright (c) 2007, Novell, Inc. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Novell Inc. nor the names of its contributors
 *     may be used to endorse or promote products derived from this
 *     software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
**
**  NAME:
**
**      dgexec.c
**
**  FACILITY:
**
**      Remote Procedure Call (RPC)
**
**  ABSTRACT:
**
**  DG protocol service routines.
**
**
*/

/* ========================================================================= */

#include <commonp.h>
#include <dg.h>
#include <dgpkt.h>
#include <dgrq.h>
#include <dgxq.h>
#include <dgsct.h>
#include <dgexec.h>
#include <dgscall.h>
#include <dgcall.h>
#include <dghnd.h>
#include <comcthd.h>
#include <comauth.h>

/* ========================================================================= */

INTERNAL void queue_mapped_reject (
        rpc_dg_scall_p_t  /*scall*/,
        unsigned32        /*st*/
    );

/* ========================================================================= */
/*
 * Declare a global pointer to a routine that can handle calls to
 * pre-v2 server stubs.  If the compatibility library is used to
 * register pre-v2 interfaces, then it will initialize this
 * pointer to the appropriate routine.
 */

GLOBAL  rpc__dg_pre_v2_server_fn_t rpc_g_dg_pre_v2_server_call_p = NULL;

/* ========================================================================= */

/*
 * Q U E U E _ M A P P E D _ R E J E C T
 *
 * This routine is used by rpc__dg_execute_call when it receives an error
 * status back from a stub.  First, we translate from the rpc_s_* error
 * code to an equivalent nca_s_* code, and then we reinitialize the xmitq
 * (just like rpc__dg_call_fault()) and queue a reject packet.  Normal
 * output processing will then be used to send this reject packet to
 * the client.  Note that an "orphan induced" reject will never make
 * it to the client (hence we don't really care what the stcode mapping is).
 * We specify the mapping just so that we don't get spurious messages.
 */

INTERNAL void queue_mapped_reject
(
    rpc_dg_scall_p_t scall,
    unsigned32 st
)
{
    rpc_iovector_t iovec;
    unsigned32 tst, mst;

    switch ((int)st)
    {
        case rpc_s_who_are_you_failed:
                                        mst = nca_s_who_are_you_failed;  break;
        case rpc_s_comm_failure:        mst = nca_s_comm_failure;        break;
        case rpc_s_unknown_if:          mst = nca_s_unk_if;              break;
        case rpc_s_protocol_error:      mst = nca_s_proto_error;         break;
        case rpc_s_unsupported_type:    mst = nca_s_unsupported_type;    break;
        case rpc_s_manager_not_entered: mst = nca_s_manager_not_entered; break;
        case rpc_s_op_rng_error:        mst = nca_s_op_rng_error;        break;
        case rpc_s_call_orphaned:       mst = nca_s_unspec_reject;       break;
        case rpc_s_unknown_reject:      mst = nca_s_unspec_reject;       break;
        case rpc_s_unknown_mgr_type:    mst = nca_s_unsupported_type;    break;
        default:
            RPC_DBG_GPRINTF(("(queue_mapped_reject) unknown status; st=0x%x\n", st));
            mst = nca_s_unspec_reject;
            break;
    }

    /*
     * Build the iovector for calling transmit_int
     */

    iovec.num_elt = 1;
    iovec.elt[0].buff_dealloc = NULL;
    iovec.elt[0].flags = rpc_c_iovector_elt_reused;
    iovec.elt[0].data_addr = (byte_p_t) &mst;
    iovec.elt[0].data_len = sizeof(mst);
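
    /*
     * "mst" is a local variable, so the element is flagged "reused"
     * with no dealloc routine; the transmit path is expected to copy
     * the 32-bit status out of the buffer rather than hold onto it.
     */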

    RPC_DG_CALL_LOCK_ASSERT(&scall->c);

    /*
     * Purge the recvq since it won't be used after this.  The recvq
     * may currently have lots of rqes on it and freeing it now will
     * help pkt rationing.  It's likely that the recvq is already empty;
     * however, this is the slow path, so do it (again) just to be sure.
     */

    rpc__dg_recvq_free(&scall->c.rq);

    /*
     * Toss any pending xmitq pkts and add the mapped reject status to
     * the xmit queue just as if it were a response (but whack the proto
     * pkt header to the reject pkt type).  The call will now be in the
     * xmit state if it wasn't already there.  Defer the sending of the
     * reject until the "end of the call" (execute_call).  This prevents
     * the client from receiving the complete response, completing the
     * call and generating a new one while the server still thinks the
     * call is not complete (thinking it must have dropped an ack,...).
     * The reject, like a fault, is really just a special response pkt.
     *
     * This routine is called in the thread executing the call, so
     * there's no need to signal the call.  We don't actually want the
     * call's status to be set to an error value; the server runtime
     * still wants to complete processing the call, which involves
     * sending the reject response to the client (instead of any further
     * data response).
     *
     * Subsequent reject response retransmissions will occur just as if
     * this were a "normal" call response, as well as in reply to a ping.
     * Of course, rejects (like faults) for idempotent calls don't get
     * remembered or retransmitted.
     */

    RPC_DBG_GPRINTF(("(queue_mapped_reject) st=0x%x => 0x%x [%s]\n",
        st, mst, rpc__dg_act_seq_string(&scall->c.xq.hdr)));

    RPC_DG_XMITQ_REINIT(&scall->c.xq, &scall->c);
    RPC_DG_HDR_SET_PTYPE(&scall->c.xq.hdr, RPC_C_DG_PT_REJECT);

    rpc__dg_call_transmit_int(&scall->c, &iovec, &tst);
    /*
     * The transmit may fail because the call is already orphaned.
     * It may fail for some other reason as well.  In either case,
     * we're not gonna get a response to the client.  Just keep
     * falling through (other calls may fail as well) and clean up.
     */
}
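
/*
 * For example, the opnum range check in rpc__dg_execute_call below
 * reports a bad operation number by setting reject_st to
 * rpc_s_op_rng_error and eventually calling:
 *
 *     queue_mapped_reject(scall, rpc_s_op_rng_error);
 *
 * which queues an nca_s_op_rng_error reject on the xmitq; the actual
 * send is deferred until the call wrap-up in rpc__dg_execute_call
 * (the xmitq_push near END_OF_CALL).
 */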

/*
 * R P C _ _ D G _ E X E C U T E _ C A L L
 *
 * Perform final validation of the request (including a callback if
 * necessary).  If everything checks out, do what's necessary to dispatch
 * to the server stub.
 */

PRIVATE void rpc__dg_execute_call
(
    dce_pointer_t scall_,
    boolean32 call_was_queued ATTRIBUTE_UNUSED
)
{
    ndr_format_t drep;
    unsigned32 st, reject_st;
    boolean broadcast;
    boolean idem = false;
    boolean maybe;
    boolean sent_response;
    boolean called_stub;
    rpc_dg_scall_p_t scall = (rpc_dg_scall_p_t) scall_;
    rpc_dg_pkt_hdr_p_t hdrp;
    rpc_iovector_elt_t iove;
    rpc_dg_recvq_elt_p_t rqe;
    unsigned16 ihint;
    rpc_dg_binding_server_p_t h;
    rpc_v2_server_stub_epv_t ss_epv;
    rpc_mgr_epv_t mgr_epv;
    rpc_if_rep_p_t ifspec;
    idl_uuid_t type;
    int force_way_auth;
    rpc_key_info_p_t key_info;
    rpc_dg_auth_epv_p_t auth_epv;
    unsigned16 opnum;
    unsigned32 flags;
    unsigned32 max_calls;
    unsigned32 max_rpc_size;
    rpc_if_callback_fn_t if_callback;
    int prev_cancel_state;

    /*
     * All of this code (99% of which is never executed) is in the fast path.
     *
     * NOTE: This routine is responsible for sending back a correct
     * cancel pending status to the client under all conditions
     * (to ensure that cancels don't get lost - i.e. forwarded to the
     * server, accepted, not delivered and then not reported as
     * a cancel pending).
     *
     * Any "reject response" to the client must be robust (at least for
     * Non-Idempotent calls).  This is necessary because the client may
     * have already received a fack causing it to free some pkts that it
     * would need to "rerun" the call (assuming the stub was never entered)
     * in the event that a reject was lost.
     *
     * Clients recover from lost responses to idempotent calls (including
     * proper cancel pending resetting) so we don't have to worry about
     * being robust in this situation.
     */

    /*
     * The caller of this routine is responsible for handing off a
     * call *reference* to us.  We will release our reference when
     * we're done.
     */

    RPC_DG_CALL_LOCK_ASSERT(&scall->c);

    /*
     * We are now executing.
     */
    scall->call_is_queued = false;

    /*
     * Initialize the iove, since in any failure case (i.e. orphan),
     * it may not be updated correctly; subsequent logic depends on freeing
     * things based on the proper state of the iove.
     */

    iove.buff_dealloc = NULL;

    /*
     * Initialize the "called_stub" flag to false.  If a call gets
     * rejected, and never enters the stub routine, it's up to us to
     * free the request RQE.
     */
    called_stub = false;

    /*
     * Initialize the "sent call response" flag to indicate a failure.
     * This is necessary so that failures resulting in END_OF_CALL
     * end up transitioning to the proper call state when we wrap up
     * call processing (at the end of this routine).
     */
    sent_response = false;

    /*
     * Before continuing, it's likely that the call has been "opened
328     * up" (due to a unlock/lock around call executor handoff) and we
     * need to check if it is safe to continue...
     */
    if (scall->c.state != rpc_e_dg_cs_recv)
        goto END_OF_CALL;

    /*
     * If this call does not yet have a reservation, make one now.  Any
     * call that was queued will not have a reservation; also, even if
     * an executor thread was initially available for the call, there
     * might not have been any reservations available at that time.
     * (Note that the call to make the reservation may block until a
     * reservation becomes available.)
     *
     * The make_reservation routine requires that the global lock be
     * held.  To respect the locking hierarchy, we need to juggle the
     * locks around a little, checking that the state of the call doesn't
     * change during the times when it's unlocked.
     */

    if (scall->c.n_resvs < scall->c.max_resvs)
    {
        RPC_DG_CALL_UNLOCK(&scall->c);
        RPC_LOCK(0);
        RPC_DG_CALL_LOCK(&scall->c);
        if (scall->c.state != rpc_e_dg_cs_recv)
        {
            RPC_UNLOCK(0);
            goto END_OF_CALL;
        }

        /*
         * We always start with the maximum reservation because we no longer
         * reset high_rcv_frag_size and snd_frag_size between the calls.
         * (The previous call may have used/advertised the larger fragment
         * size.)
         *
         * This is fine in the user space since the packet rationing will
         * never happen. (We assume that there are always enough packet
         * buffers available.)
         *
         * This may accelerate the packet rationing in the kernel, though
         * (iff MBF is turned on). Unfortunately, we can't start with the
         * minimum reservation in the kernel because the other end may be a
         * user space.
         */
        rpc__dg_pkt_adjust_reservation(&scall->c, scall->c.max_resvs, true);

        RPC_UNLOCK(0);

        /*
         * Since the call's been opened up, we need to check its status.
         */
        if (scall->c.state != rpc_e_dg_cs_recv)
        {
            RPC_DBG_GPRINTF((
                "(rpc__dg_execute_call) Cancelled while awaiting pkt reservation\n"));
            goto END_OF_CALL;
        }

        /*
         * Since this call did not have a reservation, any data received for
         * it was dropped, and the client was told not to send any more.
         * Since the call can now receive data, prod the client into
         * retransmitting.
         */
        rpc__dg_call_xmit_fack(&scall->c, NULL, ! scall->c.rq.recving_frags);
    }

    /*
     * Now's as good a time as any to enable direct cancel posting to
     * the thread (while we've got the call lock held).  It might have
     * been nice to defer this to just before the sstub dispatch, but
     * then we'd have to re-acquire the call lock.
     *
     * NOTE: This routine MUST call rpc_cthread_cancel_caf() before
     * returning (regardless of the return path)!  This requirement
     * exists because cancels may be (become) pending at any time and
     * must be flushed (otherwise subsequent calls using this thread
     * will inherit this call's cancel).
     */

    rpc__cthread_cancel_enable_post(&scall->c.c);

    /*
     * Create a server binding handle, if we don't already have one hanging
     * off the scall.  If we have a cached one, reinit it.
     */

    if (scall->h != NULL)
    {
        h = scall->h;
        RPC_DG_BINDING_SERVER_REINIT(h);
    }
    else
    {
        rpc_addr_p_t addr;

        rpc__naf_addr_copy(scall->c.addr, &addr, &st);
        h = (rpc_dg_binding_server_p_t) rpc__binding_alloc
            (true, &scall->c.call_object, RPC_C_PROTOCOL_ID_NCADG, addr, &st);
        if (st != rpc_s_ok)
        {
            RPC_DBG_GPRINTF((
                "(rpc__dg_execute_call) Can't allocate binding, st = 0x%x\n", st));
            goto END_OF_CALL;
        }

        RPC_DG_CALL_REFERENCE(&scall->c);
        h->scall = scall;

        if (!scall->c.is_cbk)
        {
            key_info = scall->scte->key_info;
            if (key_info != NULL)
            {
                rpc_auth_info_p_t auth_info = key_info->auth_info;
                h->c.c.auth_info = auth_info;
                RPC_DG_AUTH_REFERENCE(auth_info); /* for the handle */
            }
        }

        scall->h = h;
    }

    assert(RPC_DG_CALL_IS_SERVER(&scall->c));

    /*
     * Dequeue the first pkt off of the receive queue (including its hdr).
     *
     * WARNING: we MUST use comm_receive_int() because comm_receive(),
     * while it would do the locking for us, doesn't return a usable iove
     * for 0 length data.
     *
     * We're supposed to be in the init state until we know we're accepting
     * the call (that means after a WAY callback if one is necessary).
     * Make certain this is the case following the receive.
     *
     * WARNING 2: Note that this call verifies the authenticity of the
     * packet it reads *except* in two cases:
     *
     *  - When the packet is from a call on an activity the server doesn't
     * currently know about (in which case we notice later on that the
     * authn_proto field in the header is non-zero).
     *
     *  - When the authentication check fails with a status code of
     * "rpc_s_dg_need_way_auth".  Note that in this event, the
     * "receive_int" is still viewed as having succeeded, albeit with
     * a non-zero status code.
     *
     * In either of these cases, a way_auth callback is made, and,
     * if it is successful, the authenticity check is retried
     * (further down in this function).
     */

    rpc__dg_call_receive_int(&scall->c, &iove, &st);
    force_way_auth = false;
    if (st == rpc_s_dg_need_way_auth) {
        RPC_DBG_PRINTF(rpc_e_dbg_general, 4,
            ("(rpc__dg_execute_call) will force way callback\n"));
        st = rpc_s_ok;
        /*
         * We don't own the rqe. It's still on recvq.
         */
        force_way_auth = true;
    }
    else if (st != rpc_s_ok)
    {
        RPC_DBG_GPRINTF((
            "(rpc__dg_execute_call) Receive failed st = 0x%x\n", st));
        goto END_OF_CALL;
    }

    rqe = RPC_DG_RECVQ_ELT_FROM_IOVECTOR_ELT(&iove);
    assert(rqe != NULL && rqe->hdrp != NULL);
    hdrp = rqe->hdrp;
    idem  = ((hdrp->flags & RPC_C_DG_PF_IDEMPOTENT) != 0);
    broadcast = ((hdrp->flags & RPC_C_DG_PF_BROADCAST) != 0);
    maybe  = ((hdrp->flags & RPC_C_DG_PF_MAYBE) != 0);
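
    /*
     * These header flags select the call's execution semantics:
     * idempotent calls may be rerun by the client, broadcast requests
     * never get reject/fault responses (see AFTER_CALL_TO_STUB below),
     * and "maybe" calls get no response at all (so no WAY validation
     * is done for them further down).
     */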

    if (scall->c.is_cbk)
    {
        RPC_DBG_PRINTF(rpc_e_dbg_general, 3,
            ("(rpc__dg_execute_call) Callback [%s]\n",
                rpc__dg_act_seq_string(hdrp)));
    }

    /*
     * Perform some of the request pkt verification that was deferred.
     * This includes interface id and operation number.
     */

    if (!scall->c.is_cbk)
        key_info = scall->scte->key_info;
    else
        key_info = NULL;

    /*
     * Does the request specify authentication, do we not have auth info
     * yet, is the call not "maybe", and is this not a callback (!!!
     * for the callback case)?  If so, then get the auth info now.
     */
    if (hdrp->auth_proto != 0 &&
        key_info == NULL &&
        ! maybe &&
        ! scall->c.is_cbk)
    {
        rpc_authn_protocol_id_t authn_protocol;
        rpc_auth_info_p_t auth_info;

        assert(scall->c.key_info == NULL);

        /*
         * Get the appropriate DG auth EPV.  We need to convert the wire
         * auth protocol ID into the corresponding API value and then
         * get the EPV using that latter value.
         */
        authn_protocol = rpc__auth_cvt_id_wire_to_api(hdrp->auth_proto, &st);
        if (st != rpc_s_ok)
        {
            reject_st = rpc_s_unknown_reject;
            goto AFTER_CALL_TO_STUB;
        }
        auth_epv = (rpc_dg_auth_epv_p_t)
                        rpc__auth_rpc_prot_epv
                            (authn_protocol, RPC_C_PROTOCOL_ID_NCADG);
        if (auth_epv == NULL)
        {
            reject_st = rpc_s_unknown_reject;
            goto AFTER_CALL_TO_STUB;
        }

        /*
         * Call into auth service to create an auth info.
         *
         * This generates an auth_info and a key_info.  The auth_info
         * gets attached to the handle, while the key_info gets
         * attached to the scte and scall.
         */
        key_info = (*auth_epv->create) (&st);
        if (st != rpc_s_ok)
        {
            reject_st = rpc_s_unknown_reject;
            goto AFTER_CALL_TO_STUB;
        }
        scall->c.key_info = key_info;
        scall->c.auth_epv = auth_epv;
        /* we have one reference to the key_info already. */
        scall->scte->key_info = key_info;
        scall->scte->auth_epv = auth_epv;
        RPC_DG_KEY_REFERENCE(key_info); /* for the scte */

        /* fill in the auth_info in the handle */
        auth_info = key_info->auth_info;
        h->c.c.auth_info = auth_info;
        RPC_DG_AUTH_REFERENCE(auth_info); /* for the handle */
    }
    auth_epv = scall->c.auth_epv;

    /*
     * If the interface isn't valid, send a rejection.
     */
    rpc_object_inq_type(&scall->c.call_object, &type, &st);
    if (! (st == rpc_s_ok || st == rpc_s_object_not_found))
    {
        RPC_DBG_GPRINTF((
            "(rpc__dg_execute_call) rpc_object_inq_type failed, st=0x%x [%s]\n",
            st, rpc__dg_act_seq_string(hdrp)));
        reject_st = st;
        goto AFTER_CALL_TO_STUB;
    }

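    /*
     * Look up the interface (using the object's type, if any).  The
     * lookup yields the server stub and manager EPVs used for the
     * dispatch below, and may refine the client-supplied ihint.
     */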
    ihint = hdrp->ihint;
    rpc__if_lookup2 (&hdrp->if_id, hdrp->if_vers, &type,
                     &ihint, &ifspec, &ss_epv, &mgr_epv,
                     &flags, &max_calls, &max_rpc_size,
                     &if_callback, &st);

    if (st != rpc_s_ok)
    {
        RPC_DBG_GPRINTF((
            "(rpc__dg_execute_call) rpc__if_lookup failed, st=0x%x [%s]\n",
            st, rpc__dg_act_seq_string(hdrp)));
        reject_st = st;
        goto AFTER_CALL_TO_STUB;
    }

    /*
     * The interface is valid, update the call ihint so we tell the client.
     */

    scall->c.call_ihint = ihint;

    /*
     * Extract a copy of the opnum from the packet header, and check to see that
     * it's appropriate for this interface.
     */

    opnum = hdrp->opnum;
    if (opnum >= ifspec->opcnt)
    {
        RPC_DBG_GPRINTF((
            "(rpc__dg_execute_call) Opnum (%u) out of range [%s]\n",
            opnum, rpc__dg_act_seq_string(hdrp)));
        reject_st = rpc_s_op_rng_error;
        goto AFTER_CALL_TO_STUB;
    }

    /*
     * To guarantee at-most-once semantics for non-idempotent RPCs, we
     * must ensure that the call is filtered based on a WAY validated
     * sequence number.  If we don't have such a sequence number, then
     * call back to the client to get one (the returned WAY validated seq
     * must match this RPC's seq - i.e. it must be the RPC that the client
     * is currently performing).  Note that we may do a way_auth
     * callback even when we wouldn't otherwise do it because the
     * underlying authentication layers decided one was needed.
     *
     * The analogous processing for non-idempotent callbacks (from a
     * server manager to the client originating the call, who needs to
     * validate the callback's seq) was previously taken care of in the
     * do_request() processing (a WAY validated logical scte high_seq
     * was already known).
     *
     * Note also that maybe calls with large-INs are tagged as
     * non-idempotent but do not need to be protected against re-runs.
     * (The architecture specifies that maybe calls can *not* have
     * at-most-once semantics, but the implementation finds it more
     * convenient to use the non-idempotent code paths for handling
     * calls with large-INs.)  For this reason, avoid doing a WAY for
     * maybe calls (the client may not even be still running!).
     *
     * Release and reacquire the call lock while performing this
     * (slow path / lengthy) WAY and Auth processing.
     *
     * We perform the WAY RPC with general cancel delivery disabled.
     * The RPC prologue is supposed to be transparent and clients can
     * orphan the call if they get tired of waiting around.
     */

    if (! maybe &&
         (force_way_auth || key_info != NULL ||
         (! idem && ! scall->c.is_cbk)))
    {
        if (!force_way_auth && RPC_DG_SCT_IS_WAY_VALIDATED(scall->scte))
        {
            /*
             * We want to make this check because it's better to be safe
             * than sorry regarding at-most-once semantics.  It's
             * conceivable that the connection became WAY validated *after*
             * this call had passed its initial filtering (if nothing
             * else, it should protect us from other potential coding
             * errors :-)
             */
            if (scall->c.call_seq != scall->scte->high_seq)
            {
                RPC_DBG_PRINTF(rpc_e_dbg_general, 2,
                    ("(rpc__dg_execute_call) Old sequence, previous=%u [%s]\n",
                    scall->scte->high_seq, rpc__dg_act_seq_string(hdrp)));
                goto END_OF_CALL;
            }
        }
        else
        {
            boolean high_seq_was_way_validated =
                (boolean)(scall->scte->high_seq_is_way_validated);

            /*
             * WAY validate the connection and ensure that this call
             * is the current call.  Unlock the scall while performing the
             * WAY validation.
             */
            rpc_dg_sct_elt_p_t  scte;

            RPC_DG_CALL_UNLOCK(&scall->c);

            /*
             * The WAY validation routine must be called with the connection
             * unlocked.  Due to locking hierarchy and the fact that we
             * unlocked the scall, we've opened up a window... check if
             * it's safe to continue.
             */
            RPC_LOCK(0);
            RPC_DG_CALL_LOCK(&scall->c);
            if (scall->c.state != rpc_e_dg_cs_recv)
            {
                RPC_UNLOCK(0);
                goto END_OF_CALL;
            }
            scte = scall->scte;
            RPC_DG_CALL_UNLOCK(&scall->c);

            rpc__dg_sct_way_validate(scte, force_way_auth, &st);

            RPC_UNLOCK(0);

            RPC_DG_CALL_LOCK(&scall->c);

            /*
             * Before continuing, we've "opened up" the call (due to
             * the unlock/lock) and we need to check if it is safe to
             * continue...
             */
            if (scall->c.state != rpc_e_dg_cs_recv)
                goto END_OF_CALL;

            if (st != rpc_s_ok)
            {
                reject_st = rpc_s_who_are_you_failed;
                goto AFTER_CALL_TO_STUB;
            }
            else
            {
                if (scall->c.call_seq != scall->scte->high_seq)
                {
                    RPC_DBG_PRINTF(rpc_e_dbg_general, 2,
                        ("(rpc__dg_execute_call) Old sequence, previous=%u [%s]\n",
                        scall->scte->high_seq, rpc__dg_act_seq_string(hdrp)));
                    goto END_OF_CALL;
                }
            }

            /*
             * If high_seq_was_way_validated, rpc__dg_call_receive_int()
             * has already verified the packet by calling
             * (*auth_epv->recv_ck)().
             * It's ok to call it again here except when using
             * pkt_privacy where the packet body is already decrypted.
             * For consistency, we don't verify the packet if it's
             * already done.
             */
            if (key_info != NULL && !force_way_auth
                && !high_seq_was_way_validated)
            {
                unsigned32 blocksize = auth_epv->blocksize;
                char *cksum;
                int raw_bodysize;
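
                /*
                 * blocksize is the auth service's alignment unit for
                 * the packet body; raw_bodysize will be the body length
                 * rounded up to that unit, and cksum will point at the
                 * checksum within the packet's auth trailer.
                 */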

                /*
                 * This must be a single buffer fragment.
                 * The very first fragment!
                 */
                if (rqe->hdrp == NULL || rqe->frag_len != rqe->pkt_len)
                {
                    reject_st = rpc_s_who_are_you_failed;
                    goto AFTER_CALL_TO_STUB;
                }

                /*
                 * It's not really necessary to round up the packet body
                 * length here because the sender includes the length of
                 * padding before the auth trailer in the packet body length.
                 * However, that's arguably wrong behavior and we shouldn't
                 * rely on it.
                 */
                raw_bodysize = ((rqe->hdrp->len + blocksize - 1)
                                / blocksize) * blocksize;

                /*
                 * Now that we have obtained authentication
                 * credentials, go back and verify that cksum is
                 * entirely contained inside the packet, and the
                 * auth_type is what we expected.  This "shouldn't
                 * fail" unless someone's playing games with us.
                 */

                if (((RPC_C_DG_RAW_PKT_HDR_SIZE + raw_bodysize +
                    auth_epv->overhead) > rqe->frag_len) ||
                    (rqe->hdrp->auth_proto != auth_epv->auth_proto))
                {
                    st = nca_s_proto_error;
                }
                else
                {
                    /*
                     * Adjust the packet buffer's pkt_len,
                     * i.e., excluding the auth trailer.
                     * Also adjust data_len in the iovector.
                     */
                    rqe->pkt_len = raw_bodysize + RPC_C_DG_RAW_PKT_HDR_SIZE;
                    iove.data_len = raw_bodysize;

                    cksum = rqe->pkt->body.args + raw_bodysize;
                    RPC_DBG_PRINTF(rpc_e_dbg_general, 4,
                        ("(rpc__dg_execute_call) calling recv_ck now\n"));
                    (*auth_epv->recv_ck) (key_info, rqe, cksum, &st);
                }
                if (st != rpc_s_ok)
                {
                    RPC_DBG_PRINTF(rpc_e_dbg_general, 2,
                        ("(rpc__dg_execute_call) pkt didn't verify -- %x\n", st));
                    reject_st = rpc_s_who_are_you_failed;
                    goto AFTER_CALL_TO_STUB;
                }
            }
            else if (key_info != NULL && force_way_auth)
            {
                /*
                 * Call rpc__dg_call_receive_int() again. This time,
                 * (*auth_epv->recv_ck)() is supposed to succeed.
                 */
                rpc__dg_call_receive_int(&scall->c, &iove, &st);
                force_way_auth = false;
                if (st == rpc_s_dg_need_way_auth) {
                    /*
                     * We still don't own the rqe...
                     */
                    force_way_auth = true;
                }
                if (st != rpc_s_ok)
                {
                    RPC_DBG_GPRINTF((
"(rpc__dg_execute_call) Receive failed st = 0x%x after forced WAY auth callback\n", st));
                    reject_st = rpc_s_who_are_you_failed;
                    goto AFTER_CALL_TO_STUB;
                }
                assert(rqe == RPC_DG_RECVQ_ELT_FROM_IOVECTOR_ELT(&iove));
            }
        }
    }

    assert(force_way_auth == false);
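
    /*
     * Any forced WAY auth has been resolved by this point: either the
     * retried receive verified the packet, or we bailed out above to
     * the reject / end-of-call paths.
     */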

    /*
     * If we get here, we're accepting the call and we're gonna dispatch
     * to the server stub!  Set up the required args for the dispatch
     * (the iove was done above) and call the server stub.
     */

    RPC_DG_HDR_INQ_DREP(&drep, hdrp);
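
    /*
     * drep now describes the client's NDR data representation (integer
     * byte order, character set, and floating point format) as taken
     * from the packet header; it's handed to the stub below so the
     * unmarshalling can convert the ins as needed.
     */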

    /*
     * The packet rationing code needs to know that we no longer need
     * to worry about doing WAYs.
     */
    scall->c.rq.is_way_validated = true;

    /*
     * Unlock the call lock while in the stub.
     */
    RPC_DG_CALL_UNLOCK(&scall->c);

    /*
     * Note: the stubs are absolutely, positively required to free the
     * provided iove-described buffer (assuming the len > 0), even if
     * the stub detects and returns an error condition.  Set the
     * "called_stub" flag to true so that we know we don't have to worry
     * about freeing the RQE ourselves.
     */
    called_stub = true;

    /*
     * As required by the packet rationing rules, if the I/O vector element
     * has no data, free it up now because the server stub doesn't bother
     * to free such elements.  Note that we needed the element until
     * now for the info that was in its packet header.
     */

    if (iove.data_len == 0 && iove.buff_dealloc != NULL)
        RPC_FREE_IOVE_BUFFER(&iove);

    switch (ifspec->stub_rtl_if_vers)
    {
        /*
         * If this is an old v0 or v1 stub runtime interface, do the
         * dirty work out of line.
         */
        case RPC_C_STUB_RTL_IF_VERS_NCS_1_0:
        case RPC_C_STUB_RTL_IF_VERS_NCS_1_5:
            if (rpc_g_dg_pre_v2_server_call_p == NULL)
            {
                /*
                 * rpc_m_pre_v2_ss
                 * "(%s) Can't handle pre-v2 server stubs"
                 */
                rpc_dce_svc_printf (
                    __FILE__, __LINE__,
                    "%s",
                    rpc_svc_server_call,
                    svc_c_sev_fatal | svc_c_action_abort,
                    rpc_m_pre_v2_ss,
                    "rpc__dg_execute_call" );
            }

            prev_cancel_state = dcethread_enableinterrupt_throw(0);
            (*rpc_g_dg_pre_v2_server_call_p)(
                ifspec,
                opnum,
                (handle_t) h,
                (rpc_call_handle_t) scall,
                &iove,
                drep,
                ss_epv,
                mgr_epv,
                &reject_st);
            dcethread_enableinterrupt_throw(prev_cancel_state);
            break;

        /*
         * This is the v2 (new) stub runtime interface.
         */
        case RPC_C_STUB_RTL_IF_VERS_DCE_1_0:
            prev_cancel_state = dcethread_enableinterrupt_throw(0);
            (*(ss_epv[opnum]))(
                    (handle_t) h,
                    (rpc_call_handle_t) scall,
                    &iove,
                    &drep,
                    &ndr_g_transfer_syntax,
                    mgr_epv,
                    &reject_st);
            dcethread_enableinterrupt_throw(prev_cancel_state);
            break;

        /*
         * Unknown version
         */

        default:
            RPC_DBG_GPRINTF((
                "(rpc__dg_execute_call) Unknown rtl/if version. 0x%x\n",
               ifspec->stub_rtl_if_vers));
            RPC_DG_CALL_LOCK(&scall->c);

            if (iove.buff_dealloc != NULL)
                RPC_FREE_IOVE_BUFFER(&iove);

            goto END_OF_CALL;
    }

    /*
     * While the stub may have returned due to call orphaning, this will
     * not typically be the case.  Even if it completed successfully,
     * we could become orphaned further down in this processing (e.g.
     * in xmitq_push).  Defer orphan checking and cleanup till we only
     * have to do it once; the extra work done if we are orphaned won't
     * kill us.
     */

    /*
     * Acquire the call lock since we need it for several pieces of
     * processing from here on in.
     *
     * Before continuing, we've "opened up" the call (due to the
     * unlock/lock) and we need to check if it is safe to continue...
     */

    RPC_DG_CALL_LOCK(&scall->c);
    if (scall->c.state != rpc_e_dg_cs_recv
        && scall->c.state != rpc_e_dg_cs_xmit)
    {
        goto END_OF_CALL;
    }

    /*
     * Error cases detected before we get to calling the stub and that want
     * to send a "reject" re-enter here.
     */
AFTER_CALL_TO_STUB:

    RPC_DG_CALL_LOCK_ASSERT(&scall->c);

    /*
     * If this was a broadcast request and we're either rejecting the call
     * or the call faulted, just skip to the end.
     */

    if (broadcast &&
        (reject_st != rpc_s_ok ||
         RPC_DG_HDR_INQ_PTYPE(&scall->c.xq.hdr) == RPC_C_DG_PT_FAULT))
    {
        goto END_OF_CALL;
    }

    /*
     * The stub was obligated to call the iove's dealloc routine,
     * so we don't have to free that.  We don't need the recvq anymore.
     * In normal cases, the list will already be empty, so having this
     * in the fast path doesn't hurt and (in the error cases) it frees
     * up resources while we potentially wait in xmitq_push() (or
     * awaiting a xqe for a reject or no [outs] response).
     */

    if (scall->c.rq.head != NULL)
        rpc__dg_recvq_free(&scall->c.rq);

    /*
     * If a reject condition exists, prepare the reject response.
     * Otherwise, handle the case where the stub has no [outs] and it's
     * not a maybe call; we still need to generate a response pkt.
     *
     * We depend on both of these response queuing operations
     * to only queue the response and not send it, since we have yet to
     * set up the return cancel_pending status for the client.
     */

    if (reject_st != rpc_s_ok)
    {
        /*
         * If the reject path caused us to jump over the call to the
         * stub, we need to free the request RQE here.
         *
         * If we were forced to do WAY auth and haven't done so, don't
         * free it, because we don't own the rqe.
         */

        if (! called_stub && !force_way_auth && iove.buff_dealloc != NULL)
            RPC_FREE_IOVE_BUFFER(&iove);

        queue_mapped_reject(scall, reject_st);
    }
    else
    {
        if (scall->c.state == rpc_e_dg_cs_recv && !maybe)
        {
            rpc_iovector_t  xmit_data;

            xmit_data.num_elt = 0;
            rpc__dg_call_transmit_int(&scall->c, &xmit_data, &st);
            /*
             * The transmit may fail because the call is already orphaned.
             * It may fail for some other reason as well.  In either case,
             * we're not gonna get a response to the client.  Just keep
             * falling through (other calls may fail as well) and clean up.
             */
        }
    }

    /*
     * At this point, we can stop accepting forwarded cancels.  Determine
     * the cancel pending disposition of the call and set the call's
     * xq cancel_pending flag accordingly so that the response (or at
     * least the last pkt of the response) gets sent with the proper
     * state.  This is the single point where the "send response"
     * path ensures that it has flushed any pending cancels from the
     * call executor thread; this includes cancels generated by
     * a received cancel-request or a cancel induced by orphan call
     * processing.
     *
     * We could have stopped accepting cancels as soon as the stub
     * returned, but we really wanted to wait till here before setting
     * up the return cancel_pending status.  After this, we shouldn't
     * screw around anymore with the xq (i.e. re-initing it).  There
     * should be a reject, fault or normal response queued up and
     * it should go out with the correct cancel_pending flag.
     * That is of course, unless that call has been orphaned, in which
     * case no further response of any kind will be sent to the client
     * (setting the cancel_pending flag will not affect the client;
     * which is a *requirement* under this condition).
     */

    if (rpc__cthread_cancel_caf(&scall->c.c))
    {
        RPC_DBG_PRINTF(rpc_e_dbg_cancel, 5,
            ("(rpc__dg_execute_call) setting cancel_pending\n"));
        scall->c.xq.base_flags2 |= RPC_C_DG_PF2_CANCEL_PENDING;
    }

    /*
     * Assuming that the call isn't already orphaned, finally push
     * out the remainder of the response.  The push may fail
     * because orphaning occurs during the push or for some
     * other reason; just continue to cleanup processing. Indicate
     * whether or not the response was sent so we can determine
     * the appropriate call state when we're done.
     */

    if (scall->c.state != rpc_e_dg_cs_orphan)
    {
        rpc__dg_call_xmitq_push(&scall->c, &st);
        if (st == rpc_s_ok)
            sent_response = true;
        else
            RPC_DBG_GPRINTF((
                "(rpc__dg_execute_call) xmitq_push returns 0x%x\n", st));
    }

    /*
     * Error cases that want to skip the reply-sending machinery re-enter here.
     */
END_OF_CALL:

    RPC_DG_CALL_LOCK_ASSERT(&scall->c);

    /*
     * End of the fast path.
     *
     * Any response has been sent (or at least all the pkts have been
     * sent once).  Perform final call wrap-up processing / state
     * transitioning.  In the event that we didn't take the send
     * response path, we still need to flush any pending cancels.
     * In the event that we took the send response path but the response
     * wasn't successfully sent, we'll call the following twice but
     * that's ok.
     */

    if (! sent_response)
        (void) rpc__cthread_cancel_caf(&scall->c.c);

    /*
     * If the call is not "idempotent" we must defer complete end of
     * call processing until the client's ack is received.  (Note: "maybe"
     * and "broadcast" are tagged as "idempotent".)  For idempotent calls
     * with small outs, we can clean up right now (if the client never
     * gets the response, it can rerun the call).
     *
     * Idempotent calls with large outs are treated similarly to
     * non-idempotent calls.  We retain the outs until "acknowledged"
     * by the client or the retransmit logic gives up.  This is required
     * to prevent the undesirable situation of the client receiving
1138     * a "nocall" in response to a "ping" after the client has already
1139     * received some of the outs.
1140     *
1141     * If we didn't (seemingly) successfully send a response, skip the
1142     * final state (this covers orphan processing as well).  Furthermore,
1143     * if the call has been orphaned stay in that state.
1144     *
1145     * An orphaned call has already been disassociated from its SCTE
1146     * (ccall in the case of a cbk_scall) and there should be a maximum
1147     * of two references to the orphaned SCALL; the call executor's and
1148     * the timer thread.  The only actions required are to release any
1149     * remaining resources held by the call and release one reference
     * to the SCALL (the timer thread will eventually complete the job
     * of destroying the scall).
     */

    if ((! idem || RPC_DG_FLAG_IS_SET(scall->c.xq.base_flags, RPC_C_DG_PF_FRAG))
        && sent_response)
    {
        RPC_DG_CALL_SET_STATE(&scall->c, rpc_e_dg_cs_final);
    }
    else
    {
        /*
         * It's really the end of the call, so we can free the xmitq.
         */

        if (scall->c.xq.head != NULL)
            rpc__dg_xmitq_free(&scall->c.xq, &scall->c);

        /*
         * Typically, the call goes back to the idle state, ready to
         * handle the next call.  First, if this was a callback, update
         * the callback sequence number in the associated client callback
         * handle.
         *
         * If the call was orphaned, we can't do either of the above
         * (we just want to let the scall's timer complete the job of
         * destroying the scall).
         */

        if (scall->c.state != rpc_e_dg_cs_orphan)
        {
            if (scall->c.is_cbk)
            {
                scall->cbk_ccall->c.high_seq = scall->c.call_seq;
            }

            RPC_DG_CALL_SET_STATE(&scall->c, rpc_e_dg_cs_idle);
        }
    }

    /*
     * Give up the packet reservation for this call.
     */

    rpc__dg_pkt_cancel_reservation(&scall->c);

    if (scall->c.is_cbk && scall->cbk_ccall != NULL)
    {
        /*
         * Update the original ccall's high_rcv_frag_size and snd_frag_size.
         */

        scall->cbk_ccall->c.rq.high_rcv_frag_size =
            scall->c.rq.high_rcv_frag_size;
        scall->cbk_ccall->c.xq.snd_frag_size = scall->c.xq.snd_frag_size;
    }

    /*
     * We're now done with our scall lock/reference.
     */

    scall->has_call_executor_ref = false;
    RPC_DG_SCALL_RELEASE(&scall);
}