/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Portions of this software have been released under the following terms:
 *
 * (c) Copyright 1989-1993 OPEN SOFTWARE FOUNDATION, INC.
 * (c) Copyright 1989-1993 HEWLETT-PACKARD COMPANY
 * (c) Copyright 1989-1993 DIGITAL EQUIPMENT CORPORATION
 *
 * To anyone who acknowledges that this file is provided "AS IS"
 * without any express or implied warranty:
 * permission to use, copy, modify, and distribute this file for any
 * purpose is hereby granted without fee, provided that the above
 * copyright notices and this notice appears in all source code copies,
 * and that none of the names of Open Software Foundation, Inc., Hewlett-
 * Packard Company or Digital Equipment Corporation be used
 * in advertising or publicity pertaining to distribution of the software
 * without specific, written prior permission.  Neither Open Software
 * Foundation, Inc., Hewlett-Packard Company nor Digital
 * Equipment Corporation makes any representations about the suitability
 * of this software for any purpose.
 *
 * Copyright (c) 2007, Novell, Inc. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Novell Inc. nor the names of its contributors
 *     may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
**
**  NAME
**
**      cnxfer.c
**
**  FACILITY:
**
**      Remote Procedure Call (RPC)
**
**  ABSTRACT:
**
**  Entrypoints to support buffered data transfer within the
**  Connection-oriented protocol services component of the RPC
**  runtime.
**
**
*/

#include <commonp.h>    /* Common declarations for all RPC runtime */
#include <com.h>        /* Common communications services */
#include <comprot.h>    /* Common protocol services */
#include <cnp.h>        /* NCA Connection private declarations */
#include <cnfbuf.h>     /* NCA Connection fragment buffer declarations */
#include <cnpkt.h>      /* NCA Connection protocol header */
#include <cncall.h>     /* NCA Connection call service */
#include <cnassoc.h>    /* NCA Connection association service */
#include <cnxfer.h>

/*
 * Prototype for internal entrypoints.
 */

INTERNAL void rpc__cn_prep_next_iovector_elmt (
        rpc_cn_call_rep_p_t /*call_rep*/,
        unsigned32     * /*status*/
    );


/*
**++
**
**  ROUTINE NAME:       rpc__cn_copy_buffer
**
**  SCOPE:              PRIVATE
**
**  DESCRIPTION:
**
**  Copies an iovector element to the iovector array in the
**  call rep.  This routine buffers data until the total byte
**  count reaches the maximum segment size for the transport or
**  until the iovector in the call rep is exhausted.  When either
**  condition holds, the buffered data is transmitted.
**
**  INPUTS:
**
**      call_rep        The call rep.
**
**      iov_elt_p       The iovector element we are copying.
**
**  INPUTS/OUTPUTS:     none
**
**  OUTPUTS:
**
**      status          The completion status
**
**  IMPLICIT INPUTS:    none
**
**  IMPLICIT OUTPUTS:   none
**
**  FUNCTION VALUE:     none
**
**  SIDE EFFECTS:       none
**
**--
**/

PRIVATE void rpc__cn_copy_buffer
(
  rpc_cn_call_rep_p_t     call_rep,
  rpc_iovector_elt_p_t    iov_elt_p,
  unsigned32              *status
)
{
    unsigned32              xfer_size;
    unsigned32              bytes_to_segment_size;
    unsigned32              bytes_left_to_xfer;
    unsigned32              cur_iov_index;
    byte_p_t                src;

    src = iov_elt_p->data_addr;
    bytes_left_to_xfer = iov_elt_p->data_len;
    *status = rpc_s_ok;

    bytes_to_segment_size = call_rep->max_seg_size -
                            RPC_CN_CREP_ACC_BYTCNT (call_rep);

    cur_iov_index = RPC_CN_CREP_CUR_IOV_INDX (call_rep);
    while (bytes_left_to_xfer > 0)
    {
        /*
         * See if we've reached our transmit segment size.
         * If so, send what we have accumulated so far.
         */
        if (bytes_to_segment_size == 0)
        {
            /*
             * Transmit all the data buffered thus far.
             */
            rpc__cn_transmit_buffers (call_rep, status);
            rpc__cn_dealloc_buffered_data (call_rep);

            /*
             * Fix up the iovector in the call rep so
             * that we again have only the cached protocol
             * header (and no stub data).
             */
            RPC_CN_FREE_ALL_EXCEPT_PROT_HDR (call_rep);
            cur_iov_index = RPC_CN_CREP_CUR_IOV_INDX (call_rep);
            if (*status != rpc_s_ok)
            {
                return;
            }
        }
        /*
         * Check to see if the current iovector element is full.
         */
        else if (RPC_CN_CREP_FREE_BYTES (call_rep) == 0)
        {
            /*
             * If the current iovector element is full and we've
             * reached the end of our iovector, send what we
             * have accumulated so far.
             */
            if (RPC_CN_CREP_IOVLEN (call_rep) >= RPC_C_MAX_IOVEC_LEN)
            {
                /*
                 * Transmit all the data buffered thus far.
                 */
                rpc__cn_transmit_buffers (call_rep, status);
                rpc__cn_dealloc_buffered_data (call_rep);

                /*
                 * Fix up the iovector in the call rep so
                 * that we again have only the cached protocol
                 * header (and no stub data).
                 */
                RPC_CN_FREE_ALL_EXCEPT_PROT_HDR (call_rep);
                cur_iov_index = RPC_CN_CREP_CUR_IOV_INDX (call_rep);
                if (*status != rpc_s_ok)
                {
                    return;
                }

            }
            else
            {
                /*
                 * We have not reached the end of our iovector.
                 * In this case, we can use a new iovector element.
                 */
                rpc__cn_prep_next_iovector_elmt (call_rep, status);
                cur_iov_index ++;
            }
        }

        /*
         * Copy the minimum of:
         *   1) what will fit into current fragment,
         *   2) number of bytes left to transfer,
         *   3) remaining bytes left before we reach max_seg_size.
         */
        xfer_size = RPC_CN_CREP_FREE_BYTES (call_rep);
        bytes_to_segment_size = call_rep->max_seg_size -
                                RPC_CN_CREP_ACC_BYTCNT (call_rep);
        if (xfer_size > bytes_to_segment_size)
        {
            xfer_size = bytes_to_segment_size;
        }
        if (xfer_size > bytes_left_to_xfer)
        {
            xfer_size = bytes_left_to_xfer;
        }

        memcpy (RPC_CN_CREP_FREE_BYTE_PTR (call_rep), src, xfer_size);
        bytes_left_to_xfer -= xfer_size;
        RPC_CN_CREP_ACC_BYTCNT (call_rep) += xfer_size;
        src += xfer_size;
        RPC_CN_CREP_FREE_BYTE_PTR (call_rep) += xfer_size;
        RPC_CN_CREP_FREE_BYTES (call_rep) -= xfer_size;
        RPC_CN_CREP_IOV (call_rep) [cur_iov_index].data_len +=
            xfer_size;
    }
}
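
/*
 * Illustrative sketch (not part of the runtime): how a caller might feed
 * a small caller-owned piece of stub data through rpc__cn_copy_buffer.
 * The wrapper name and sizes are hypothetical.  The routine copies the
 * piece into the call rep's buffered iovector and transmits whenever the
 * accumulated byte count reaches call_rep->max_seg_size or the iovector
 * in the call rep fills up.
 */
#if 0
static void example_buffer_small_piece
(
  rpc_cn_call_rep_p_t     call_rep,
  byte_p_t                data,
  unsigned32              len,
  unsigned32              *status
)
{
    rpc_iovector_elt_t  elt;

    /*
     * Zero any fields not shown here; only the fields this file uses
     * are filled in below.
     */
    memset (&elt, 0, sizeof (elt));
    elt.buff_dealloc = NULL;        /* caller keeps ownership of the buffer */
    elt.buff_addr    = data;
    elt.buff_len     = len;
    elt.data_addr    = data;
    elt.data_len     = len;

    rpc__cn_copy_buffer (call_rep, &elt, status);
}
#endif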


/*
**++
**
**  ROUTINE NAME:       rpc__cn_add_new_iovector_elmt
**
**  SCOPE:              PRIVATE
**
**  DESCRIPTION:
**
**  Append the specified buffer as a new iovector element to
**  the iovector in the call rep.  This routine will transfer
**  data as necessary over the association until the total
**  accumulated data in the call rep is less than the
**  negotiated segment size.
**
**  INPUTS:
**
**      call_rep        The call rep.
**
**      iov_elt_p       The iovector element describing the
**                      data to add.
**
**  INPUTS/OUTPUTS:     none
**
**  OUTPUTS:
**
**      status          The completion status
**
**  IMPLICIT INPUTS:    none
**
**  IMPLICIT OUTPUTS:   none
**
**  FUNCTION VALUE:     none
**
**  SIDE EFFECTS:       none
**
**--
**/

PRIVATE void rpc__cn_add_new_iovector_elmt
(
  rpc_cn_call_rep_p_t     call_rep,
  rpc_iovector_elt_p_t    iov_elt_p,
  unsigned32              *status
)
{
    unsigned32              bytes_to_segment_size;
    unsigned32              cur_iov_index;
    rpc_iovector_elt_p_t    iov_p;

    *status = rpc_s_ok;

    /*
     * If the current iovector element is full and we've
     * reached the end of our iovector, send what we
     * have accumulated so far.
     */
    if (RPC_CN_CREP_IOVLEN (call_rep) >= RPC_C_MAX_IOVEC_LEN)
    {
        /*
         * Transmit all the data buffered thus far.
         */
        rpc__cn_transmit_buffers (call_rep, status);
        rpc__cn_dealloc_buffered_data (call_rep);

        /*
         * Fix up the iovector in the call rep so
         * that we again have only the cached protocol
         * header (and no stub data).
         */
        RPC_CN_FREE_ALL_EXCEPT_PROT_HDR (call_rep);
        if (*status != rpc_s_ok)
        {
            return;
        }
    }

    /*
     * At this point, we know that there is at least one
     * iovector element available for us to use in the
     * call rep.  There may be other elements between
     * this and the first element (containing the protocol
     * header).
     */

    /*
     * Fill in a new iovector element.
     */
    RPC_CN_CREP_IOVLEN (call_rep)++;
    cur_iov_index = ++RPC_CN_CREP_CUR_IOV_INDX (call_rep);
    RPC_CN_CREP_FREE_BYTES (call_rep) = 0;

    iov_p = &(RPC_CN_CREP_IOV (call_rep)[cur_iov_index]);
    *iov_p = *iov_elt_p;

    /*
     * If the new iovector element causes the total amount
     * of buffered data to exceed our max segment size,
     * transmit chunks of data from the current iovector
     * element until the total remaining size (including
     * protocol header) is less than our segment size.
     */
    bytes_to_segment_size = call_rep->max_seg_size -
                            RPC_CN_CREP_ACC_BYTCNT (call_rep);

    /*
     * Only invoke rpc__cn_transmit_buffers() if iov_elt_p->data_len is
     * greater than bytes_to_segment_size.
     */
    while (iov_elt_p->data_len > bytes_to_segment_size)
    {
        /*
         * Adjust the new iovector element to reflect only
         * enough data that can fit into current segment;
         * send it.
         */
        iov_p->data_len = bytes_to_segment_size;
        RPC_CN_CREP_ACC_BYTCNT (call_rep) += bytes_to_segment_size;
        rpc__cn_transmit_buffers (call_rep, status);
        if (*status != rpc_s_ok)
        {
            /*
             * Fix up the iovector in the call rep so
             * that we again have only the cached protocol
             * header (and no stub data).
             */
            rpc__cn_dealloc_buffered_data (call_rep);
            RPC_CN_FREE_ALL_EXCEPT_PROT_HDR (call_rep);
            return;
        }
        iov_elt_p->data_len -= bytes_to_segment_size;
        iov_elt_p->data_addr += bytes_to_segment_size;

        /*
         * Deallocate all the buffers except the 1st and last.
         * Then adjust iovector so that we have only 2 elements:
         * the header plus the current stub data.
         */
        if (RPC_CN_CREP_IOVLEN (call_rep) > ((call_rep->sec != NULL) ? 3 : 2))
        {
            /*
             * rpc__cn_dealloc_buffered_data will always skip the
             * first iovector element (protocol header).  Decrementing
             * iovlen will cause it to skip the last element also.
             */
            RPC_CN_CREP_IOVLEN (call_rep) --;
            rpc__cn_dealloc_buffered_data (call_rep);

            /*
             * Now we rebuild the iovector.  It will have only
             * 2 elements: the header, plus the iovector element
             * which we are processing.
             */
            RPC_CN_CREP_IOVLEN (call_rep) = (call_rep->sec != NULL) ? 3 : 2;
            RPC_CN_CREP_CUR_IOV_INDX (call_rep) = 1;
            iov_p = &(RPC_CN_CREP_IOV (call_rep)[1]);
        }

        /*
         * Now logically, the only data is the header.
         * We are going to chain on the next iovector element
         * during the next iteration of this while loop.
         */
        RPC_CN_CREP_ACC_BYTCNT (call_rep) = RPC_CN_CREP_SIZEOF_HDR (call_rep);
        RPC_CN_CREP_IOV(call_rep)[0].data_len =
                RPC_CN_CREP_SIZEOF_HDR (call_rep);

        *iov_p = *iov_elt_p;

        bytes_to_segment_size = call_rep->max_seg_size -
                            RPC_CN_CREP_ACC_BYTCNT (call_rep);

    }

    /*
     * At this point, the iovector element added (plus the
     * header) cannot exceed the max segment size.
     */

    /* If we started out with an iovector element whose
     * size (combined with the header) is a multiple of
     * our segment size, then the newly added iovector
     * element would have length = 0 (since all the data
     * would have been transmitted in the while loop).
     * Free the element in this case.
     *
     */
    if (iov_p->data_len == 0)
    {
        if (iov_p->buff_dealloc != (rpc_buff_dealloc_fn_t) NULL)
        {
            (iov_p->buff_dealloc) (iov_p->buff_addr);
        }
        RPC_CN_CREP_IOVLEN (call_rep) --;
        RPC_CN_CREP_CUR_IOV_INDX (call_rep) --;
    }
    else
    {
        /*
         * Update the total bytecount to account for the new
         * iovector element.
         */
        RPC_CN_CREP_ACC_BYTCNT (call_rep) += iov_elt_p->data_len;

        /*
         * Set free bytes to 0 so that we would allocate
         * a new iovector element next time instead of
         * copying data past the end of the current iovector
         * element.
         */
        RPC_CN_CREP_FREE_BYTES (call_rep) = 0;
    }
}
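
/*
 * Illustrative sketch (not part of the runtime): adding a large
 * caller-owned buffer by reference with rpc__cn_add_new_iovector_elmt.
 * The wrapper name and parameters are hypothetical.  Unlike
 * rpc__cn_copy_buffer, nothing is copied here; the element is appended
 * to the call rep's iovector and, if it pushes the accumulated byte
 * count past max_seg_size, the routine transmits it in segment-sized
 * chunks until the remainder (plus the header) fits in one segment.
 */
#if 0
static void example_add_by_reference
(
  rpc_cn_call_rep_p_t     call_rep,
  byte_p_t                big_buffer,
  unsigned32              big_len,
  rpc_buff_dealloc_fn_t   dealloc,
  unsigned32              *status
)
{
    rpc_iovector_elt_t  elt;

    /*
     * Zero any fields not shown here; only the fields this file uses
     * are filled in below.
     */
    memset (&elt, 0, sizeof (elt));
    elt.buff_dealloc = dealloc;     /* runtime frees the buffer when done */
    elt.buff_addr    = big_buffer;
    elt.buff_len     = big_len;
    elt.data_addr    = big_buffer;
    elt.data_len     = big_len;

    rpc__cn_add_new_iovector_elmt (call_rep, &elt, status);
}
#endif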

#if 0

/*
**++
**
**  ROUTINE NAME:       rpc__cn_flush_buffers
**
**  SCOPE:              PRIVATE - declared in cnxfer.h
**
**  DESCRIPTION:
**
**  Transmit a final fragment if any or all of the iovector element
**  buffers would have to be copied, as indicated by the "make reusable"
**  bit.  A final fragment can only be sent if the total number of bytes
**  is greater than the RT->stub guaranteed minimum.  The data sent must
**  be a multiple of 8 bytes.
**
**  INPUTS:
**
**      call_rep        The call rep.
**
**  INPUTS/OUTPUTS:     none
**
**  OUTPUTS:
**
**      status          The completion status
**
**  IMPLICIT INPUTS:    none
**
**  IMPLICIT OUTPUTS:   none
**
**  FUNCTION VALUE:     none
**
**  SIDE EFFECTS:       none
**
**--
**/

PRIVATE void rpc__cn_flush_buffers
(
  rpc_cn_call_rep_p_t     call_rep,
  unsigned32              *status
)
{
    unsigned32          i;
    rpc_iovector_elt_t  *iov_p;

    *status = rpc_s_ok;

    if (RPC_CN_CREP_ACC_BYTCNT (call_rep) >= rpc_c_assoc_must_recv_frag_size)
    {
        /*
         * There's enough data to do another send.
         */
        rpc__cn_transmit_buffers (call_rep, status);
        rpc__cn_dealloc_buffered_data (call_rep);
        RPC_CN_FREE_ALL_EXCEPT_PROT_HDR (call_rep);
    }
    else
    {
        /*
         * There's not enough data to send. Copy all that's
         * buffered.
         */
        for (i = 1;
             i < RPC_CN_CREP_IOVLEN (call_rep);
             i++)
        {
            rpc__cn_copy_buffer (call_rep,
                                 &RPC_CN_CREP_IOV (call_rep)[i],
                                 status);
        }
    }
}
#endif /* 0 */


/*
**++
**
**  ROUTINE NAME:       rpc__cn_transmit_buffers
**
**  SCOPE:              PRIVATE
**
**  DESCRIPTION:
**
**  Transmits the data buffered in the call rep's iovector
**  over the association.
**
**  INPUTS:
**
**      call_rep        The call rep.
**
**  INPUTS/OUTPUTS:     none
**
**  OUTPUTS:
**
**      status          The completion status
**
**  IMPLICIT INPUTS:    none
**
**  IMPLICIT OUTPUTS:   none
**
**  FUNCTION VALUE:     none
**
**  SIDE EFFECTS:       none
**
**--
**/

PRIVATE void rpc__cn_transmit_buffers
(
 rpc_cn_call_rep_p_t     call_rep,
 unsigned32              *status
)
{
    rpc_cn_packet_p_t   header_p;

    /*
     * Write the bytecount accumulated thus far into the fragment
     * length field of the cached protocol header.
     */
    *status = rpc_s_ok;
    header_p = (rpc_cn_packet_p_t) RPC_CN_CREP_SEND_HDR (call_rep);
    RPC_CN_PKT_FRAG_LEN (header_p) = RPC_CN_CREP_ACC_BYTCNT (call_rep);

    /*
     * Set the alloc hint; it appears that NetApp's RPC implementation
     * depends on this.
     * Three possible cases to deal with:
     * 1) call_rep->alloc_hint is not 0 - just use that value.
     * 2) call_rep->alloc_hint is 0 AND both the first and last frag
     *    flags are set - calculate alloc_hint.
     * 3) call_rep->alloc_hint is 0 AND the first and last frag flags
     *    are not both set - leave alloc_hint at 0 so it's ignored, and
     *    print a warning since this should not happen.
     */
    if (call_rep->alloc_hint != 0)
    {
        RPC_CN_PKT_ALLOC_HINT (header_p) = call_rep->alloc_hint;
        call_rep->alloc_hint -= (RPC_CN_CREP_ACC_BYTCNT (call_rep) - RPC_CN_CREP_SIZEOF_HDR (call_rep));
    }
    else
    {
        if ( (RPC_CN_PKT_FLAGS (header_p) & RPC_C_CN_FLAGS_FIRST_FRAG) &&
            (RPC_CN_PKT_FLAGS (header_p) & RPC_C_CN_FLAGS_LAST_FRAG) )
        {
            /* single fragment being sent, so we can calculate alloc_hint */
            RPC_CN_PKT_ALLOC_HINT (header_p) = RPC_CN_CREP_ACC_BYTCNT (call_rep) -
                RPC_CN_CREP_SIZEOF_HDR (call_rep);
        }
        else
        {
            /* not a single fragment, yet call_rep->alloc_hint is 0
             * print out a warning as this should be fixed */
            RPC_DBG_PRINTF (rpc_es_dbg_general, RPC_C_CN_DBG_GENERAL,
                            ("(rpc__cn_transmit_buffers) setting alloc_hint is 0\n"));
        }
    }

    if (RPC_CALL_IS_CLIENT (((rpc_call_rep_t *) call_rep)))
    {
        /*
         * Check for pending cancels if sending a request. Set the flag
         * in the request header to forward the cancel if there is one
         * pending and this is the first fragment of the request.
         */
        if (RPC_CN_PKT_FLAGS (header_p) & RPC_C_CN_FLAGS_FIRST_FRAG)
        {
            if (call_rep->u.client.cancel.local_count)
            {
                RPC_DBG_PRINTF (rpc_e_dbg_cancel, RPC_C_CN_DBG_CANCEL,
                               ("(rpc__cn_transmit_buffers) setting alert pending bit in request header for queued cancel\n"));
                RPC_CN_PKT_FLAGS (header_p) |= RPC_C_CN_FLAGS_ALERT_PENDING;
                call_rep->u.client.cancel.local_count--;
            }
            else
            {
                DCETHREAD_TRY
                {
                    dcethread_checkinterrupt ();
                }
                DCETHREAD_CATCH (dcethread_interrupt_e)
                {
                    RPC_DBG_PRINTF (rpc_e_dbg_cancel, RPC_C_CN_DBG_CANCEL,
                                   ("(rpc__cn_transmit_buffers) setting alert pending bit in request header for cancel just detected\n"));
                    RPC_CN_PKT_FLAGS (header_p) |= RPC_C_CN_FLAGS_ALERT_PENDING;
                    rpc__cn_call_start_cancel_timer (call_rep, status);
                }
                DCETHREAD_ENDTRY
            }
            if (*status != rpc_s_ok)
            {
                return;
            }
        }
        RPC_DBG_PRINTF (rpc_e_dbg_cancel, RPC_C_CN_DBG_CANCEL,
                       ("(rpc__cn_transmit_buffers) setting flag indicating first frag has been sent\n"));
        call_rep->u.client.cancel.server_is_accepting = true;
        call_rep->num_pkts = 0;
    }

    /*
     * If security was requested attach the authentication trailer
     * to the last iovector element. Make sure to add padding, if
     * required to the stub data to ensure the trailer starts on a
     * 4-byte boundary.
     */
    if (call_rep->sec != NULL)
    {
        rpc_iovector_elt_p_t    iov_p;
        rpc_cn_auth_tlr_t       *auth_tlr;

        /*
         * Remove the authentication trailer size from the header
         * iovector element. This was added by
         * RPC_CN_CREP_ADJ_IOV_FOR_TLR.
         */
        (RPC_CN_CREP_IOV(call_rep)[0]).data_len -= call_rep->prot_tlr->data_size;

        /*
         * Now adjust some fields in the auth trailer. The auth
         * trailer must start on a 4-byte boundary. Pad the user, or
         * stub, data to make it so. The amount of padding is
         * contained in the auth trailer so that the receiver can
         * determine the real user data size.
         */
        auth_tlr = (rpc_cn_auth_tlr_t *)call_rep->prot_tlr->data_p;
        auth_tlr->stub_pad_length =
            (4 - ((RPC_CN_CREP_ACC_BYTCNT (call_rep) -
                   call_rep->prot_tlr->data_size) & 0x03)) & 0x03;
        (RPC_CN_CREP_IOV(call_rep)[RPC_CN_CREP_IOVLEN(call_rep) - 2]).data_len +=
                                   auth_tlr->stub_pad_length;
        RPC_CN_PKT_FRAG_LEN (header_p) +=
            auth_tlr->stub_pad_length -
            RPC_CN_CREP_SIZEOF_TLR_PAD (call_rep);

        /*
         * Hook the auth trailer iovector element after the last
         * iovector element.
         */
        iov_p = &(RPC_CN_CREP_IOV(call_rep)[RPC_CN_CREP_IOVLEN(call_rep) - 1]);
        iov_p->buff_dealloc = NULL;
        iov_p->data_len =
            call_rep->prot_tlr->data_size -
            RPC_CN_CREP_SIZEOF_TLR_PAD (call_rep);
        iov_p->data_addr = (byte_p_t) call_rep->prot_tlr->data_p;
    }

    /*
     * Send the buffers in the iovector out over the association.
     */
    rpc__cn_assoc_send_frag (call_rep->assoc,
                             &(call_rep->buffered_output.iov),
                             call_rep->sec,
                             status);

    /*
     * Clear the first frag flag bit in the cached protocol header
     * so that subsequent packets will not have the bit set.
     */
    RPC_CN_PKT_FLAGS (header_p) &= ~RPC_C_CN_FLAGS_FIRST_FRAG;

    /*
     * Update the count of packets sent and received for this call.
     */
    call_rep->num_pkts++;
}
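
/*
 * Illustrative sketch (not part of the runtime): the stub-data padding
 * arithmetic used above for authenticated calls.  The auth trailer must
 * start on a 4-byte boundary, so 0-3 pad bytes are appended to the stub
 * data.  The function name and the example numbers are hypothetical.
 */
#if 0
static unsigned32 example_stub_pad_length
(
  unsigned32  acc_byte_count,     /* header + stub data + reserved trailer */
  unsigned32  trailer_size        /* call_rep->prot_tlr->data_size */
)
{
    /*
     * E.g. acc_byte_count = 61, trailer_size = 8: the stub data ends at
     * byte 53, and (4 - (53 & 3)) & 3 = 3, so 3 pad bytes push the trailer
     * to offset 56, a 4-byte boundary.  An already aligned offset yields 0.
     */
    return (4 - ((acc_byte_count - trailer_size) & 0x03)) & 0x03;
}
#endif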


/*
**++
**
**  ROUTINE NAME:       rpc__cn_prep_next_iovector_elmt
**
**  SCOPE:              INTERNAL
**
**  DESCRIPTION:
**
**  Prepare a new iovector element from the call rep for use
**  in buffering data.
**
**  INPUTS:
**
**      call_rep        The call rep.
**
**
**  INPUTS/OUTPUTS:     none
**
**  OUTPUTS:
**
**      status          The completion status
**
**  IMPLICIT INPUTS:    none
**
**  IMPLICIT OUTPUTS:   none
**
**  FUNCTION VALUE:     none
**
**  SIDE EFFECTS:       none
**
**--
**/

INTERNAL void rpc__cn_prep_next_iovector_elmt
(
  rpc_cn_call_rep_p_t     call_rep,
  unsigned32              *status
)
{
    unsigned32              cur_iov_index;
    rpc_iovector_elt_p_t    iov_p;
    rpc_cn_fragbuf_p_t      buf_p;

    /*
     * Allocate a new [large] fragment buffer.
     */
    buf_p = rpc__cn_fragbuf_alloc (true);
    if (buf_p == NULL)
    {
        *status = rpc_s_no_memory;
        return;
    }

    /*
     * Make the next iovector element point to it.
     * Initialize pointers.
     */
    RPC_CN_CREP_IOVLEN (call_rep) ++;
    cur_iov_index = ++ (RPC_CN_CREP_CUR_IOV_INDX (call_rep));
    iov_p = &(RPC_CN_CREP_IOV (call_rep)[cur_iov_index]);
    iov_p->buff_dealloc = (rpc_buff_dealloc_fn_t)buf_p->fragbuf_dealloc;
    iov_p->buff_addr = (byte_p_t) buf_p;
    iov_p->buff_len = buf_p->max_data_size;
    iov_p->data_addr = (byte_p_t) buf_p->data_p;
    iov_p->data_len = 0;

    RPC_CN_CREP_FREE_BYTES (call_rep) = rpc_g_cn_large_frag_size;
    RPC_CN_CREP_FREE_BYTE_PTR (call_rep) = (byte_p_t) buf_p->data_p;
    *status = rpc_s_ok;

}
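
/*
 * Illustrative note (not part of the runtime): after
 * rpc__cn_prep_next_iovector_elmt returns rpc_s_ok, the call rep's
 * bookkeeping describes an empty large fragment buffer.  That is the
 * invariant rpc__cn_copy_buffer relies on when it copies into
 * RPC_CN_CREP_FREE_BYTE_PTR.  A hypothetical consistency check:
 */
#if 0
static int example_fresh_element_is_empty
(
  rpc_cn_call_rep_p_t     call_rep
)
{
    rpc_iovector_elt_p_t    iov_p;

    iov_p = &(RPC_CN_CREP_IOV (call_rep)[RPC_CN_CREP_CUR_IOV_INDX (call_rep)]);
    return (iov_p->data_len == 0) &&
           (RPC_CN_CREP_FREE_BYTES (call_rep) == rpc_g_cn_large_frag_size) &&
           (RPC_CN_CREP_FREE_BYTE_PTR (call_rep) == iov_p->data_addr);
}
#endif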

/*
**++
**
**  ROUTINE NAME:       rpc__cn_dealloc_buffered_data
**
**  SCOPE:              PRIVATE
**
**  DESCRIPTION:
**
**  Deallocates all the elements of an iovector (except for the
**  first element).  The first element is assumed to contain the
**  protocol header which will be reused on subsequent transfers.
**
**  NOTE that this routine does not adjust any of the data
**  pointers (cur_iov_indx, iovlen, etc.) in the call rep.
**  This is done so that the caller can have better control.
**
**  INPUTS:
**
**      call_rep        The call rep.
**
**  INPUTS/OUTPUTS:     none
**
**  OUTPUTS:            none
**
**  IMPLICIT INPUTS:    none
**
**  IMPLICIT OUTPUTS:   none
**
**  FUNCTION VALUE:     none
**
**  SIDE EFFECTS:       none
**
**--
**/

PRIVATE void rpc__cn_dealloc_buffered_data
(
  rpc_cn_call_rep_p_t     call_rep
)
{
    unsigned32      cur_iov_index;
    unsigned32      iov_elmnts;

    iov_elmnts = RPC_CN_CREP_IOVLEN (call_rep);
    /*
     * If authenticated RPC is used, the last iovector
     * element is the auth trailer.
     * Don't free it; it is freed explicitly in call_end.
     */
    if (call_rep->sec != NULL)
    {
        iov_elmnts--;
    }

    for (cur_iov_index = 1;
         cur_iov_index < iov_elmnts;
         cur_iov_index++)
    {
        if (RPC_CN_CREP_IOV (call_rep) [cur_iov_index].buff_dealloc != NULL)
        {
            (RPC_CN_CREP_IOV (call_rep) [cur_iov_index].buff_dealloc)
                (RPC_CN_CREP_IOV (call_rep) [cur_iov_index].buff_addr);
        }
        RPC_CN_CREP_IOV (call_rep) [cur_iov_index].buff_addr = NULL;
    }
}
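
/*
 * Illustrative sketch (not part of the runtime): the flush pattern used
 * throughout this file.  rpc__cn_dealloc_buffered_data only releases the
 * fragment buffers; the caller is expected to reset the iovector indices
 * itself, which is what RPC_CN_FREE_ALL_EXCEPT_PROT_HDR does here.  The
 * wrapper name is hypothetical.
 */
#if 0
static void example_flush_and_reset
(
  rpc_cn_call_rep_p_t     call_rep,
  unsigned32              *status
)
{
    rpc__cn_transmit_buffers (call_rep, status);
    rpc__cn_dealloc_buffered_data (call_rep);

    /*
     * Leave only the cached protocol header in the iovector so the next
     * fragment can be accumulated behind it.
     */
    RPC_CN_FREE_ALL_EXCEPT_PROT_HDR (call_rep);
}
#endif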

/*
 **++
 **
 **  ROUTINE NAME:       rpc__cn_get_alloc_hint
 **
 **  SCOPE:              PRIVATE
 **
 **  DESCRIPTION:
 **
 **  Walks through all the iov elements and adds up their data lengths.
 **  The total amount is the alloc_hint.
 **
 **  INPUTS:
 **
 **      stub_data_p     iovector containing data to be added up
 **
 **  INPUTS/OUTPUTS:     none
 **
 **  OUTPUTS:            none
 **
 **  IMPLICIT INPUTS:    none
 **
 **  IMPLICIT OUTPUTS:   none
 **
 **  FUNCTION VALUE:     sum of the data lengths which is alloc_hint
 **
 **  SIDE EFFECTS:       none
 **
 **--
 **/

PRIVATE unsigned32 rpc__cn_get_alloc_hint
(
 rpc_iovector_p_t stub_data_p
)
{
    rpc_iovector_elt_p_t iov_elt_p;
    unsigned32 i;
    unsigned32 alloc_hint = 0;

    for (i = 0, iov_elt_p = stub_data_p->elt;
         i < stub_data_p->num_elt;
         i++, iov_elt_p++)
    {
        alloc_hint += iov_elt_p->data_len;
    }

    return (alloc_hint);
}