/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Portions of this software have been released under the following terms:
 *
 * (c) Copyright 1989-1993 OPEN SOFTWARE FOUNDATION, INC.
 * (c) Copyright 1989-1993 HEWLETT-PACKARD COMPANY
 * (c) Copyright 1989-1993 DIGITAL EQUIPMENT CORPORATION
 *
 * To anyone who acknowledges that this file is provided "AS IS"
 * without any express or implied warranty:
 * permission to use, copy, modify, and distribute this file for any
 * purpose is hereby granted without fee, provided that the above
 * copyright notices and this notice appears in all source code copies,
 * and that none of the names of Open Software Foundation, Inc., Hewlett-
 * Packard Company or Digital Equipment Corporation be used
 * in advertising or publicity pertaining to distribution of the software
 * without specific, written prior permission.  Neither Open Software
 * Foundation, Inc., Hewlett-Packard Company nor Digital
 * Equipment Corporation makes any representations about the suitability
 * of this software for any purpose.
 *
 * Copyright (c) 2007, Novell, Inc. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Novell Inc. nor the names of its contributors
 *     may be used to endorse or promote products derived from this
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
**  NAME:
**
**      ernodtbl.c
**
**  FACILITY:
**
**      IDL Stub Runtime Support
**
**  ABSTRACT:
**
**      Node table (address <=> node number) maintenance
**
**  VERSION: DCE 1.0
*/
#if HAVE_CONFIG_H
#include <config.h>
#endif

/*
OVERALL OVERVIEW OF NODE MANAGEMENT ROUTINES AND STRUCTURES

The RPC IDL compiler terms chunks of memory pointed to by [ptr] pointer
parameters "nodes".  One of the primary data structures used to manage these
nodes is a tree.  The elements of trees are commonly called "nodes".
Therefore, whenever I think the possibility of confusion exists, I will call
the chunks passed in as parameters "RPC nodes", and I will call the
elements of the tree "tree nodes".

This module provides routines and data structures for node management,
including alias detection and preservation, and deleted node
tracking (added for DCE V1.1).  In order to do this, some basic capabilities
are needed:
        Associating a node number with an address,
        Looking up a node by address,
        Looking up a node by node number,
        Tracking nodes which are deleted during a call, as opposed
        to simply being orphaned.

This code does not handle nodes which partially overlap.

The fundamental structure used in tracking nodes is named
"rpc_ss_pvt_node_table_t". It contains:
        "rpc_ss_ptr_array_p_t", a pointer to the root node in a tree
                used to convert from node numbers to node addresses,
        "rpc_ss_hash_entry_t", a hash table used to convert from node
                addresses to node numbers,
        "rpc_ss_deleted_nodes_t *", a pointer to a list of pointers
                to deleted nodes,
        several other support cells.

STRUCTURES USED TO MAP NODE NUMBERS TO NODE ADDRESSES

Historical note:

When I initially designed and wrote this module, I envisioned the
structure used to map from node-number to node-address as a
multi-dimensional array which added dimensions as it needed to store more.
In retrospect, I understand that it is simply an n-ary tree, which adds
a new superior node and pushes down the previous tree as a child of the
new (root) node.  Because people are more comfortable with trees that change
topology than they are with arrays whose dimensionality changes, I will
describe it as a tree in this commentary.  For maintainability, the module
might want to be updated at some point, changing the <mumble>_array names
into names that reflect the tree-ness of the structure (and changing the
small amount of commentary already in the module).

End of historical note.

The defined constant "rpc_ss_node_array_size" defines n in n-ary for this
tree.  In this discussion I will use rpc_ss_node_array_size == 3, which
happens to be its value when using the internal debugging driver for this
module.

Each node in the tree holds rpc_ss_node_array_size node addresses if a
leaf node, or the same number of pointers to child nodes if not a leaf.
Each node is actually an array of unions of pointers to void and pointers
to tree nodes.  (Module type rpc_ss_ptr_array_t.)
When the primary node tracking structure (rpc_ss_pvt_node_table_t) is
created in rpc_ss_init_node_table, it points to a tree consisting of a
single node.

            +----------+
            |          |  rpc_ss_pvt_node_table_t
            +----------+
                    |
                    v
           +-----+-----+-----+
           |  1  |  2  |  3  |   rpc_ss_ptr_array_t
           +-----+-----+-----+

Each cell in the rpc_ss_ptr_array_t can point to a node, so this simple
initial structure can suffice for the first 3 nodes that are marshalled.
(Actually, of course, the first rpc_ss_node_array_size nodes.)  When the
fourth (e.g.) node is marshalled, we are out of room, and need to expand the
mapping structures.  We do this by inserting a new (root) node between the
existing tree node and the rpc_ss_pvt_node_table_t structure.

            +----------+
            |          |  rpc_ss_pvt_node_table_t
            +----------+
                    |
                    v
           +-----+-----+-----+
           | 1-3 | 4-6 | 7-9 |   rpc_ss_ptr_array_t
           +-----+-----+-----+
              |
              v
     +-----+-----+-----+
     |  1  |  2  |  3  |   rpc_ss_ptr_array_t
     +-----+-----+-----+

Now, we can add a new node as a sibling to our original node, to map nodes
4 through 6, and eventually nodes 7 through 9.  Thus, with just one level
of indirection to the leaf nodes, we can handle (rpc_ss_node_array_size
squared) nodes.

It should be easy to see that when we need to process the 10th node in our
example we will again be out of room, and of course we expand the
structure again by adding another level.  The structure now looks like this:

             +----------+
             |          |  rpc_ss_pvt_node_table_t
             +----------+
                     |
                     v
            +-----+-----+-----+
            | 1-9 |10-18|19-27|   rpc_ss_ptr_array_t
            +-----+-----+-----+
               |
               v
      +-----+-----+-----+
      | 1-3 | 4-6 | 7-9 |   rpc_ss_ptr_array_t
      +-----+-----+-----+
         |
         v
+-----+-----+-----+
|  1  |  2  |  3  |   rpc_ss_ptr_array_t
+-----+-----+-----+

It is important to note that this structure can be sparse.  Our discussion
has used an example with node numbers incrementing from one, as indeed nodes
are numbered on an original call.  However, it is entirely possible in a
chained call that the first node marshalled is node 27 (e.g.).  In this case,
only those tree nodes needed to map to node 27 are created.  The other cells
point to NULL, until a node that requires another tree node is processed.

             +----------+
             |          |  rpc_ss_pvt_node_table_t
             +----------+
                     |
                     v
            +-----+-----+-----+
            | 1-9 |10-18|19-27|   rpc_ss_ptr_array_t
            +-----+-----+-----+
                           |
                           v
                  +-----+-----+-----+
                  |19-21|22-24|25-27|   rpc_ss_ptr_array_t
                  +-----+-----+-----+
                                 |
                                 v
                        +-----+-----+-----+
                        |  25 |  26 |  27 |   rpc_ss_ptr_array_t
                        +-----+-----+-----+

It is important to note that leaves are always at the same depth throughout
the tree, and that all intervening tree nodes at the same level map the same
number of RPC nodes. (i.e., one level down, each tree node is capable of
mapping 9 nodes.)

STRUCTURES USED TO MAP NODE ADDRESSES TO NODE NUMBERS

Well, after all that, we can map node numbers into addresses of nodes.
However, the RPC system is presented with addresses of user data to pass
to a remote system.  For that we need to map the other way.

We use a hash table that uses chaining to resolve conflicts.  The hash
algorithm is: shift the address right 5 bits (to account for malloc, which
generally returns chunks aligned to some boundary), then mask the result to
the size of the hash table, currently 256 slots long.
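
As a concrete illustration (the macro get_hash_chain_head, shown later in
this module, is the real implementation), the index computation is

     index = ((long)ptr >> 5) & (RPC_SS_NODE_HASH_TABLE_SIZE - 1);

so with the current 256-slot table and a purely illustrative address of
0x12345:

     0x12345 >> 5  =  0x91A
     0x91A & 0xFF  =  0x1A        (hash chain 26)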

The size of the table is arbitrary, but reflects a tension between two
factors.  If it is smaller, we get more conflicts and realize less advantage
from having a hash table at all.  However, the hash table is simply an array
of rpc_ss_hash_entry_t, whose size is 16 bytes on 32-bit address systems.
Since the entire array has to be nulled out during the init routine,
rpc_ss_init_node_table, we don't want it to be too large--some customers
(notably Sun Microsystems) have noticed the time it takes to zero this
structure and asked us to reduce it.

The structure rpc_ss_hash_entry_t contains
        - a next pointer to implement a singly linked list for hash bucket
          conflict resolution
        - the node number
        - the node address
        - a flag that the node has been marshalled
        - a flag that the node has been unmarshalled
        - a pad to bring the actual size of the structure to the size
          returned by sizeof(rpc_ss_hash_entry_t) (=16)

COMMENTS FOR IMPROVING THE HASH TABLE

There are two fundamental problems with the current hash system:
        - it's slow to initialize
        - it's slow to use

This section describes changes to address these problems.

First and foremost, we need to implement a high-speed hash table
instrumentation system, so we can see the hash table utilization in
practice.  Is the hash table too small?  Is the hash algorithm appropriate?
Are the chains too long?  We don't know!
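
A minimal sketch of what such instrumentation might look like--the names
here are hypothetical, not part of the module.  The counters would be bumped
from rpc_ss_find_hash_entry and dumped when the node table is torn down;
probes/lookups gives the average walk length asked about above.

        typedef struct hash_stats_t {
            unsigned long lookups;
            unsigned long probes;
            unsigned long misses;
            unsigned long max_chain;
        } hash_stats_t;

        static void hash_stats_note_walk
        (
            hash_stats_t *stats,
            unsigned long chain_len,
            int found
        )
        {
            stats->lookups += 1;
            stats->probes += chain_len;
            if (!found) stats->misses += 1;
            if (chain_len > stats->max_chain) stats->max_chain = chain_len;
        }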

Above, we talked about the tension between wanting to increase the size
of the hash table, and wanting to decrease its size.  The hash table is
currently an array of hash entries; extra hash entries are allocated when
a collision forces chaining.  So the first change:

Change the hash table contained in the rpc_ss_node_table_t into an array
of POINTERS to rpc_ss_hash_entry_t, rather than an array of the entries
themselves. This makes the array one fourth the size, and should cut
its initialization time by a factor of 4.  This also allows us to make the
hash table 512 or 1024 entries if we find it necessary, without making
initialization significantly slower than it is now (it would still be
faster in the 512 case).
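
In other words (a sketch of the proposed declaration, not the current one):

        rpc_ss_hash_entry_t *hash_table[RPC_SS_NODE_HASH_TABLE_SIZE];

On a 32-bit system that is 1 KB of pointers to clear at init time instead of
4 KB of 16-byte entries.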

Second change: allocate 10, 50, 100... (choose appropriate n) hash entries
at once from the allocator. Keep the address of the array of hash table
entries and the index of the last one used in the rpc_ss_node_table_t.  Then
allocating a hash entry is simply a matter of bumping a count, until we need
another batch.  This does not need to happen at the creation of the node table,
and this array does not have to be zeroed at allocation. The node table can
be initialized such that the first hash entry allocation will result in a
chunk allocation with no extra code. These chunks do not have to be kept track
of after all the entries in the chunk have been used, so they do not need to
be chained.  Simply allocate a new one, point to it from the
rpc_ss_node_table_t, and forget about the old one.
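
A sketch of what that allocator could look like, assuming two hypothetical
new cells in the node table (entry_chunk and entries_left, both cleared by
rpc_ss_init_node_table so that the first call allocates the first chunk):

        #define HASH_ENTRY_CHUNK_SIZE 64

        static rpc_ss_hash_entry_t *alloc_hash_entry
        (
            rpc_ss_pvt_node_table_t *str
        )
        {
            if (str->entries_left == 0)
            {
                str->entry_chunk = (rpc_ss_hash_entry_t *) rpc_ss_mem_alloc (
                    str->mem_h,
                    HASH_ENTRY_CHUNK_SIZE * sizeof(rpc_ss_hash_entry_t));
                str->entries_left = HASH_ENTRY_CHUNK_SIZE;
            }
            str->entries_left -= 1;
            return str->entry_chunk++;
        }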

Third change: there are many times we need to scan the list of hash entries
linked off the hash entry array.  Currently, this is a linear unordered list,
so the average walk length is 1/2 the length of the list if the entry is
found and the length of the list if the entry is not found.  Since every node
is looked up before it is inserted (to find out if it has to be inserted), we
have a large miss rate (full-length walks).  This code could be much faster
if the collision resolution were a [balanced] binary tree rather than a
linked list.  There is currently binary tree code in compiler module
nametbl.c that could be adapted to this purpose.  The tree would have to be
balanced occasionally, since there will likely be non-random insertion.
Note that if this change is made, the technique used to delete addresses
from the hash table needs to be changed.  Currently, there are several
sites in the module that zero out the address field in the hash entry to
delete it.  That doesn't work in a binary tree, because the value is used
to navigate the tree.  I suggest that we use one of the two pad bytes to
implement a "deleted" flag.  This means that the same value could be in the
tree several times, but that's OK.  Only one of them could be not "deleted".
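
One possible shape for the entry if the chains become trees (again a sketch,
not the current declaration--the current rpc_ss_hash_entry_t appears later
in this module):

        typedef struct rpc_ss_hash_tree_entry_t {
            struct rpc_ss_hash_tree_entry_t *left;
            struct rpc_ss_hash_tree_entry_t *right;
            byte_p_t ptr;
            idl_ulong_int node;
            unsigned char marshalled;
            unsigned char unmarshalled;
            unsigned char deleted;
            unsigned char reserved;
        } rpc_ss_hash_tree_entry_t;

Deletion then becomes "entry->deleted = idl_true;" instead of zeroing
entry->ptr.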

END OF COMMENTS FOR IMPROVING THE HASH TABLE

ROUTINE DESCRIPTIONS FOR MANAGING THE NUMBER-TO-ADDRESS TREE

Routine rpc_ss_expand_array recursively adds superior tree nodes until the
tree is capable of mapping at least the node number passed in.  This is
an internal support routine, called only by other routines in this module.
It does not actually record any node information in the tree; it just makes
the tree deep enough to accommodate a node with the specified number.

The parameters are:
        array - actually a pointer to a pointer to the root node in the tree.
                In practice, this means that the address of a cell in the
                rpc_ss_pvt_node_table_t is passed in.  This has to be indirect
                because we will write the address of the new tree node in this
                cell.
        currently_mapped - The number of RPC nodes capable of being mapped
                by a tree of this depth. (Twenty seven in our overview
                example above.)  Note that this is NOT the number of nodes
                currently being managed by the structure.
        depth - The number of levels currently in the tree.
        num -   The number of the RPC node that we are preparing to map.
                When we are done, currently_mapped >= num.
        p_mem_h - a pointer to a memory handle for the run time's memory
                management system.

The routine allocates a new tree node, pointed to by the local cell t_array,
zeros it out and points the zeroth element at the existing tree.  Then it
places the new node's address in the passed-in tree root pointer.  It updates
the depth and currently_mapped cells and finally checks that currently_mapped
>= num.  If not, it recurses. (Does that mean that the first call is a curse?)

This routine does not populate the subtree that will be needed to actually
manage the node about to be added.  That happens in the next routine,
rpc_ss_register_node_num.

Routine rpc_ss_register_node_num is an internal support routine, called only
by other routines in this module.  It actually places the node address
information in the tree in a way that associates it with the node's number.

The parameters are:
        root_array - A pointer to a pointer to the tree's root.
        currently_mapped - a pointer to the cell recording the number of
                RPC nodes mapped by the tree.
        root_depth - a pointer to the cell recording the depth of the tree.
        num - the number of the node to be registered.
        ptr - a pointer to the RPC node to be registered.  This is the
                value that will ultimately be stored in the tree.
        p_mem_h - a pointer to a memory handle for the run time's memory
                management system.

First, the routine checks that the tree is deep enough to
accommodate the node to be registered (=managed), and if not, it calls the
previous routine, rpc_ss_expand_array.  One call is guaranteed to make
the tree deep enough, no matter what node number is passed in (unless an
exception is raised due to lack of memory).

The routine then walks the tree to the leaf level in a loop, updating local
copies of array (tree root), depth and mapped (number of RPC nodes mapped by
the (sub-) tree). It also uses the parameter num as a local, since it was
passed by value, so a local copy already exists. Each time it drops a level
in the tree, it has to re-map the node number to be an offset in the range
of node numbers mapped by this portion of the tree.  (At the top level,
the node number is already a one-based offset into the range of node numbers
mapped, since at the top level, all the node numbers are mapped.)
Let's look at an example.

Remember our early example of three levels, mapping node numbers from 1 - 27?
Here it is again:

             +----------+
             |          |  rpc_ss_pvt_node_table_t
             +----------+
                     |
                     v
            +-----+-----+-----+
            | 1-9 |10-18|19-27|   rpc_ss_ptr_array_t
            +-----+-----+-----+
               |
               v
      +-----+-----+-----+
      | 1-3 | 4-6 | 7-9 |   rpc_ss_ptr_array_t
      +-----+-----+-----+
         |
         v
+-----+-----+-----+
|  1  |  2  |  3  |   rpc_ss_ptr_array_t
+-----+-----+-----+

The address of the cell in the rpc_ss_pvt_node_table_t which points
to the tree is passed in to the routine.  Let's say we are looking for node
six.

We have the following state:
        array = & of the root node
        depth = 3
        mapped = 27

First, we find the number of RPC nodes mapped by each tree node in the level
below this one, so we can figure out which array cell to use at this level.

     mapped = mapped / rpc_ss_node_array_size; = 27 / 3 = 9

This lets us find the index to use, by using integer division:

     index = (num-1) / mapped; = 5 / 9 = 0

Then we update the node number (offset) to be an offset into the subtree we
are about to walk to, the subtree pointed to by the selected index at this
level (0).

     num = num - (index * mapped); = 6 - ( 0 * 9 ) = 6;

OK, so in this example the first iteration didn't change the offset.  This
will be true anytime we are walking the tree through the zeroth index.

Now we are ready to descend a level in the tree, but first we have to make
sure it is there.  We check, and yes, the tree node mapping RPC nodes 1 - 9
is present. Therefore, we point our local cell "array" at the subtree,
decrement depth and head back up to the top of the loop.

Current state:
        array = & tree node that maps RPC nodes 1 - 9
        depth = 2
        mapped = 9
        num = 6

Go through the same machinations as before:
        mapped = mapped / rpc_ss_node_array_size; = 9 / 3 = 3
        index = (num-1) / mapped; = 5 / 3 = 1;
        num = num - (index * mapped); = 6 - (1*3) = 3

Again, we're ready to descend a level, and we check and find there is NO NODE!
So we allocate one, clear it out, and hook it into the node we are currently
working on.  The tree now looks like this:

                     +----------+
                     |          |  rpc_ss_pvt_node_table_t
                     +----------+
                             |
                             v
                    +-----+-----+-----+
                    | 1-9 |10-18|19-27|   rpc_ss_ptr_array_t
                    +-----+-----+-----+
                       |
                       v
              +-----+-----+-----+
              | 1-3 | 4-6 | 7-9 |   rpc_ss_ptr_array_t
              +-----+-----+-----+
                 |     |
                 v     v
+-----+-----+-----+   +-----+-----+-----+
|  1  |  2  |  3  |   |  4  |  5  |  6  |  rpc_ss_ptr_array_t (2 of 'em!)
+-----+-----+-----+   +-----+-----+-----+

After we hook in the new node, we point to it from our temp cell "array",
and once again decrement depth.

We pop back up to the top of the loop, with the current state:

        array = & of tree node mapping RPC nodes 4 - 6
        depth = 1
        mapped = 3
        num = 3

The loop terminates if depth <= 1 (it will never be < 1, unless there is a
bug somewhere...), so we are done with the loop.

Now we simply index into our leaf tree node using the new value in num,
our offset into the node number range mapped in this node (3), and store
the parameter ptr, which is the address of the RPC node we wish to register.

While this description is rather long, it should be noted that in practice
this tree is very flat; therefore, we execute the loop a small number of
times. The loop is executed (depth-1) times, and the tree stores
rpc_ss_node_array_size to the depth power RPC nodes. At the time this is
written (21-Mar-93), the production value for rpc_ss_node_array_size is 50,
so a tree with depth 3 stores 125,000 RPC nodes, and navigating this tree
requires just two trips through the loop.
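
Stripped of the NULL checks and tree-node allocation, the walk amounts to
the following (a sketch of the arithmetic only; the real loops are in
rpc_ss_register_node_num and rpc_ss_lookup_node_by_num later in this module,
and "leaf_cell" is just an illustrative name):

     while (depth > 1)
     {
         mapped = mapped / rpc_ss_node_array_size;
         index = (num - 1) / mapped;
         num = num - (index * mapped);
         array = (rpc_ss_ptr_array_p_t)((*array)[index]).array_ptr;
         depth = depth - 1;
     }
     leaf_cell = &((*array)[num - 1]);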

These are the only two internal routines that manipulate the node tree.
The remaining routines that use the node tree are used by the RPC runtime
or stubs.

Routine rpc_ss_lookup_node_by_num takes a node number and returns the address
of the corresponding RPC node if the node has been registered, otherwise it
returns NULL.  The parameters are:
        tab - an rpc_ss_node_table_t, really a pointer to void, which gets
                mapped to a pointer to an rpc_ss_pvt_node_table_t, the real
                node table structure.
        num - the number of the node to be looked up.

First the routine makes two fast "no-brainer" checks--special conditions
that mean the pointer is NULL:

If the node number is zero, that indicates a NULL pointer was passed.
If the node number is greater than what the tree currently maps, then the
node can't possibly be in the tree, so don't look for it.  Return NULL.

Now, we perform a tree walk exactly as described for routine
rpc_ss_register_node_num, except that if we reach a child pointer that is
NULL, we return NULL, rather than allocating a child node.
When we reach a leaf node (depth == 1), return the content of the leaf node
array indexed by the offset (number).

That is the last of the routines that ONLY manipulate the tree.  Now we
will look at the few routines that only manipulate the hash table.  Then we
will look at those that manipulate the two of them together.

ROUTINES THAT MANIPULATE THE HASH TABLE

Macro get_hash_chain_head implements the hash algorithm described above.

Routine rpc_ss_find_hash_entry takes two parameters:
        - str, the node table
        - ptr, the address whose hash entry we are trying to find

The routine finds the head of the hash chain for the address and then loops
through the entries in the chain until it finds the match or the end of the
chain.

The interesting thing about this routine is that it always returns a hash
entry--but the returned entry may not match the input.  If this happens, it
means the address is not registered in the hash table.

This routine is currently a significant performance bottleneck if there are
a large (> 1000) number of nodes being passed in the interface, or if the
hash distribution is not good (i.e., anytime there are a large number of
hash conflicts).  This is because it performs a walk of a linear list, the
chain used to resolve hash conflicts.  Currently we do not have any way
of gathering info about the length of the lists in actual use.  This is a
problem that needs to be resolved.

Well, that concludes the routines that manipulate ONLY the hash table.
The remainder of the routines in the module manipulate both.

Routine rpc_ss_register_node_by_num is the fundamental routine that registers
the correspondence between an RPC node's address and its node number.  Even
though it is in the **EXTERNAL ROUTINES** section, it is an internal routine.

MAINTENANCE NOTE***
Routine rpc_ss_register_node_by_num should be moved to the internal support
routines section.
END MAINTENANCE NOTE***

Its parameters are:
        tab -- The address of the node table
        num -- the node number, and
        ptr -- the node's address.

First, we make sure that the "high water mark" for node numbers is at least
as high as this one.  This assures us that we will not assign potentially
overlapping node numbers during the call.  Then we place an entry in the
hash table and in the node number tree.  The callers of this routine have
already made sure that the node is not already registered.

This is one of the places that would need to be changed if we were to make
any of the changes to the hash table recommended above.

Routine rpc_ss_register_node is a marshalling assist routine.  It has four
parameters:
        tab -- the address of the node table
        ptr -- the address of the node being considered for marshalling
        marshalling -- a flag indicating that we are actually in the process
               of marshalling
        has_been_marshalled -- an [out] flag indicating that the node has
               already been marshalled in a previous call.

The routine returns, as a long, the node number of the node, whether or not
it has been marshalled previously.

The first step is to find out if the node is already registered by calling
rpc_ss_lookup_node_by_ptr.  If it has been, and if we are in the process
of marshalling, then we look up whether it has already been marshalled.
Then we set the marshalled flag for this node to true.  (Currently, we only
do that if it is not already set to true, but it is more efficient to do it
unconditionally--should be changed in the future.)  Routine
rpc_ss_lookup_node_by_ptr's description (below) describes a desirable
change to make this routine more efficient.  Make sure you see it.
If the node was already registered, we're done... simply return the node
number.

If it wasn't already registered, this is the first time we have seen this
address (more accurately, this node...  we may have seen this address before
when it represented another node that was deleted.  On deletion, its
registration was erased).  We increment the highest seen node number and use
that as the number for this node, and call rpc_ss_register_node_by_num.  If
we are in the process of marshalling, we mark the node as marshalled in the
hash table entry, but as NOT already marshalled in the [out] parameter.  This
means that we will marshall it this time, but not subsequent times.  Then we
return the node number and exit.

Routine rpc_ss_unregister_node is called when a node is deleted to remove
the node number <-> address mapping.  Its parameters are:
        tab -- the address of the node table
        ptr -- the address of the node being deleted

The interesting fact for this routine is that we ONLY remove the address from
the hash table.  We do nothing with the node number tree. This is because we
are concerned about having a new node with the same address as a previous
(deleted) node NOT be a match.  The first time we see the new node, its
address won't be in the hash table (since this routine deleted the former
node's registration), and therefore it will get a new node number.
If we add new capabilities (callbacks) in which we are concerned about
getting a false match by node number, then we would have to revisit
this routine and also take care of the node number tree.

We remove the address from the hash table by simply NULLing out the address
field of the hash entry.  We don't bother unlinking and deallocating the
entry.  This has an implication for the hash table if we make the chains
into a binary tree.  Since the tree is navigated by key value, we can't
null out one of the key values.  We either have to really delete the node
from the tree, or we have to flag the tree node as being for a deleted RPC
node.

Routine rpc_ss_lookup_node_by_ptr is used to map from a pointer to a node
number. Its parameters are:
        tab -- the address of the node table
        ptr -- the address of the RPC node to look up

This routine operates in a relatively straightforward way.  It calls
rpc_ss_find_hash_entry, and if the lookup succeeds, it pulls the node number
out of the hash entry.  If the lookup in the hash table fails, this routine
returns node number 0, a reserved node number used to indicate the NULL
pointer.

Here is the performance change mentioned above:  This routine is used by
other routines in this module, and is followed by a call to
rpc_ss_find_hash_entry--which this routine just did!  Therefore, a desired
change would be to have an additional [out] parameter to pass back the
hash entry.  Before doing this, we need to find out if the stubs ever
accessed this routine from generated code, or if it is used only by the
stub support library.  If there is any possibility that there is stub code
out there that references this routine, then we need to clone the routine
and modify the clone.

On to a group of three very similar routines.  They are
        rpc_ss_return_pointer_to_node
        rpc_ss_lookup_pointer_to_node and
        rpc_ss_inquire_pointer_to_node

These routines are used in the unmarshalling process.  They take in a node
number and return a node address.  The distinction between the routines, and
particularly the usage of ...lookup_pointer_to_node, is not well understood
by me.  Be VERY CAREFUL if you are mucking with these routines.

The last routine in this module, other than the stand-alone test harness,
is rpc_ss_init_node_table.  Its parameters are:
        tab -- an [out] parameter, the address of the node table
        p_mem_h -- the address of the memory handle

This routine allocates a new node table and initializes it (mostly to
zero/NULL).  It pre-expands the node number tree to be one level deep
(assuming that we'll be marshalling at least one node--this doesn't really
have to be done now).
*/

/*
 * Define HASH_STATS to monitor hash table efficiency.
 * Define UNIT_TEST_ERNODTBL to build stand-alone for unit testing.
 * Define DEBUG to create a file containing the most significant pointer alias events.
 */
#ifdef UNIT_TEST_ERNODTBL
#   define  DCETHREAD_RAISE(x)                puts("Error")
#   define  rpc_ss_mem_alloc(h, x)  (char *)malloc(x)
#   define  rpc_void_p_t            char *
#   define  byte_p_t                char *
#   define  rpc_ss_node_table_t     byte_p_t
#   define  rpc_ss_mem_handle       long
#   define  NULL                    0
#   define  idl_true                1
#   define  idl_false               0
#else
#   include <dce/idlddefs.h>
#   include <ndrmi.h>
#   include <ndrui.h>
#   include <lsysdep.h>
#endif

#if defined UNIT_TEST_ERNODTBL || defined DEBUG
#   include <stdio.h>
#endif

#ifdef PERFMON
#include <dce/idl_log.h>
#endif

#ifdef DEBUG
    static  int ALIAS_DEBUG = idl_false;
#   define  DTOPEN          if (ALIAS_DEBUG && !trace_fid) trace_fid = fopen("alias_trace.dmp", "w")
#   define  DTRACE(ARGS)    if (!trace_fid); else fprintf ARGS
    static  FILE            *trace_fid = NULL;
#else
#   define  DTOPEN          do {;} while (0) /* DTOPEN */
#   define  DTRACE(ARGS)    do {;} while (0) /* DTRACE */
#endif

/******************************************************************************/
/*                                                                            */
/*              Internal Data structures for node table                       */
/*                                                                            */
/******************************************************************************/

#ifdef UNIT_TEST_ERNODTBL
#define rpc_ss_node_array_size 3
#else
#define rpc_ss_node_array_size 50   /* The expansion factor.  With fewer    */
                                    /* nodes than this, lookup is a single  */
                                    /* index operation                      */
#endif

/*
 *  Definition of the chain of blocks of deleted nodes used for
 *  [reflect_deletions]
 */
#define RPC_SS_DELETED_NODES_SIZE 2048

typedef struct rpc_ss_deleted_nodes_t {
    struct rpc_ss_deleted_nodes_t *next;
    idl_ulong_int node_count;   /* Number of node numbers in this block */
    idl_ulong_int node_numbers[RPC_SS_DELETED_NODES_SIZE];
} rpc_ss_deleted_nodes_t;

/*
 * Entry that contains information about a node address.  These are hung in
 * a linked list off of the hash table.  The hash table is used to look up a
 * node number given a node address.
 */
typedef struct rpc_ss_hash_entry_t {
    struct rpc_ss_hash_entry_t *next;   /* Singly linked list of hash entries */
                                        /* rooted in the hash array           */

    byte_p_t ptr;                       /* node address                       */

    idl_ulong_int node;                 /* node number for the address        */

    unsigned char marshalled;           /* True if node has been marshalled   */

    unsigned char unmarshalled;         /* True if node has been unmarshalled */

    short reserved;                     /* Pad out to an even number of bytes */
} rpc_ss_hash_entry_t;

/*
 *  Multilevel array which enables quick indexed lookup of a node address
 *  given a node number.  At the leaf level it contains the node address; at
 *  higher levels it points to another array. Note that the union defined here
 *  consists of a single pointer. The type of the pointer's target is
 *  determined by the union arm selected.
 */
typedef union rpc_ss_ptr_array_element_t {
    byte_p_t void_ptr;                              /* node address         */

    union rpc_ss_ptr_array_element_t *array_ptr;    /* next level in array  */
} rpc_ss_ptr_array_element_t;

typedef rpc_ss_ptr_array_element_t rpc_ss_ptr_array_t [rpc_ss_node_array_size];

typedef rpc_ss_ptr_array_t * rpc_ss_ptr_array_p_t;

/*
 * Actual node table structure, a completely self-contained description of
 * a node number <=> node address mapping.
 */
#define RPC_SS_NODE_HASH_TABLE_SIZE 256
               /* This value must be a power of 2, because of the way the
                    hash function is defined */

typedef struct rpc_ss_pvt_node_table_t {
    rpc_ss_ptr_array_p_t array;             /* multi-level array used to    */
                                            /* look up an address given a   */
                                            /* node number                  */

    rpc_ss_hash_entry_t hash_table[RPC_SS_NODE_HASH_TABLE_SIZE];
                                            /* Hash table used to look up   */
                                            /* the node number associated   */
                                            /* with an address and          */
                                            /* information on               */
                                            /* marshalling/unmarshalling    */
                                            /* status                       */

    long depth;                             /* Number of levels in the      */
                                            /* multi-level array            */

    long currently_mapped;                  /* Number of entries currently  */
                                            /* available in the multi-level */
                                            /* array for storing mappings   */

    idl_ulong_int high_num;                 /* Highest node number used     */

    rpc_ss_mem_handle *mem_h;               /* Pointer to the mem handle    */
                                            /* used to free up local        */
                                            /* storage at the end of a call */

    rpc_ss_deleted_nodes_t *deletes_list;   /* Pointer to a chain of blocks */
                                            /* containing idents of deleted */
                                            /* nodes                        */

    idl_boolean deletes_reflected;          /* TRUE => list of deleted      */
                                            /* nodes is being maintained    */
} rpc_ss_pvt_node_table_t;

/******************************************************************************/
/*                                                                            */
/*                        Internal Utility Macros                             */
/*                                                                            */
/******************************************************************************/

/*
**++
** Find the head of the hash chain for a pointer address.
**--
*/
#define get_hash_chain_head(ghch_str,ghch_ptr) \
&(ghch_str->hash_table[(((long)ghch_ptr>>5) & (RPC_SS_NODE_HASH_TABLE_SIZE-1))])

/******************************************************************************/
/*                                                                            */
/*                        Internal Support Routines                           */
/*                                                                            */
/******************************************************************************/

/*
**++
**  FUNCTIONAL DESCRIPTION:
**
**      rpc_ss_expand_array
**
**          This internal support routine takes the specified array, pushes
**          it a level down in the multilevel array, and allocates a new
**          higher level in the array.
**
**  FORMAL PARAMETERS:
**
**      array -- A segment of the multilevel array
**      currently_mapped -- Number of nodes mapped into the multilevel array
**      depth -- Number of levels in the multilevel array
**      num -- Node number that will be inserted after the expand
**      p_mem_h -- Local memory management handle
**
**  RETURN VALUE:
**
**      None
**
**--
*/
static void rpc_ss_expand_array
(
    rpc_ss_ptr_array_p_t * array,
    long *currently_mapped,
    long *depth,
    long num,
    rpc_ss_mem_handle * p_mem_h
)
{
    /*
    ** This routine is called when we need to make the node number to pointer
    ** mapping array another level (dimension) deeper. It allocates a new
    ** top-level array, and points to the previous top-level from the zeroth
    ** element.
    */
    rpc_ss_ptr_array_p_t t_array;

#ifdef PERFMON
    RPC_SS_EXPAND_ARRAY_N;
#endif

    t_array = (rpc_ss_ptr_array_p_t) rpc_ss_mem_alloc (
              p_mem_h, sizeof(rpc_ss_ptr_array_t));

    /*
    ** Clear out the new storage
    */
    memset( *t_array, 0, sizeof(rpc_ss_ptr_array_t));

    /*
    ** Add another level of indirection.
    */
    (*t_array)[0].array_ptr = (union rpc_ss_ptr_array_element_t*)*array;
    *array = t_array;

    /*
    ** We're now more deeply nested, and map more node numbers.
    */
    (*depth)++;
    *currently_mapped = *currently_mapped * rpc_ss_node_array_size;

    DTRACE((
        trace_fid, "Expanding: array=%p depth=%ld, mapped=%ld\n",
        array, *depth, *currently_mapped
    ));

    if (*currently_mapped < num)
        rpc_ss_expand_array (array, currently_mapped, depth, num, p_mem_h);
#ifdef PERFMON
    RPC_SS_EXPAND_ARRAY_X;
#endif

}

/*
**++
**  FUNCTIONAL DESCRIPTION:
**
**      rpc_ss_register_node_num
**
**          This internal support routine expands the multilevel array to the
**          proper size to contain the specified node number and then
**          inserts the specified node number and node address pair into the
**          node table.
**
**  FORMAL PARAMETERS:
**
**      root_array -- A segment of the multilevel array
**      currently_mapped -- Number of nodes mapped into the multilevel array
**      root_depth -- Number of levels in the multilevel array
**      num -- Node number to be registered
**      ptr -- Node address to be registered
**      p_mem_h -- Memory handle to use for memory allocation when needed
**
**  RETURN VALUE:
**
**      None
**
**--
*/
static void rpc_ss_register_node_num
(
    rpc_ss_ptr_array_p_t *root_array,
    long *currently_mapped,
    long *root_depth,
    long num,
    byte_p_t ptr,
    rpc_ss_mem_handle * p_mem_h
)
{
    rpc_ss_ptr_array_p_t array;
    rpc_ss_ptr_array_p_t t_array;
    long mapped;
    long index;
    long depth;

#ifdef PERFMON
    RPC_SS_REGISTER_NODE_NUM_N;
#endif

    if (*currently_mapped < num)
        rpc_ss_expand_array (root_array, currently_mapped,
                             root_depth, num, p_mem_h);

    array = *root_array;
    depth = *root_depth;
    mapped = *currently_mapped;

    /* The logic in the following loop must parallel the logic of the
        corresponding loop in rpc_ss_lookup_node_by_num */
    while (depth > 1)
    {
        mapped = mapped / rpc_ss_node_array_size;
        index = (num-1) / mapped;
        num = num - (index * mapped);

        if (((*array)[index]).array_ptr == 0)
        {
            t_array = (rpc_ss_ptr_array_p_t) rpc_ss_mem_alloc (
                       p_mem_h, sizeof(rpc_ss_ptr_array_t));

            memset( *t_array, 0, sizeof(rpc_ss_ptr_array_t));

            ((*array)[index]).array_ptr =
                     (union rpc_ss_ptr_array_element_t*)t_array;
        }

        array = (rpc_ss_ptr_array_p_t)((*array)[index]).array_ptr;
        depth = depth - 1;
    }
    ((*array)[num-1]).void_ptr = ptr;
#ifdef PERFMON
    RPC_SS_REGISTER_NODE_NUM_X;
#endif

}

/*
**++
**  FUNCTIONAL DESCRIPTION:
**
**      rpc_ss_find_hash_entry
**
**          This internal support routine returns the hash entry associated
**          with a node address.  If the specified node address is found in the
**          hash table, its hash entry is returned; otherwise the last hash
**          entry in the list is returned.
**
**  FORMAL PARAMETERS:
**
**      str -- Node table
**      ptr -- Node address to lookup
**
**  RETURN VALUE:
**
**      pointer to hash entry (matching or last in list)
**
**--
*/
static rpc_ss_hash_entry_t *rpc_ss_find_hash_entry
(
    rpc_ss_pvt_node_table_t *str,
    byte_p_t ptr
)
{
    rpc_ss_hash_entry_t *hash_entry;

#ifdef PERFMON
    RPC_SS_FIND_HASH_ENTRY_N;
#endif

    hash_entry = get_hash_chain_head (str, ptr);

    while ((hash_entry->ptr != ptr) && (hash_entry->next))
        hash_entry = hash_entry->next;

#ifdef PERFMON
    RPC_SS_FIND_HASH_ENTRY_X;
#endif

    return hash_entry;
}

/*
**++
**  FUNCTIONAL DESCRIPTION:
**
**      rpc_ss_register_node_by_num
**
**          This routine registers the association between the specified node
**          number and node address.
**
**  FORMAL PARAMETERS:
**
**      tab -- Node table
**      num -- Node number to be registered
**      ptr -- Node address to be registered
**
**  RETURN VALUE:
**
**      None
**
**--
*/
static void rpc_ss_register_node_by_num
(
    rpc_ss_node_table_t tab,
    idl_ulong_int num,
    byte_p_t ptr
)
{
    rpc_ss_hash_entry_t *temp;
    rpc_ss_hash_entry_t *hash_entry;
    rpc_ss_pvt_node_table_t *str;

#ifdef PERFMON
    RPC_SS_REGISTER_NODE_BY_NUM_N;
#endif

    str = (rpc_ss_pvt_node_table_t*) tab;

    /*
    ** Keep track of the highest node number seen.
    */
    if (num > str->high_num) str->high_num = num;

    /*
    ** Place an entry in the pointer to node number table, a hash table.
    */
    hash_entry = get_hash_chain_head (str, ptr);

    if (hash_entry->node != 0)
    {
        temp = hash_entry;
        hash_entry = (rpc_ss_hash_entry_t*) rpc_ss_mem_alloc (str->mem_h, sizeof(rpc_ss_hash_entry_t));

        hash_entry->next = temp->next;
        temp->next = hash_entry;
    }

    hash_entry->node = num;
    hash_entry->ptr = ptr;
    hash_entry->marshalled = idl_false;
    hash_entry->unmarshalled = idl_false;

#ifdef HASH_STATS
    printf ("Hash value: %03d Hash chain:", (((long)ptr>>5) & 0xff));
    for (temp = get_hash_chain_head (str, ptr); temp; temp=temp->next)
        printf (" %lx", temp->ptr);
    printf ("\n");
#endif

    /*
    ** Place an entry in the node number to pointer table, a
    ** varying depth, multi-level array.
    */
    rpc_ss_register_node_num ( &str->array,
                               &str->currently_mapped,
                               &str->depth, num, ptr,
                               str->mem_h );
#ifdef PERFMON
    RPC_SS_REGISTER_NODE_BY_NUM_X;
#endif

}

/*
**++
**  FUNCTIONAL DESCRIPTION:
**
**      rpc_ss_lookup_node_by_ptr
**
**          This routine returns the node number associated with a node
**          address.  If the specified node address is not registered it
**          returns node number 0.
**
**  FORMAL PARAMETERS:
**
**      tab -- Node table
**      ptr -- Node address to lookup
**      p_p_hash_entry -- [out] pointer to the hash entry in which the node
**                          number was found
**
**  RETURN VALUE:
**
**      Node number associated with address or 0 if no association
**
**--
*/
static idl_ulong_int rpc_ss_lookup_node_by_ptr
(
    rpc_ss_node_table_t tab,
    byte_p_t ptr,
    rpc_ss_hash_entry_t **p_p_hash_entry
)
{
    rpc_ss_hash_entry_t *hash_entry;
    idl_ulong_int node;
    rpc_ss_pvt_node_table_t * str;

#ifdef PERFMON
    RPC_SS_LOOKUP_NODE_BY_PTR_N;
#endif

    str = (rpc_ss_pvt_node_table_t *) tab;

    hash_entry = rpc_ss_find_hash_entry (str, ptr);

    if (hash_entry->ptr == ptr)
        node = hash_entry->node;
    else
        node = 0;

#ifdef PERFMON
    RPC_SS_LOOKUP_NODE_BY_PTR_X;
#endif

    *p_p_hash_entry = hash_entry;
    return node;
}
1199
1200/******************************************************************************/
1201/*                                                                            */
1202/*  Add a node number to the list of deleted nodes                            */
1203/*  New routine for DCE V1.1                                                  */
1204/*                                                                            */
1205/******************************************************************************/
1206static void rpc_ss_add_delete_to_list
1207(
1208    idl_ulong_int node_number,
1209    rpc_ss_pvt_node_table_t *p_node_table
1210)
1211{
1212    rpc_ss_deleted_nodes_t *p_delete_block;
1213
1214    if (p_node_table->deletes_list == NULL)
1215    {
1216        /* No delete list exists. Start the first block */
1217        p_delete_block = (rpc_ss_deleted_nodes_t *)rpc_ss_mem_alloc(
1218                                    p_node_table->mem_h,
1219                                    sizeof(rpc_ss_deleted_nodes_t));
1220        p_delete_block->next = NULL;
1221        p_delete_block->node_count = 0;
1222        p_node_table->deletes_list = p_delete_block;
1223    }
1224    else if (p_node_table->deletes_list->node_count==RPC_SS_DELETED_NODES_SIZE)
1225    {
1226        /* All existing blocks in the delete list are full.
1227            Add a new one to the head of the chain */
1228        p_delete_block = (rpc_ss_deleted_nodes_t *)rpc_ss_mem_alloc(
1229                                    p_node_table->mem_h,
1230                                    sizeof(rpc_ss_deleted_nodes_t));
1231        p_delete_block->next = p_node_table->deletes_list;
1232        p_delete_block->node_count = 0;
1233        p_node_table->deletes_list = p_delete_block;
1234    }
1235    else
1236    {
1237        /* Use the partly-filled block at the head of the list */
1238        p_delete_block = p_node_table->deletes_list;
1239    }
1240
1241    p_delete_block->node_numbers[p_delete_block->node_count] = node_number;
1242    (p_delete_block->node_count)++;
1243}
1244
1245/******************************************************************************/
1246/*                                                                            */
1247/*                            External Routines                               */
1248/*                                                                            */
1249/******************************************************************************/
1250
1251/******************************************************************************/
1252/*                                                                            */
1253/*  Enable [reflect_deletions] in the node table                              */
1254/*  New routine for DCE V1.1                                                  */
1255/*                                                                            */
1256/******************************************************************************/
1257void rpc_ss_enable_reflect_deletes
1258(
1259    rpc_ss_node_table_t tab
1260)
1261{
1262    rpc_ss_pvt_node_table_t * str;
1263
1264    str = (rpc_ss_pvt_node_table_t *) tab;
1265    str->deletes_reflected = idl_true;
1266}
1267
1268/******************************************************************************/
1269/*                                                                            */
1270/*  Marshall a list of deleted nodes                                          */
1271/*  New routine for DCE V1.1                                                  */
1272/*                                                                            */
1273/******************************************************************************/
1274void rpc_ss_ndr_marsh_deletes
1275(
1276    IDL_msp_t IDL_msp
1277)
1278{
1279    rpc_ss_pvt_node_table_t *p_node_table;
1280    idl_ulong_int delete_count = 0;
1281    rpc_ss_deleted_nodes_t *p_delete_block;
1282
1283    p_node_table = (rpc_ss_pvt_node_table_t *)
1284                                            IDL_msp->IDL_mem_handle.node_table;
1285
1286    if (p_node_table != NULL)
1287    {
1288        for (p_delete_block = p_node_table->deletes_list;
1289             p_delete_block != NULL;
1290             p_delete_block = p_delete_block->next)
1291        {
1292            delete_count += p_delete_block->node_count;
1293        }
1294    }
1295
1296    /* Marshall the Z-value for the conformant array */
1297    IDL_MARSH_ULONG(&delete_count);
1298    if (delete_count == 0)
1299        return;
1300
1301    /* Marshall the blocks of node numbers */
1302    for (p_delete_block = p_node_table->deletes_list;
1303         p_delete_block != NULL;
1304         p_delete_block = p_delete_block->next)
1305    {
1306#ifdef PACKED_SCALAR_ARRAYS
1307        rpc_ss_ndr_marsh_by_pointing(p_delete_block->node_count, 4,
1308                                     (rpc_void_p_t)p_delete_block->node_numbers,
1309                                     IDL_msp);
1310#else
1311        rpc_ss_ndr_marsh_by_looping(p_delete_block->node_count, IDL_DT_ULONG,
1312                                    (rpc_void_p_t)p_delete_block->node_numbers,
1313                                    4, 0, IDL_msp);
1314#endif
1315    }
1316}
1317
1318/******************************************************************************/
1319/*                                                                            */
1320/*  Unmarshall a list of deleted nodes and delete the nodes                   */
1321/*  New routine for DCE V1.1                                                  */
1322/*                                                                            */
1323/******************************************************************************/
1324void rpc_ss_ndr_unmar_deletes
1325(
1326    IDL_msp_t IDL_msp
1327)
1328{
1329    idl_ulong_int delete_count = 0;
1330    idl_ulong_int *delete_list;
1331    unsigned32 i;
1332    rpc_void_p_t node_to_delete;
1333
1334    /* Get size of delete list */
1335    IDL_UNMAR_ULONG(&delete_count);
1336    if (delete_count == 0)
1337        return;
1338
1339    /* Get the delete list */
1340    delete_list = (idl_ulong_int *)rpc_ss_mem_alloc(&IDL_msp->IDL_mem_handle,
1341                                    delete_count * sizeof(idl_ulong_int));
1342#ifdef PACKED_SCALAR_ARRAYS
1343    rpc_ss_ndr_unmar_by_copying(delete_count, 4, (rpc_void_p_t)delete_list,
1344                                IDL_msp);
1345#else
1346    rpc_ss_ndr_unmar_by_looping(delete_count, IDL_DT_ULONG,
1347                                (rpc_void_p_t)delete_list, 4, 0, IDL_msp);
1348#endif
1349
1350    /* Apply the deletes */
1351    for (i=0; i<delete_count; i++)
1352    {
1353        node_to_delete = (rpc_void_p_t)rpc_ss_lookup_node_by_num(
1354                                        IDL_msp->IDL_mem_handle.node_table,
1355                                        delete_list[i]);
        rpc_allocator_free(&IDL_msp->IDL_allocator, node_to_delete);
1357    }
1358}
1359
1360/*
1361**++
1362**  FUNCTIONAL DESCRIPTION:
1363**
1364**      rpc_ss_lookup_node_by_num
1365**
1366**          This routine returns the address of a node associated with the
1367**          specified node number.  If the node number is not yet registered, then
1368**          NULL is returned.
1369**
1370**  FORMAL PARAMETERS:
1371**
1372**      tab -- Node table
1373**      num -- Node number to be looked up
1374**
1375**  RETURN VALUE:
1376**
1377**      node address or NULL if unknown
1378**
1379**--
1380*/
1381byte_p_t rpc_ss_lookup_node_by_num
1382(
1383    rpc_ss_node_table_t tab,
1384    unsigned long num
1385)
1386{
1387    unsigned long mapped;
1388    long depth;
1389    long index;
1390    rpc_ss_ptr_array_p_t array;
1391    rpc_ss_pvt_node_table_t * str;
1392
1393#ifdef PERFMON
1394    RPC_SS_LOOKUP_NODE_BY_NUM_N;
1395#endif
1396
1397    /* Node number 0 is reserved for NULL pointers */
1398    if (num == 0)
1399    {
1400
1401#ifdef PERFMON
1402        RPC_SS_LOOKUP_NODE_BY_NUM_X;
1403#endif
1404
1405        return (byte_p_t) NULL;
1406    }
1407
1408    str = (rpc_ss_pvt_node_table_t *) tab;
1409    mapped = str->currently_mapped;
1410
1411    /* Make sure the table is large enough to do a lookup */
1412    if (mapped < num)
1413    {
1414
1415#ifdef PERFMON
1416        RPC_SS_LOOKUP_NODE_BY_NUM_X;
1417#endif
1418
1419        return (byte_p_t) NULL;
1420    }
1421
1422    array = (str->array);
1423    depth = str->depth;
1424
1425    /* The logic in the following loop must parallel the logic of the
1426        corresponding loop in rpc_ss_register_node_by_num */
1427    while (depth > 1)
1428    {
1429        mapped = mapped / rpc_ss_node_array_size;
1430        index = (num-1) / mapped;
1431
        if (((*array)[index]).array_ptr == 0)
        {

#ifdef PERFMON
            RPC_SS_LOOKUP_NODE_BY_NUM_X;
#endif

            return (byte_p_t) NULL;
        }
1441        num = num - (index * mapped);
1442        array = (rpc_ss_ptr_array_p_t)((*array)[index]).array_ptr;
1443        depth = depth - 1;
1444    }
1445
1446#ifdef PERFMON
1447    RPC_SS_LOOKUP_NODE_BY_NUM_X;
1448#endif
1449
1450    return ((*array)[num-1]).void_ptr;
1451}
1452
1453/*
1454**++
1455**  FUNCTIONAL DESCRIPTION:
1456**
1457**      rpc_ss_register_node
1458**
1459**          This routine returns the node number associated with the specified
1460**          node address.  If the node address was not yet registered, then the
1461**          next highest node number will be associated with the specified
1462**          address and returned.
1463**
**          The intended usage for this routine is during marshalling.  If the
**          marshalling flag is set, the routine will either return the node
**          number associated with this address (if it had previously been
**          registered), reporting via has_been_marshalled whether the node
**          has already been marshalled, or assign a new node number to this
**          address and mark it as having been marshalled.
1469**
1470**  FORMAL PARAMETERS:
1471**
1472**      tab -- Node table
1473**      ptr -- Node address
1474**      marshalling -- Flag that indicates that the node will be marshalled if necessary
1475**      has_been_marshalled -- [out] true if node has already been marshalled
1476**
1477**  RETURN VALUE:
1478**
1479**      node number associated with the specified address
1480**
1481**--
1482*/
1483idl_ulong_int rpc_ss_register_node
1484(
1485    rpc_ss_node_table_t tab,
1486    byte_p_t ptr,
1487    long marshalling,
1488    long *has_been_marshalled
1489)
1490{
1491    idl_ulong_int num;
1492    rpc_ss_pvt_node_table_t * str;
1493    rpc_ss_hash_entry_t *hash_entry;
1494
1495#ifdef PERFMON
1496    RPC_SS_REGISTER_NODE_N;
1497#endif
1498
1499    /*
1500    ** Return node number 0 for NULL
1501    */
1502    if (ptr == NULL)
1503    {
1504
1505#ifdef PERFMON
1506        RPC_SS_REGISTER_NODE_X;
1507#endif
1508
1509        return 0;
1510    }
1511
1512    str = (rpc_ss_pvt_node_table_t *) tab;
1513
1514    /*
1515    ** Find out if this node is already registered.  If so return its number.
1516    */
1517    if ((num = rpc_ss_lookup_node_by_ptr (tab, ptr, &hash_entry)) != 0)
1518    {
1519        if (marshalling)
1520        {
1521            *has_been_marshalled = hash_entry->marshalled;
1522            hash_entry->marshalled = idl_true;
1523        }
1524
1525#ifdef PERFMON
1526    RPC_SS_REGISTER_NODE_X;
1527#endif
1528
1529        return num;
1530    }
1531
1532    /*
1533    ** Not already registered.  Bump the high count and use it.
1534    */
1535    num = ++(str->high_num);
1536
1537    rpc_ss_register_node_by_num (tab, num, ptr);
1538    if (marshalling)
1539    {
1540        hash_entry = rpc_ss_find_hash_entry (str, ptr);
1541        hash_entry->marshalled = idl_true;
1542        *has_been_marshalled = idl_false;
1543    }
1544
1545    DTRACE((
1546        trace_fid, "Register(%p): num=%d, ptr=%p\n", str, num, ptr
1547    ));
1548
1549#ifdef PERFMON
1550    RPC_SS_REGISTER_NODE_X;
1551#endif
1552
1553    return num;
1554}
1555
1556/*
1557**++
1558**  FUNCTIONAL DESCRIPTION:
1559**
1560**      rpc_ss_unregister_node
1561**
1562**          This routine removes a node address to node number mapping from the
1563**          node table.  It is intended to be called when an address is freed
**          from a memory allocator.  If the address is later returned in a future
1565**          call to the allocator, it will not be matched as an existing node
1566**          and thus will be mapped to a new node on the client.
1567**
1568**  FORMAL PARAMETERS:
1569**
1570**      tab -- Node table
1571**      ptr -- Node address
1572**
1573**  RETURN VALUE:
1574**
1575**      None
1576**
1577**--
1578*/
1579void rpc_ss_unregister_node
1580(
1581    rpc_ss_node_table_t tab,
1582    byte_p_t ptr
1583)
1584{
1585    rpc_ss_pvt_node_table_t * str;
1586    rpc_ss_hash_entry_t *hash_entry;
1587
1588#ifdef PERFMON
1589    RPC_SS_UNREGISTER_NODE_N;
1590#endif
1591
1592    /*
1593    ** Return if NULL
1594    */
1595    if (ptr == NULL)
1596    {
1597
1598#ifdef PERFMON
1599        RPC_SS_UNREGISTER_NODE_X;
1600#endif
1601
1602        return;
1603    }
1604
1605    str = (rpc_ss_pvt_node_table_t *) tab;
1606
1607    /*
1608    ** Find the hash entry for the pointer, and delete it.
1609    */
1610    hash_entry = rpc_ss_find_hash_entry (str, ptr);
1611    if (hash_entry->ptr == ptr)
1612    {
1613        if (str->deletes_reflected)
1614        {
1615            /* Add the node to the list of deleted nodes */
1616            rpc_ss_add_delete_to_list(hash_entry->node, str);
1617        }
1618        /* Remove its entry from the hash table */
1619        hash_entry->ptr = NULL;
1620    }
1621
1622    /*
1623     * No else clause; if they were not equal, it means that the pointer
1624     * was not registered.  No action.
1625     */
1626
1627#ifdef PERFMON
1628    RPC_SS_UNREGISTER_NODE_X;
1629#endif
1630
1631    return;
1632}
1633
1634#if 0
1635This code is not currently needed, but is preserved in case we need
1636it in the future.
1637/*
1638**++
1639**  FUNCTIONAL DESCRIPTION:
1640**
1641**      rpc_ss_replace_address
1642**
1643**          This routine replaces the address associated with a node number.
1644**          It is used to propagate the node number across the multiple
1645**          instances of memory associated with transmit_as and represent_as.
1646**
1647**  FORMAL PARAMETERS:
1648**
1649**      tab -- Node table
1650**      oldptr -- Old Node address
1651**      newptr -- New Node address
1652**
1653**  RETURN VALUE:
1654**
1655**      None
1656**
1657**--
1658*/
1659void rpc_ss_replace_address
1660(
1661    rpc_ss_node_table_t tab,
1662    byte_p_t oldptr,
1663    byte_p_t newptr
1664)
1665{
1666    rpc_ss_pvt_node_table_t * str;
1667    rpc_ss_hash_entry_t *hash_entry;
1668    rpc_ss_hash_entry_t *new_hash_entry;
1669
1670#ifdef PERFMON
1671    RPC_SS_REPLACE_ADDRESS_N;
1672#endif
1673
1674    str = (rpc_ss_pvt_node_table_t *) tab;
1675
1676    /*
1677    ** Return if the old address was NULL, a cheap test against a coding error.
1678    */
1679    if (oldptr == NULL)
1680    {
1681
1682#ifdef PERFMON
1683        RPC_SS_REPLACE_ADDRESS_X;
1684#endif
1685
1686        return;
1687    }
1688
1689    /*
1690    ** Find the hash entry for the pointer, and delete it.
1691    */
1692    hash_entry = rpc_ss_find_hash_entry (str, oldptr);
1693    if (hash_entry->ptr == oldptr)
1694        hash_entry->ptr = NULL;
1695    else
1696    /*
1697    ** If the node lookup failed, this node was not registered.
1698    ** Don't do anything.
1699    */
1700    {
1701#ifdef PERFMON
1702    RPC_SS_REPLACE_ADDRESS_X;
1703#endif
1704        return;
1705    }
1706
1707    /*
1708    ** Register the new mapping.
1709    */
1710    rpc_ss_register_node_by_num ( tab, hash_entry->node, newptr );
1711
1712    /*
1713    ** Propagate the marshalled/unmarshalled flags.
1714    */
1715    new_hash_entry = rpc_ss_find_hash_entry (str, newptr);
1716    new_hash_entry->marshalled = hash_entry->marshalled;
1717    new_hash_entry->unmarshalled = hash_entry->unmarshalled;
1718
1719#ifdef PERFMON
1720    RPC_SS_REPLACE_ADDRESS_X;
1721#endif
1722
1723    return;
1724}
1725/*End of disabled code.*/
1726#endif
1727
1728/*
1729**++
1730**  FUNCTIONAL DESCRIPTION:
1731**
1732**      rpc_ss_return_pointer_to_node
1733**
1734**          This routine is intended for use during unmarshalling.  It accepts
1735**          a node number and information regarding the size and required
1736**          allocator for the node.  If the node address associated with this
**          node number is already registered, the node address is returned.  If
1738**          there is not yet an association with this node number, one is
1739**          created and the newly allocated node is returned.
1740**
**          Also returned is a flag indicating whether the node has already
**          been unmarshalled.  If the node has previously been unmarshalled,
**          this tells the stub that it need not do so again.  On every call
**          after the first for a given node number, the unmarshalled flag
**          will be true; thus it is after the first call to
**          rpc_ss_return_pointer_to_node for a specific node that the stub
**          must unmarshall its value.
1748**
1749**  FORMAL PARAMETERS:
1750**
1751**      tab -- Node table
1752**      num -- Node number to lookup
1753**      size -- Size of the node
**      p_allocator -- Allocator whose p_allocate routine is used to allocate
**                     the node; NULL means allocate via the stub memory handle
1755**      has_been_unmarshalled -- [out] indicates if unmarshalled yet
**      new_node -- Optional [out] parameter.  When supplied, indicates whether
**                  memory was allocated
1758**
1759**  RETURN VALUE:
1760**
**      Node address associated with the specified node number
1762**
1763**--
1764*/
1765byte_p_t rpc_ss_return_pointer_to_node
1766(
1767    rpc_ss_node_table_t tab,
1768    idl_ulong_int       num,
1769    long                size,
1770    rpc_ss_allocator_t *p_allocator,
1771    long                *has_been_unmarshalled,
1772    long                *new_node               /* NULL or addr of return flag */
1773)
1774{
1775    byte_p_t p;
1776    rpc_ss_pvt_node_table_t * str;
1777    rpc_ss_hash_entry_t * hash_entry;
1778
1779#ifdef PERFMON
1780    RPC_SS_RETURN_POINTER_TO_NODE_N;
1781#endif
1782
1783    str = (rpc_ss_pvt_node_table_t *)tab;
1784    p = rpc_ss_lookup_node_by_num (tab, num);
1785
1786    if (p == NULL)
1787    {
1788        if (new_node != NULL) *new_node = (long)idl_true;
1789        if (!p_allocator)
1790        {
1791            p = rpc_ss_mem_alloc (str->mem_h, size);
1792        }
1793        else
1794        {
1795            if (size == 0) size = 1;
            p = (byte_p_t)(*p_allocator->p_allocate)(p_allocator->p_context,
                                                     size);
        }
        if (p == NULL)
1800            DCETHREAD_RAISE(rpc_x_no_memory);
1801        rpc_ss_register_node_by_num (tab, num, p);
1802    }
1803    else
1804        if (new_node != NULL) *new_node = (long)idl_false;
1805
1806    /* Return unmarshalled flag */
1807    hash_entry = rpc_ss_find_hash_entry (str, p);
1808    *has_been_unmarshalled = hash_entry->unmarshalled;
1809
1810    /* Mark as already unmarshalled to be returned in next call */
1811    hash_entry->unmarshalled = idl_true;
1812
1813#ifdef PERFMON
1814    RPC_SS_RETURN_POINTER_TO_NODE_X;
1815#endif
1816
1817    return p;
1818}
1819
1820/*
1821**++
1822**  WARNING:
1823**      This routine is used only by stubs generated by the old (OSF) compiler.
1824**      Do not modify this routine unless you understand how old-style stubs
**      work.
1826**
1827**  FUNCTIONAL DESCRIPTION:
1828**
1829**      rpc_ss_lookup_pointer_to_node
1830**
1831**          This routine is intended for use during unmarshalling.  If the node
1832**          address associated with specified node number is already
1833**          registered, the node address is returned.  This routine is required
1834**          for conformant objects because the size information can be
1835**          unmarshalled only the first time the object is encountered.  Thus
1836**          this routine is called before rpc_ss_return_pointer_to_node to
**          determine whether a conformant type has already been unmarshalled.
**          If it hasn't, the stub can unmarshall the size information and then
1839**          call rpc_ss_return_pointer_to_node to actually allocate and register
1840**          the node.
1841**
**          Also returned is a flag indicating whether the node has already
**          been unmarshalled.  If the node has previously been unmarshalled,
**          this tells the stub that it need not do so again.  On every call
**          after the first, the unmarshalled flag will be true; thus it is
**          after the first call to rpc_ss_return_pointer_to_node for a
**          specific node that the stub must unmarshall its value.
1848**
1849**  FORMAL PARAMETERS:
1850**
1851**      tab -- Node table
1852**      num -- Node number to lookup
1853**      has_been_unmarshalled -- [out] indicates if unmarshalled yet
1854**
1855**  RETURN VALUE:
1856**
**      Node address associated with the specified node number
1858**
1859**--
1860*/
1861byte_p_t rpc_ss_lookup_pointer_to_node
1862(
1863    rpc_ss_node_table_t tab,
1864    unsigned long num,
1865    long *has_been_unmarshalled
1866)
1867{
1868    byte_p_t p;
1869    rpc_ss_pvt_node_table_t * str;
1870    rpc_ss_hash_entry_t * hash_entry;
1871
1872#ifdef PERFMON
1873    RPC_SS_LOOKUP_POINTER_TO_NODE_N;
1874#endif
1875
1876    p = rpc_ss_lookup_node_by_num (tab, num);
1877
1878    if (p == NULL)
1879    {
1880
1881#ifdef PERFMON
1882    RPC_SS_LOOKUP_POINTER_TO_NODE_X;
1883#endif
1884
        *has_been_unmarshalled = idl_false;
1886        return p;
1887    }
1888
1889    /* Return unmarshalled flag */
1890    str = (rpc_ss_pvt_node_table_t *)tab;
1891    hash_entry = rpc_ss_find_hash_entry (str, p);
1892    *has_been_unmarshalled = hash_entry->unmarshalled;
1893
1894    /* Mark as already unmarshalled to be returned in next call */
1895    hash_entry->unmarshalled = idl_true;
1896
1897#ifdef PERFMON
1898    RPC_SS_LOOKUP_POINTER_TO_NODE_X;
1899#endif
1900
1901    return p;
1902}
1903
1904/*
1905**++
1906**  FUNCTIONAL DESCRIPTION:
1907**
1908**      rpc_ss_inquire_pointer_to_node
1909**
1910**          Same functionality as rpc_ss_lookup_pointer_to_node except that
1911**          the "unmarshalled" flag is not set on the table entry for the
**          node.
1913**
1914**  FORMAL PARAMETERS:
1915**
1916**      tab -- Node table
1917**      num -- Node number to lookup
1918**      has_been_unmarshalled -- [out] indicates if unmarshalled yet
1919**
1920**  RETURN VALUE:
1921**
**      Node address associated with the specified node number
1923**
1924**--
1925*/
1926byte_p_t rpc_ss_inquire_pointer_to_node
1927(
1928    rpc_ss_node_table_t tab,
1929    unsigned long num,
1930    long *has_been_unmarshalled
1931)
1932{
1933    byte_p_t p;
1934    rpc_ss_pvt_node_table_t * str;
1935    rpc_ss_hash_entry_t * hash_entry;
1936
1937#ifdef PERFMON
1938    RPC_SS_INQUIRE_POINTER_TO_NODE_N;
1939#endif
1940
1941    p = rpc_ss_lookup_node_by_num (tab, num);
1942
1943    if (p == NULL)
1944    {
1945
1946#ifdef PERFMON
1947    RPC_SS_INQUIRE_POINTER_TO_NODE_X;
1948#endif
1949
        *has_been_unmarshalled = idl_false;
1951        return p;
1952    }
1953
1954    /* Return unmarshalled flag */
1955    str = (rpc_ss_pvt_node_table_t *)tab;
1956    hash_entry = rpc_ss_find_hash_entry (str, p);
1957    *has_been_unmarshalled = hash_entry->unmarshalled;
1958
1959#ifdef PERFMON
1960    RPC_SS_INQUIRE_POINTER_TO_NODE_X;
1961#endif
1962
1963    return p;
1964}
1965
1966/*
1967**++
1968**  FUNCTIONAL DESCRIPTION:
1969**
1970**      rpc_ss_init_node_table
1971**
1972**          This routine initializes the specified node table to be empty.
1973**
1974**  FORMAL PARAMETERS:
1975**
**      p_node_str -- [out] Node table
1977**      p_mem_h -- Address of the mem handle for local memory management
1978**
1979**--
1980*/
1981void rpc_ss_init_node_table
1982(
1983    volatile rpc_ss_node_table_t *p_node_str,
1984    rpc_ss_mem_handle *p_mem_h
1985)
1986{
1987/*
1988** Allocate a node table and initialize it.
1989*/
1990    rpc_ss_pvt_node_table_t *str;
1991
1992#ifdef PERFMON
1993    RPC_SS_INIT_NODE_TABLE_N;
1994#endif
1995
1996#ifdef VERBOSE
1997    printf ("Init'ing new node table.\n");
1998#endif
1999    str = (rpc_ss_pvt_node_table_t*) rpc_ss_mem_alloc (
2000        p_mem_h, sizeof (rpc_ss_pvt_node_table_t));
2001
2002    memset (str, 0, sizeof (rpc_ss_pvt_node_table_t));
2003
2004    str->mem_h = p_mem_h;
2005    str->currently_mapped = 1;
2006    rpc_ss_expand_array (&(str->array), &(str->currently_mapped),
2007        &(str->depth), 1, p_mem_h);
2008    /* The preceding memset includes the effect of
2009     *  str->deletes_list = NULL;
2010     *  str->deletes_reflected = idl_false;
2011     */
2012    p_mem_h->node_table = (rpc_ss_node_table_t) str;
2013    *p_node_str = (rpc_ss_node_table_t) str;
2014
2015    DTOPEN;
2016    DTRACE((trace_fid,"\n\nTable (%p) Initialized\n",str));
2017
2018#ifdef PERFMON
2019    RPC_SS_INIT_NODE_TABLE_X;
2020#endif
2021
2022}
2023
2024/****************************************************************************/
2025/*                                                                          */
2026/*                        Unit Testing support                              */
2027/*                                                                          */
2028/*     This section of code provides a main routine that can be used to     */
/*     test the major functions of this module independent of the rest of   */
/*     the IDL support code.                                                 */
2031/****************************************************************************/
2032#ifdef UNIT_TEST_ERNODTBL
2033rpc_ss_node_table_t  str;
2034
int main (void)
{
idl_ulong_int node;
byte_p_t ptr;
long high_node;
long has_been_marshalled;
rpc_ss_hash_entry_t *hash_entry;
2041
rpc_ss_mem_handle mem_h;
/* Zero the stub memory handle before use (assumed sufficient for this test) */
memset (&mem_h, 0, sizeof (mem_h));
rpc_ss_init_node_table( &str, &mem_h);
2043
2044printf ("Correct results are in parentheses.\n\n");
2045
2046has_been_marshalled = 0;
2047node=rpc_ss_register_node (str, (char*)10, 1, &has_been_marshalled);
2048printf ("%d (1)", node);
2049if (has_been_marshalled) printf ("  Erroneously flagged as marshalled!");
2050printf ("\n");
2051
2052has_been_marshalled = 0;
2053node=rpc_ss_register_node (str, (char*)40, 1, &has_been_marshalled);
2054printf ("%d (2)", node);
2055if (has_been_marshalled) printf ("  Erroneously flagged as marshalled!");
2056printf ("\n");
2057
2058has_been_marshalled = 0;
2059node=rpc_ss_register_node (str, (char*)10, 1, &has_been_marshalled);
2060printf ("%d (1)", node);
2061if (!has_been_marshalled) printf ("  Erroneously flagged as not marshalled!");
2062printf ("\n");
2063
2064has_been_marshalled = 0;
2065node=rpc_ss_register_node (str, (char*)20, 0, &has_been_marshalled);
2066printf ("%d (3)", node);
2067if (has_been_marshalled) printf ("  Erroneously flagged as marshalled!");
2068printf ("\n");
2069
2070node=rpc_ss_register_node (str, (char*)30, 0, 0);
2071printf ("%d (4)\n", node);
2072
rpc_ss_register_node_by_num (str, 17, (byte_p_t)50);
2074
2075node=rpc_ss_register_node (str, (char*)1064, 0, 0);
2076printf ("%d (18)\n", node);
2077
node=rpc_ss_lookup_node_by_ptr (str, (char*)50, &hash_entry);
printf ("%d (17)\n", node);
node=rpc_ss_lookup_node_by_ptr (str, (char*)10, &hash_entry);
printf ("%d (1)\n", node);
node=rpc_ss_lookup_node_by_ptr (str, (char*)30, &hash_entry);
printf ("%d (4)\n", node);
node=rpc_ss_lookup_node_by_ptr (str, (char*)20, &hash_entry);
printf ("%d (3)\n", node);
node=rpc_ss_lookup_node_by_ptr (str, (char*)1064, &hash_entry);
printf ("%d (18)\n", node);
node=rpc_ss_lookup_node_by_ptr (str, (char*)40, &hash_entry);
printf ("%d (2)\n", node);
2090
2091ptr=rpc_ss_lookup_node_by_num(str, 1);
2092printf ("%d (10)\n", ptr);
2093ptr=rpc_ss_lookup_node_by_num(str, 2);
2094printf ("%d (40)\n", ptr);
2095ptr=rpc_ss_lookup_node_by_num(str, 3);
2096printf ("%d (20)\n", ptr);
2097ptr=rpc_ss_lookup_node_by_num(str, 4);
2098printf ("%d (30)\n", ptr);
2099ptr=rpc_ss_lookup_node_by_num(str, 17);
2100printf ("%d (50)\n", ptr);
2101ptr=rpc_ss_lookup_node_by_num(str, 18);
2102printf ("%d (1064)\n", ptr);
2103ptr=rpc_ss_lookup_node_by_num(str, 99);
2104printf ("%d (0)\n", ptr);
2105ptr=rpc_ss_lookup_node_by_num(str, 0);
2106printf ("%d (0)\n", ptr);
2107
2108ptr = rpc_ss_return_pointer_to_node (str, 2, 3, 0, &has_been_marshalled,
2109                                     (long *)NULL);
2110printf ("%d (40)", ptr);
2111if (has_been_marshalled) printf ("  Erroneously flagged as unmarshalled!");
2112printf ("\n");
2113ptr = rpc_ss_return_pointer_to_node (str, 2, 3, 0, &has_been_marshalled,
2114                                     (long *)NULL);
2115printf ("%d (40)", ptr);
2116if (!has_been_marshalled) printf ("  Erroneously flagged as not unmarshalled!");
2117printf ("\n");
2118ptr = rpc_ss_return_pointer_to_node (str, 12, 3, 0, &has_been_marshalled,
2119                                     (long *)NULL);
2120printf ("%08X (some reasonable heap pointer value)", ptr);
if (has_been_marshalled) printf ("  Erroneously flagged as unmarshalled!");
2122printf ("\n");
2123
2124}
2125#endif
2126