1/* 2 * Copyright (c) 2010 Apple Inc. All rights reserved. 3 * 4 * @APPLE_LICENSE_HEADER_START@ 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of Apple Inc. ("Apple") nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY 20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY 23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 * 30 * Portions of this software have been released under the following terms: 31 * 32 * (c) Copyright 1989-1993 OPEN SOFTWARE FOUNDATION, INC. 
33 * (c) Copyright 1989-1993 HEWLETT-PACKARD COMPANY 34 * (c) Copyright 1989-1993 DIGITAL EQUIPMENT CORPORATION 35 * 36 * To anyone who acknowledges that this file is provided "AS IS" 37 * without any express or implied warranty: 38 * permission to use, copy, modify, and distribute this file for any 39 * purpose is hereby granted without fee, provided that the above 40 * copyright notices and this notice appears in all source code copies, 41 * and that none of the names of Open Software Foundation, Inc., Hewlett- 42 * Packard Company or Digital Equipment Corporation be used 43 * in advertising or publicity pertaining to distribution of the software 44 * without specific, written prior permission. Neither Open Software 45 * Foundation, Inc., Hewlett-Packard Company nor Digital 46 * Equipment Corporation makes any representations about the suitability 47 * of this software for any purpose. 48 * 49 * Copyright (c) 2007, Novell, Inc. All rights reserved. 50 * Redistribution and use in source and binary forms, with or without 51 * modification, are permitted provided that the following conditions 52 * are met: 53 * 54 * 1. Redistributions of source code must retain the above copyright 55 * notice, this list of conditions and the following disclaimer. 56 * 2. Redistributions in binary form must reproduce the above copyright 57 * notice, this list of conditions and the following disclaimer in the 58 * documentation and/or other materials provided with the distribution. 59 * 3. Neither the name of Novell Inc. nor the names of its contributors 60 * may be used to endorse or promote products derived from this 61 * this software without specific prior written permission. 62 * 63 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY 64 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 65 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 66 * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY 67 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 68 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 69 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 70 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 71 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 72 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 73 * 74 * @APPLE_LICENSE_HEADER_END@ 75 */ 76 77/* 78** 79** 80** NAME: 81** 82** alfrsupp.c 83** 84** FACILITY: 85** 86** IDL Stub Runtime Support 87** 88** ABSTRACT: 89** 90** Support routines for rpc_ss_allocate, rpc_ss_free 91** 92** VERSION: DCE 1.0 93*/ 94 95#if HAVE_CONFIG_H 96#include <config.h> 97#endif 98 99#include <dce/rpc.h> 100#include <dce/stubbase.h> 101#ifdef MIA 102#include <dce/idlddefs.h> 103#endif 104#include <lsysdep.h> 105#include <assert.h> 106 107/* 1081. Explanation of the per-thread context 109 ------------------------------------- 110 111 The per-thread context enables a group of threads to share the same stub 112memory management mechanism. 
113 114 Per-thread context is built from an rpc_ss_thread_indirection_t 115 structure, which consists of 116 indirection (pointer to a rpc_ss_thread_support_ptrs_t) 117 free_referents (indicates whether the referents of indirection must 118 be freed when the thread terminates) 119 and a rpc_ss_thread_support_ptrs_t structure consisting of 120 mutex (to prevent threads in the group accessing the mechanism 121 simultaneously) 122 p_mem_h (pointer to the memory handle used if the rpc_ss_allocate/ 123 rpc_ss_free machinery is in use) 124 p_allocate (pointer to the memory allocator the group is using) 125 p_free (pointer to memory release function compatible with 126 p_allocate) 127 The context information registered with Pthreads is the address of a 128rpc_ss_thread_indirection_t structure and is denoted by 129helper_thread_indirection_pointer. 130 1311.1 Use of the context in a server thread 132 ------------------------------------- 133 134 The memory management mechanism is set up by a server stub if the 135operation includes pointers in its parameters, or the ACF [enable_allocate] 136attribute has been used. It is set up by the routine rpc_ss_create_support_ptrs 137(eenodtbl.c). p_mem_h points into the server stub stack. Memory attached to 138this handle is released before the server stub returns control to the RPC 139runtime. After this memory has been released, rpc_ss_destroy_support_pointers 140(eenodtbl.c) destroys the mutex in the rpc_ss_thread_support_ptrs_t structure, 141releases the rpc_ss_thread_indirection_t structure and explicitly unregisters 142the context with Pthreads. The rpc_ss_thread_support_ptrs_t structure the 143indirection pointer was pointing at is on the server stub's stack, and so will 144be released automatically when control returns to the RPC runtime. 
    The routines rpc_ss_allocate, rpc_ss_free, which are the normal machinery
used by manager application code, for allocating and releasing memory, need to
access the memory handle set up in the server stub. They do this by calling
rpc_ss_get_support_ptrs (eenodtbl.c) to obtain the value of the indirection
pointer.

1.2 Use of the context in manager helper threads
    --------------------------------------------

    A manager helper thread is spawned by manager application code, and so
belongs to the same group as the server thread from which the manager
application code was entered. All the manager helper threads in a group need
to access the same mutex and memory handle. The application achieves this by
calling rpc_ss_get_thread_handle (alfrsupp.c) to get the value of the
indirection pointer for the group from an existing thread.
    A new thread uses rpc_ss_set_thread_handle (alfrsupp.c) to create a new cell
containing this value and register the address of this new cell as the context
for this thread.
    When a manager helper thread terminates, Pthreads invokes the destructor,
rpc_ss_destroy_thread_ctx (alfrsupp.c) for the context. This frees the
rpc_ss_thread_indirection_t structure, but takes no other action, because the
data structure pointed at by the indirection pointer is pointed at by at least
one other cell, the context for the server stub's thread.

1.3 Use of the context with rpc_ss_enable_allocate, rpc_ss_disable_allocate
    -----------------------------------------------------------------------

    This is usage on the client side, i.e. when client code that is not part
of a manager is being executed. rpc_ss_enable_allocate (alfrsupp.c) is called
by the client to establish an environment in which rpc_ss_allocate, rpc_ss_free
can be used.
    rpc_ss_enable_allocate creates a memory handle on the heap.
It then creates 177a rpc_ss_thread_support_ptrs_t structure on the heap which includes a pointer 178to the memory handle. Then it calls rpc_ss_create_support_ptrs to complete the 179required data structures. 180 When the client no longer needs this machinery it calls 181rpc_ss_disable_allocate (alfrsupp.c). This releases any memory owned by the 182memory handle, releases the rpc_ss_thread_support_ptrs_t structure and the 183rpc_ss_thread_indirection_t structure and unregisters the context with Pthreads. 184 1851.4 Other client side uses of the context 186 ------------------------------------- 187 188 These arise when rpc_ss_enable_allocate has not been called and one of the 189following events occurs: 190i) A client stub for an operation whose [out] parameters include a pointer 191 is executed. In the OSF compiler this results in a call to the routine 192 rpc_ss_client_establish_alloc (alfrsupp.c). 193ii) Application code calls rpc_ss_set_client_alloc_free (alfrsupp.c). 194iii) Application code calls rpc_ss_swap_client_alloc_free (alfrsupp.c). 195 In any of these cases, all the data structures established by 196rpc_ss_enable_allocate have been created, although their fields may have 197different values. If a thread in which one of these events has occurred 198terminates, the same set of memory releases that would be performed by 199rpc_ss_disable_allocate occurs. 

*/

#ifdef STUBS_USE_PTHREADS
typedef void (*destructor_t)
(
    rpc_ss_threads_dest_arg_t arg
);
#else
  typedef cma_t_destructor destructor_t;
#endif

/******************************************************************************/
/*                                                                            */
/*    Set up CMA machinery required by rpc_ss_allocate, rpc_ss_free           */
/*                                                                            */
/******************************************************************************/
/* Set to ndr_true once rpc_ss_init_allocate_once() has created the
 * per-thread context key */
ndr_boolean rpc_ss_allocate_is_set_up = ndr_false;

static RPC_SS_THREADS_ONCE_T allocate_once = RPC_SS_THREADS_ONCE_INIT;

/* Per-thread key; its value is a rpc_ss_thread_indirection_t * for the
 * thread (see "Explanation of the per-thread context" at the top of file) */
RPC_SS_THREADS_KEY_T rpc_ss_thread_supp_key;

/******************************************************************************/
/*                                                                            */
/*  rpc_ss_destroy_thread_ctx                                                 */
/*  Destroy client per thread context.                                        */
/*                                                                            */
/******************************************************************************/
/* Release the rpc_ss_thread_indirection_t cell and, when the cell owns its
 * referents (free_referents set), also the memory handle, the mutex and the
 * rpc_ss_thread_support_ptrs_t it points at.  A NULL argument is a no-op. */
static void rpc_ss_destroy_thread_ctx
(
    rpc_ss_thread_indirection_t *thread_indirection_ptr
)
{
    rpc_ss_thread_support_ptrs_t *p_thread_support_ptrs;

    if (thread_indirection_ptr != NULL)
    {
        if (thread_indirection_ptr->free_referents)
        {
            p_thread_support_ptrs = thread_indirection_ptr->indirection;

            /* Release any memory owned by the memory handle */
            rpc_ss_mem_free( p_thread_support_ptrs->p_mem_h );

            /*
             * Free the objects it points at.
             * Must cast because instance_of
             * (rpc_ss_thread_support_ptrs_t).p_mem_h
             * is of type rpc_mem_handle, which is a pointer to volatile,
             * and free() doesn't take a pointer to volatile.
             */
            free( (idl_void_p_t)p_thread_support_ptrs->p_mem_h );
            RPC_SS_THREADS_MUTEX_DELETE( &(p_thread_support_ptrs->mutex) );

            /* Free the structure */
            free( p_thread_support_ptrs );
        }

        /* Free the indirection storage */
        free( thread_indirection_ptr );

        /* And destroy the context - this is required for Kernel RPC */
        RPC_SS_THREADS_KEY_SET_CONTEXT( rpc_ss_thread_supp_key, NULL );
    }
}

/******************************************************************************/
/*                                                                            */
/*  rpc_ss_thread_ctx_destructor                                              */
/*  Destroy per thread context at thread termination                          */
/*                                                                            */
/******************************************************************************/
/* Registered as the key destructor; 'arg' is the context value, i.e. this
 * thread's rpc_ss_thread_indirection_t *. */
static void rpc_ss_thread_ctx_destructor
(
    rpc_ss_threads_dest_arg_t arg
)
{
    rpc_ss_thread_indirection_t *thread_indirection_ptr =
        (rpc_ss_thread_indirection_t *) arg;

    rpc_ss_destroy_thread_ctx( thread_indirection_ptr );
}

/* One-time initialization body: create the per-thread context key with
 * rpc_ss_thread_ctx_destructor invoked at thread termination. */
static void rpc_ss_init_allocate(
    void
)
{
    /* Key for thread local storage for tree management */
    RPC_SS_THREADS_KEY_CREATE( &rpc_ss_thread_supp_key,
        (destructor_t)rpc_ss_thread_ctx_destructor );
}

/* Guarantee rpc_ss_init_allocate has run exactly once, then advertise the
 * fact through rpc_ss_allocate_is_set_up. */
void rpc_ss_init_allocate_once(
    void
)
{
    RPC_SS_THREADS_INIT;
    RPC_SS_THREADS_ONCE( &allocate_once, rpc_ss_init_allocate );
    rpc_ss_allocate_is_set_up = ndr_true;
}

/******************************************************************************/
/*                                                                            */
/*  Replacement for malloc guaranteed to allocate something like some         */
/*  versions of malloc do.
*/ 306/* */ 307/******************************************************************************/ 308static void *rpc_ss_client_default_malloc 309( 310 idl_void_p_t context ATTRIBUTE_UNUSED, 311 idl_size_t size 312) 313{ 314 void *result = NULL; 315 316 if ( size ) 317 { 318 result = malloc(size); 319 } 320 else 321 { 322 result = malloc(1); 323 } 324 325 return result; 326} 327 328static void rpc_ss_client_default_free 329( 330 idl_void_p_t context ATTRIBUTE_UNUSED, 331 idl_void_p_t ptr 332) 333{ 334 free(ptr); 335} 336 337/******************************************************************************/ 338/* */ 339/* Do we currently have thread context data? */ 340/* If not, create local storage with malloc() and free() as the */ 341/* allocate and free routines */ 342/* */ 343/******************************************************************************/ 344static void rpc_ss_client_get_thread_ctx 345( 346 rpc_ss_thread_support_ptrs_t **p_p_support_ptrs 347) 348{ 349 rpc_ss_thread_support_ptrs_t *p_support_ptrs; 350 rpc_ss_thread_indirection_t *thread_indirection_ptr; 351 352#ifndef MEMORY_NOT_WRITTEN_SERIALLY 353 if ( ! rpc_ss_allocate_is_set_up ) 354#endif 355 rpc_ss_init_allocate_once(); 356 357 p_support_ptrs = (rpc_ss_thread_support_ptrs_t *)rpc_ss_get_thread_handle(); 358 if (p_support_ptrs == NULL) 359 { 360 /* We have no context. 
Make one with the fields we need */ 361 p_support_ptrs = (rpc_ss_thread_support_ptrs_t *) 362 malloc(sizeof(rpc_ss_thread_support_ptrs_t)); 363 if (p_support_ptrs == NULL) 364 { 365 DCETHREAD_RAISE( rpc_x_no_memory ); 366 } 367 368 p_support_ptrs->p_mem_h = (rpc_ss_mem_handle *) 369 calloc(1, sizeof(rpc_ss_mem_handle)); 370 if (p_support_ptrs->p_mem_h == NULL) 371 { 372 DCETHREAD_RAISE( rpc_x_no_memory ); 373 } 374 p_support_ptrs->p_mem_h->memory = NULL; 375 p_support_ptrs->p_mem_h->node_table = NULL; 376 377 RPC_SS_THREADS_MUTEX_CREATE (&(p_support_ptrs->mutex)); 378 379 p_support_ptrs->allocator.p_allocate = rpc_ss_client_default_malloc; 380 p_support_ptrs->allocator.p_free = rpc_ss_client_default_free; 381 p_support_ptrs->allocator.p_context = NULL; 382 383 thread_indirection_ptr = (rpc_ss_thread_indirection_t *) 384 malloc(sizeof(rpc_ss_thread_indirection_t)); 385 if (thread_indirection_ptr == NULL) 386 { 387 DCETHREAD_RAISE( rpc_x_no_memory ); 388 } 389 thread_indirection_ptr->indirection = p_support_ptrs; 390 thread_indirection_ptr->free_referents = idl_true; 391 RPC_SS_THREADS_KEY_SET_CONTEXT( rpc_ss_thread_supp_key, 392 thread_indirection_ptr ); 393 } 394 *p_p_support_ptrs = p_support_ptrs; 395} 396 397/******************************************************************************/ 398/* */ 399/* Do we currently have thread context data? 
 */
/*  If not, create local storage with malloc() and free() as the              */
/*  allocate and free routines                                                */
/*  Copy pointers to allocate and free routines to local storage              */
/*                                                                            */
/******************************************************************************/
/* Called from client stubs: ensure this thread has a memory-management
 * context and copy its allocator into the marshalling state. */
void rpc_ss_client_establish_alloc
(
    rpc_ss_marsh_state_t *p_unmar_params
)
{
    rpc_ss_thread_support_ptrs_t *p_support_ptrs;

    rpc_ss_client_get_thread_ctx( &p_support_ptrs );
    p_unmar_params->allocator = p_support_ptrs->allocator;
}

/******************************************************************************/
/*                                                                            */
/*  rpc_ss_get_thread_handle                                                  */
/*                                                                            */
/******************************************************************************/
/* Return the calling thread's handle (the indirection pointer, i.e. the
 * rpc_ss_thread_support_ptrs_t the context points at), or NULL if the
 * thread has no context.  Used to share one context among a thread group. */
rpc_ss_thread_handle_t rpc_ss_get_thread_handle
( void )
{
    rpc_ss_thread_indirection_t *thread_indirection_ptr;

    RPC_SS_THREADS_KEY_GET_CONTEXT( rpc_ss_thread_supp_key,
                                    &thread_indirection_ptr );
    if (thread_indirection_ptr == NULL) return(NULL);
    else return (rpc_ss_thread_handle_t)(thread_indirection_ptr->indirection);
}

/******************************************************************************/
/*                                                                            */
/*  rpc_ss_set_thread_handle                                                  */
/*                                                                            */
/******************************************************************************/
/* Adopt a thread handle obtained (via rpc_ss_get_thread_handle) from another
 * thread of the same group: wrap it in a fresh indirection cell with
 * free_referents false - the originating thread still owns the referents -
 * and register the cell as this thread's context. */
void rpc_ss_set_thread_handle
(
    rpc_ss_thread_handle_t thread_handle
)
{
    rpc_ss_thread_indirection_t *helper_thread_indirection_ptr;

    /* If a context exists, destroy it */
    /* NOTE(review): only the cell itself is freed here; any referents of the
     * previous context appear to be presumed owned elsewhere (free_referents
     * is not consulted) - confirm against callers */
    RPC_SS_THREADS_KEY_GET_CONTEXT( rpc_ss_thread_supp_key,
                                    &helper_thread_indirection_ptr );
    if ( helper_thread_indirection_ptr != NULL )
    {
        free( helper_thread_indirection_ptr );
    }

    /* Now create the new context */
    helper_thread_indirection_ptr = (rpc_ss_thread_indirection_t *)
        malloc(sizeof(rpc_ss_thread_indirection_t));
    if (helper_thread_indirection_ptr == NULL)
    {
        DCETHREAD_RAISE( rpc_x_no_memory );
    }
    helper_thread_indirection_ptr->indirection =
        (rpc_ss_thread_support_ptrs_t *)thread_handle;
    helper_thread_indirection_ptr->free_referents = idl_false;
    RPC_SS_THREADS_KEY_SET_CONTEXT( rpc_ss_thread_supp_key,
                                    helper_thread_indirection_ptr );
}

/******************************************************************************/
/*                                                                            */
/*  Create thread context with references to named alloc and free rtns        */
/*                                                                            */
/******************************************************************************/
/* Register an application allocate/free pair (no context pointer) as this
 * thread's allocator. */
void rpc_ss_set_client_alloc_free
(
    rpc_ss_p_alloc_t p_allocate,
    rpc_ss_p_free_t p_free
)
{
    rpc_ss_allocator_t allocator;

    allocator.p_allocate = p_allocate;
    allocator.p_free = p_free;
    allocator.p_context = NULL;

    rpc_ss_set_client_alloc_free_ex(&allocator);
}

/* Extended form: install a full allocator descriptor (routines plus context
 * pointer) as this thread's allocator. */
void rpc_ss_set_client_alloc_free_ex
(
    rpc_ss_allocator_t * p_allocator
)
{
    rpc_ss_thread_support_ptrs_t *p_support_ptrs;

    rpc_ss_client_get_thread_ctx( &p_support_ptrs );

    /* Make sure we don't lose a context object. */
    assert(p_support_ptrs->allocator.p_context == NULL);

    p_support_ptrs->allocator = *p_allocator;
}

/******************************************************************************/
/*                                                                            */
/*  Get the existing allocate, free routines and replace them with new ones   */
/*                                                                            */
/******************************************************************************/
/* Swap in a new allocate/free pair and hand back the previous pair so the
 * caller can restore it later. */
void rpc_ss_swap_client_alloc_free
(
    rpc_ss_p_alloc_t p_allocate,
    rpc_ss_p_free_t p_free,
    rpc_ss_p_alloc_t *p_p_old_allocate,
    rpc_ss_p_free_t * p_p_old_free
)
{
    rpc_ss_allocator_t allocator;

    allocator.p_allocate = p_allocate;
    allocator.p_free = p_free;
    allocator.p_context = NULL;

    /* Passing the same struct as both arguments is supported by
     * rpc_ss_swap_client_alloc_free_ex */
    rpc_ss_swap_client_alloc_free_ex(&allocator, &allocator);

    /* Make sure we didn't lose a context object.
*/ 523 assert(allocator.p_context == NULL); 524 525 *p_p_old_allocate = allocator.p_allocate; 526 *p_p_old_free = allocator.p_free; 527} 528 529void rpc_ss_swap_client_alloc_free_ex 530( 531 rpc_ss_allocator_t * p_allocator, 532 rpc_ss_allocator_t * p_old_allocator 533) 534{ 535 rpc_ss_allocator_t allocator; 536 rpc_ss_thread_support_ptrs_t *p_support_ptrs; 537 538 rpc_ss_client_get_thread_ctx( &p_support_ptrs ); 539 540 /* Make sure it is safe for the old and new pointers to point 541 * to the same allocator. 542 */ 543 allocator = *p_allocator; 544 *p_old_allocator = p_support_ptrs->allocator; 545 p_support_ptrs->allocator = allocator; 546} 547 548/****************************************************************************/ 549/* rpc_ss_client_free */ 550/* */ 551/* Free the specified memory using the free routine from the current memory */ 552/* management environment. This routine provides a simple interface to */ 553/* free memory returned from an RPC call. */ 554/* */ 555/****************************************************************************/ 556void rpc_ss_client_free 557( 558 idl_void_p_t p_mem 559) 560{ 561 rpc_ss_thread_support_ptrs_t *p_support_ptrs; 562 563 /* Get free routine address */ 564 rpc_ss_client_get_thread_ctx( &p_support_ptrs ); 565 /* Invoke free with the specified memory */ 566 rpc_allocator_free(&p_support_ptrs->allocator, p_mem); 567} 568 569/******************************************************************************/ 570/* */ 571/* rpc_ss_enable_allocate */ 572/* Create environment for rpc_ss_allocate to be used */ 573/* */ 574/******************************************************************************/ 575void rpc_ss_enable_allocate 576( void ) 577{ 578 rpc_ss_mem_handle *p_mem_handle; 579 rpc_ss_thread_support_ptrs_t *p_thread_support_ptrs; 580 581 /* Make sure there is a thread context key */ 582#ifndef MEMORY_NOT_WRITTEN_SERIALLY 583 if ( ! 
rpc_ss_allocate_is_set_up ) 584#endif 585 rpc_ss_init_allocate_once(); 586 587 /* Set up the parts of the required data structure */ 588 p_mem_handle = (rpc_ss_mem_handle *)malloc(sizeof(rpc_ss_mem_handle)); 589 if (p_mem_handle == NULL) 590 { 591 DCETHREAD_RAISE( rpc_x_no_memory ); 592 } 593 p_mem_handle->memory = NULL; 594 p_mem_handle->node_table = NULL; 595 p_thread_support_ptrs = (rpc_ss_thread_support_ptrs_t *) 596 malloc(sizeof(rpc_ss_thread_support_ptrs_t)); 597 if (p_thread_support_ptrs == NULL) 598 { 599 DCETHREAD_RAISE( rpc_x_no_memory ); 600 } 601 602 /* Complete the data structure and associate it with the key */ 603 /* This will make rpc_ss_allocate, rpc_ss_free the allocate/free pair */ 604 rpc_ss_build_indirection_struct( p_thread_support_ptrs, p_mem_handle, 605 idl_true ); 606} 607 608/******************************************************************************/ 609/* */ 610/* rpc_ss_disable_allocate */ 611/* Destroy environment created by rpc_ss_enable_allocate */ 612/* */ 613/******************************************************************************/ 614void rpc_ss_disable_allocate 615( void ) 616{ 617 rpc_ss_thread_indirection_t *helper_thread_indirection_ptr; 618 619 /* Get the thread support pointers structure */ 620 RPC_SS_THREADS_KEY_GET_CONTEXT( rpc_ss_thread_supp_key, 621 &helper_thread_indirection_ptr ); 622 623 rpc_ss_destroy_thread_ctx( helper_thread_indirection_ptr ); 624} 625 626#ifdef MIA 627/******************************************************************************/ 628/* */ 629/* MTS version of */ 630/* */ 631/* Do we currently have thread context data? 
*/ 632/* If not, create local storage with malloc() and free() as the */ 633/* allocate and free routines */ 634/* Copy pointers to allocate and free routines to local storage */ 635/* */ 636/******************************************************************************/ 637void rpc_ss_mts_client_estab_alloc 638( 639 volatile IDL_ms_t * IDL_msp 640) 641{ 642 rpc_ss_thread_support_ptrs_t *p_support_ptrs; 643 644#ifdef PERFMON 645 RPC_SS_CLIENT_ESTABLISH_ALLOC_N; 646#endif 647 648 rpc_ss_client_get_thread_ctx( &p_support_ptrs ); 649 IDL_msp->IDL_allocator = p_support_ptrs->allocator; 650 651#ifdef PERFMON 652 RPC_SS_CLIENT_ESTABLISH_ALLOC_X; 653#endif 654 655} 656#endif 657