/* $OpenBSD: monitor_mm.c,v 1.18 2013/11/08 00:39:15 djm Exp $ */
/*
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25941Snate */ 26941Snate 27941Snate#include "includes.h" 28941Snate 29941Snate#include <sys/types.h> 30941Snate#ifdef HAVE_SYS_MMAN_H 317767Sache#include <sys/mman.h> 327767Sache#endif 337767Sache#include <sys/param.h> 34941Snate#include "openbsd-compat/sys-tree.h" 35941Snate 36941Snate#include <errno.h> 37941Snate#include <stdarg.h> 387767Sache#include <stdlib.h> 39941Snate#include <string.h> 4026872Scharnier 41941Snate#include "xmalloc.h" 42941Snate#include "ssh.h" 43941Snate#include "log.h" 44941Snate#include "monitor_mm.h" 45941Snate 46941Snatestatic int 47941Snatemm_compare(struct mm_share *a, struct mm_share *b) 487767Sache{ 497767Sache long diff = (char *)a->address - (char *)b->address; 507767Sache 51941Snate if (diff == 0) 52941Snate return (0); 53941Snate else if (diff < 0) 54941Snate return (-1); 55941Snate else 56941Snate return (1); 57941Snate} 58941Snate 59941SnateRB_GENERATE(mmtree, mm_share, next, mm_compare) 6010154Sache 61941Snatestatic struct mm_share * 62941Snatemm_make_entry(struct mm_master *mm, struct mmtree *head, 63941Snate void *address, size_t size) 64941Snate{ 65941Snate struct mm_share *tmp, *tmp2; 667767Sache 677767Sache if (mm->mmalloc == NULL) 687767Sache tmp = xcalloc(1, sizeof(struct mm_share)); 69941Snate else 707767Sache tmp = mm_xmalloc(mm->mmalloc, sizeof(struct mm_share)); 71941Snate tmp->address = address; 72941Snate tmp->size = size; 7310154Sache 7410154Sache tmp2 = RB_INSERT(mmtree, head, tmp); 757767Sache if (tmp2 != NULL) 76941Snate fatal("mm_make_entry(%p): double address %p->%p(%lu)", 777767Sache mm, tmp2, address, (u_long)size); 787767Sache 797767Sache return (tmp); 807767Sache} 817767Sache 827767Sache/* Creates a shared memory area of a certain size */ 837767Sache 847767Sachestruct mm_master * 85941Snatemm_create(struct mm_master *mmalloc, size_t size) 867767Sache{ 877767Sache void *address; 887767Sache struct mm_master *mm; 897767Sache 907767Sache if (mmalloc == NULL) 917767Sache mm = xcalloc(1, sizeof(struct 
mm_master)); 927767Sache else 937767Sache mm = mm_xmalloc(mmalloc, sizeof(struct mm_master)); 947767Sache 957767Sache /* 967767Sache * If the memory map has a mm_master it can be completely 977767Sache * shared including authentication between the child 987767Sache * and the client. 997767Sache */ 1007767Sache mm->mmalloc = mmalloc; 1017767Sache 1027767Sache address = xmmap(size); 1037767Sache if (address == (void *)MAP_FAILED) 1047767Sache fatal("mmap(%lu): %s", (u_long)size, strerror(errno)); 1057767Sache 1067767Sache mm->address = address; 1077767Sache mm->size = size; 1087767Sache 1097767Sache RB_INIT(&mm->rb_free); 1107767Sache RB_INIT(&mm->rb_allocated); 1117767Sache 1127767Sache mm_make_entry(mm, &mm->rb_free, address, size); 1137767Sache 1147767Sache return (mm); 1157767Sache} 1167767Sache 1177767Sache/* Frees either the allocated or the free list */ 1187767Sache 1197767Sachestatic void 120941Snatemm_freelist(struct mm_master *mmalloc, struct mmtree *head) 121941Snate{ 122941Snate struct mm_share *mms, *next; 123941Snate 124941Snate for (mms = RB_ROOT(head); mms; mms = next) { 125941Snate next = RB_NEXT(mmtree, head, mms); 126941Snate RB_REMOVE(mmtree, head, mms); 127941Snate if (mmalloc == NULL) 128941Snate free(mms); 129941Snate else 130941Snate mm_free(mmalloc, mms); 13110154Sache } 1327767Sache} 133941Snate 13435729Salex/* Destroys a memory mapped area */ 135941Snate 136941Snatevoid 137941Snatemm_destroy(struct mm_master *mm) 138941Snate{ 139941Snate mm_freelist(mm->mmalloc, &mm->rb_free); 140941Snate mm_freelist(mm->mmalloc, &mm->rb_allocated); 14110154Sache 14210154Sache#ifdef HAVE_MMAP 143941Snate if (munmap(mm->address, mm->size) == -1) 144941Snate fatal("munmap(%p, %lu): %s", mm->address, (u_long)mm->size, 145941Snate strerror(errno)); 146941Snate#else 147941Snate fatal("%s: UsePrivilegeSeparation=yes and Compression=yes not supported", 1487767Sache __func__); 149941Snate#endif 150941Snate if (mm->mmalloc == NULL) 151941Snate free(mm); 152941Snate 
else 153941Snate mm_free(mm->mmalloc, mm); 154941Snate} 155941Snate 156941Snatevoid * 157941Snatemm_xmalloc(struct mm_master *mm, size_t size) 158941Snate{ 159941Snate void *address; 160941Snate 1617767Sache address = mm_malloc(mm, size); 162941Snate if (address == NULL) 163941Snate fatal("%s: mm_malloc(%lu)", __func__, (u_long)size); 164941Snate memset(address, 0, size); 165941Snate return (address); 166941Snate} 16710154Sache 16810154Sache 169941Snate/* Allocates data from a memory mapped area */ 1707767Sache 171941Snatevoid * 172941Snatemm_malloc(struct mm_master *mm, size_t size) 173941Snate{ 174941Snate struct mm_share *mms, *tmp; 175941Snate 17610154Sache if (size == 0) 177941Snate fatal("mm_malloc: try to allocate 0 space"); 178941Snate if (size > SIZE_T_MAX - MM_MINSIZE + 1) 179941Snate fatal("mm_malloc: size too big"); 180941Snate 181941Snate size = ((size + (MM_MINSIZE - 1)) / MM_MINSIZE) * MM_MINSIZE; 182941Snate 183941Snate RB_FOREACH(mms, mmtree, &mm->rb_free) { 1847767Sache if (mms->size >= size) 185941Snate break; 186941Snate } 1877767Sache 188941Snate if (mms == NULL) 189941Snate return (NULL); 190941Snate 191941Snate /* Debug */ 192941Snate memset(mms->address, 0xd0, size); 193941Snate 194941Snate tmp = mm_make_entry(mm, &mm->rb_allocated, mms->address, size); 195941Snate 196941Snate /* Does not change order in RB tree */ 197941Snate mms->size -= size; 1987767Sache mms->address = (u_char *)mms->address + size; 199941Snate 200941Snate if (mms->size == 0) { 201941Snate RB_REMOVE(mmtree, &mm->rb_free, mms); 202941Snate if (mm->mmalloc == NULL) 203941Snate free(mms); 204941Snate else 205941Snate mm_free(mm->mmalloc, mms); 206941Snate } 207941Snate 208941Snate return (tmp->address); 2097767Sache} 210941Snate 211941Snate/* Frees memory in a memory mapped area */ 212941Snate 2137767Sachevoid 214941Snatemm_free(struct mm_master *mm, void *address) 215941Snate{ 216941Snate struct mm_share *mms, *prev, tmp; 217941Snate 218941Snate tmp.address = address; 
219941Snate mms = RB_FIND(mmtree, &mm->rb_allocated, &tmp); 2207767Sache if (mms == NULL) 2217767Sache fatal("mm_free(%p): can not find %p", mm, address); 222941Snate 223941Snate /* Debug */ 224941Snate memset(mms->address, 0xd0, mms->size); 225941Snate 226941Snate /* Remove from allocated list and insert in free list */ 227941Snate RB_REMOVE(mmtree, &mm->rb_allocated, mms); 228941Snate if (RB_INSERT(mmtree, &mm->rb_free, mms) != NULL) 229941Snate fatal("mm_free(%p): double address %p", mm, address); 230941Snate 2317767Sache /* Find previous entry */ 232941Snate prev = mms; 233941Snate if (RB_LEFT(prev, next)) { 234941Snate prev = RB_LEFT(prev, next); 235941Snate while (RB_RIGHT(prev, next)) 236941Snate prev = RB_RIGHT(prev, next); 237941Snate } else { 238941Snate if (RB_PARENT(prev, next) && 239941Snate (prev == RB_RIGHT(RB_PARENT(prev, next), next))) 240941Snate prev = RB_PARENT(prev, next); 241941Snate else { 242941Snate while (RB_PARENT(prev, next) && 2437767Sache (prev == RB_LEFT(RB_PARENT(prev, next), next))) 244941Snate prev = RB_PARENT(prev, next); 245941Snate prev = RB_PARENT(prev, next); 246941Snate } 247941Snate } 248941Snate 249941Snate /* Check if range does not overlap */ 25010154Sache if (prev != NULL && MM_ADDRESS_END(prev) > address) 251941Snate fatal("mm_free: memory corruption: %p(%lu) > %p", 252941Snate prev->address, (u_long)prev->size, address); 253941Snate 25410154Sache /* See if we can merge backwards */ 255941Snate if (prev != NULL && MM_ADDRESS_END(prev) == address) { 256941Snate prev->size += mms->size; 257941Snate RB_REMOVE(mmtree, &mm->rb_free, mms); 258941Snate if (mm->mmalloc == NULL) 259941Snate free(mms); 260941Snate else 261941Snate mm_free(mm->mmalloc, mms); 262941Snate } else 263941Snate prev = mms; 264941Snate 265941Snate if (prev == NULL) 266941Snate return; 2677767Sache 268941Snate /* Check if we can merge forwards */ 269941Snate mms = RB_NEXT(mmtree, &mm->rb_free, prev); 270941Snate if (mms == NULL) 271941Snate return; 
272941Snate 273941Snate if (MM_ADDRESS_END(prev) > mms->address) 274941Snate fatal("mm_free: memory corruption: %p < %p(%lu)", 275941Snate mms->address, prev->address, (u_long)prev->size); 276941Snate if (MM_ADDRESS_END(prev) != mms->address) 277941Snate return; 278941Snate 279941Snate prev->size += mms->size; 280941Snate RB_REMOVE(mmtree, &mm->rb_free, mms); 281941Snate 282941Snate if (mm->mmalloc == NULL) 283941Snate free(mms); 284941Snate else 285941Snate mm_free(mm->mmalloc, mms); 286941Snate} 287941Snate 288941Snatestatic void 289941Snatemm_sync_list(struct mmtree *oldtree, struct mmtree *newtree, 290941Snate struct mm_master *mm, struct mm_master *mmold) 291941Snate{ 292941Snate struct mm_master *mmalloc = mm->mmalloc; 293941Snate struct mm_share *mms, *new; 294941Snate 295941Snate /* Sync free list */ 296941Snate RB_FOREACH(mms, mmtree, oldtree) { 297941Snate /* Check the values */ 298941Snate mm_memvalid(mmold, mms, sizeof(struct mm_share)); 299941Snate mm_memvalid(mm, mms->address, mms->size); 300941Snate 301941Snate new = mm_xmalloc(mmalloc, sizeof(struct mm_share)); 302941Snate memcpy(new, mms, sizeof(struct mm_share)); 303941Snate RB_INSERT(mmtree, newtree, new); 304941Snate } 305941Snate} 306941Snate 307941Snatevoid 3087767Sachemm_share_sync(struct mm_master **pmm, struct mm_master **pmmalloc) 309941Snate{ 310941Snate struct mm_master *mm; 3117767Sache struct mm_master *mmalloc; 312941Snate struct mm_master *mmold; 313941Snate struct mmtree rb_free, rb_allocated; 314941Snate 315941Snate debug3("%s: Share sync", __func__); 3167767Sache 317941Snate mm = *pmm; 318941Snate mmold = mm->mmalloc; 319941Snate mm_memvalid(mmold, mm, sizeof(*mm)); 320941Snate 321941Snate mmalloc = mm_create(NULL, mm->size); 322941Snate mm = mm_xmalloc(mmalloc, sizeof(struct mm_master)); 323941Snate memcpy(mm, *pmm, sizeof(struct mm_master)); 324941Snate mm->mmalloc = mmalloc; 325941Snate 3267767Sache rb_free = mm->rb_free; 32726835Scharnier rb_allocated = mm->rb_allocated; 
328941Snate 329941Snate RB_INIT(&mm->rb_free); 330941Snate RB_INIT(&mm->rb_allocated); 331941Snate 332941Snate mm_sync_list(&rb_free, &mm->rb_free, mm, mmold); 333941Snate mm_sync_list(&rb_allocated, &mm->rb_allocated, mm, mmold); 334941Snate 335941Snate mm_destroy(mmold); 336941Snate 337941Snate *pmm = mm; 338941Snate *pmmalloc = mmalloc; 339941Snate 3407767Sache debug3("%s: Share sync end", __func__); 341941Snate} 342941Snate 343941Snatevoid 344941Snatemm_memvalid(struct mm_master *mm, void *address, size_t size) 345941Snate{ 346941Snate void *end = (u_char *)address + size; 347941Snate 3487767Sache if (address < mm->address) 349941Snate fatal("mm_memvalid: address too small: %p", address); 350941Snate if (end < address) 351941Snate fatal("mm_memvalid: end < address: %p < %p", end, address); 352941Snate if (end > (void *)((u_char *)mm->address + mm->size)) 353941Snate fatal("mm_memvalid: address too large: %p", address); 354941Snate} 355941Snate