/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
 * User-space client routines for IODataQueue: a circular byte queue laid out
 * in a single shared IODataQueueMemory region (head/tail offsets + entry
 * storage), with an IODataQueueAppendix holding a Mach message header used to
 * signal "data available" to a notification port.
 *
 * NOTE(review): head and tail are updated with OSAtomic*Barrier operations but
 * there is no mutual exclusion anywhere in this file; the design presumably
 * assumes a single producer and a single consumer on opposite ends of the
 * shared region — confirm against the kernel-side peer before adding callers.
 */

#include "IODataQueueClientPrivate.h"
#include <IOKit/IODataQueueShared.h>

#include <mach/message.h>
#include <mach/mach_port.h>
#include <mach/port.h>
#include <mach/mach_init.h>
#include <IOKit/OSMessageNotification.h>
#include <libkern/OSAtomic.h>


static IOReturn _IODataQueueSendDataAvailableNotification(IODataQueueMemory *dataQueue);

/*
 * Returns true when the queue is non-NULL and non-empty (head != tail).
 */
Boolean IODataQueueDataAvailable(IODataQueueMemory *dataQueue)
{
    return (dataQueue && (dataQueue->head != dataQueue->tail));
}

/*
 * Returns a pointer to the next entry to be dequeued without consuming it,
 * or NULL if the queue is NULL or empty. Performs the same wrap-around
 * decision as IODataQueueDequeue so the returned entry is the one a
 * subsequent dequeue would read.
 */
IODataQueueEntry *IODataQueuePeek(IODataQueueMemory *dataQueue)
{
    IODataQueueEntry *entry = 0;

    if (dataQueue && (dataQueue->head != dataQueue->tail)) {
        IODataQueueEntry *  head       = 0;
        UInt32              headSize   = 0;
        UInt32              headOffset = dataQueue->head;
        UInt32              queueSize  = dataQueue->queueSize;

        head     = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
        headSize = head->size;

        // Check if there's enough room before the end of the queue for a header.
        // If there is room, check if there's enough room to hold the header and
        // the data.

        if ((headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
            ((headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE) > queueSize))
        {
            // No room for the header or the data, wrap to the beginning of the queue.
            entry = dataQueue->queue;
        } else {
            entry = head;
        }
    }

    return entry;
}

/*
 * Removes the next entry from the queue.
 *
 * data     - destination buffer for the entry payload, or NULL to discard the
 *            entry without copying it.
 * dataSize - in: capacity of 'data'; out: actual size of the entry. Required
 *            when 'data' is non-NULL (kIOReturnBadArgument otherwise); when
 *            'data' is NULL it is still updated with the entry size if
 *            non-NULL.
 *
 * Returns kIOReturnSuccess, kIOReturnUnderrun (queue empty), kIOReturnNoSpace
 * (entry larger than *dataSize; entry is NOT consumed), or
 * kIOReturnBadArgument (NULL queue, or data without dataSize).
 *
 * The head offset is advanced with a compare-and-swap-with-barrier only after
 * the payload has been copied out, so the producer never sees the space freed
 * before the copy completes.
 *
 * NOTE(review): entry->size is read from shared memory and used as the memcpy
 * length with no validation against queueSize — this trusts the producer side
 * entirely.
 */
IOReturn
IODataQueueDequeue(IODataQueueMemory *dataQueue, void *data, uint32_t *dataSize)
{
    IOReturn            retVal        = kIOReturnSuccess;
    IODataQueueEntry *  entry         = 0;
    UInt32              entrySize     = 0;
    UInt32              newHeadOffset = 0;

    if (dataQueue) {
        if (dataQueue->head != dataQueue->tail) {
            IODataQueueEntry *  head       = 0;
            UInt32              headSize   = 0;
            UInt32              headOffset = dataQueue->head;
            UInt32              queueSize  = dataQueue->queueSize;

            head     = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
            headSize = head->size;

            // we wrapped around to beginning, so read from there
            // either there was not even room for the header
            if ((headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
                // or there was room for the header, but not for the data
                ((headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE) > queueSize)) {
                entry         = dataQueue->queue;
                entrySize     = entry->size;
                newHeadOffset = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
            // else it is at the end
            } else {
                entry         = head;
                entrySize     = entry->size;
                newHeadOffset = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
            }
        }

        if (entry) {
            if (data) {
                if (dataSize) {
                    if (entrySize <= *dataSize) {
                        memcpy(data, &(entry->data), entrySize);
                        // Publish the new head only after the payload copy.
                        OSAtomicCompareAndSwap32Barrier(dataQueue->head, newHeadOffset, (int32_t *)&dataQueue->head);
                    } else {
                        // Caller's buffer is too small; leave the entry queued.
                        retVal = kIOReturnNoSpace;
                    }
                } else {
                    retVal = kIOReturnBadArgument;
                }
            } else {
                // No destination buffer: consume (discard) the entry.
                OSAtomicCompareAndSwap32Barrier(dataQueue->head, newHeadOffset, (int32_t *)&dataQueue->head);
            }

            // RY: Update the data size here.  This will
            // ensure that dataSize is always updated.
            if (dataSize) {
                *dataSize = entrySize;
            }
        } else {
            retVal = kIOReturnUnderrun;  // queue was empty
        }
    } else {
        retVal = kIOReturnBadArgument;
    }

    return retVal;
}

/*
 * Common enqueue path. Writes 'dataSize' bytes of payload, either by copying
 * from 'data' or by invoking 'callback(refcon, dest, dataSize)' to fill the
 * entry in place (exactly one of data/callback is used; see the two public
 * wrappers below).
 *
 * Returns kIOReturnSuccess, kIOReturnOverrun (queue full), or the result of
 * the data-available notification send.
 *
 * In every branch the payload is written and entry->size is set BEFORE the
 * shared tail is advanced with an atomic barrier, so the consumer never
 * observes a tail covering an incompletely written entry.
 *
 * NOTE(review): entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE can wrap
 * for dataSize near UINT32_MAX, which would defeat the room checks below —
 * confirm callers bound dataSize.
 */
static IOReturn
__IODataQueueEnqueue(IODataQueueMemory *dataQueue, uint32_t dataSize, void *data, IODataQueueClientEnqueueReadBytesCallback callback, void * refcon)
{
    UInt32              head      = dataQueue->head;  // volatile
    UInt32              tail      = dataQueue->tail;
    UInt32              queueSize = dataQueue->queueSize;
    UInt32              entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
    IOReturn            retVal    = kIOReturnSuccess;
    IODataQueueEntry *  entry;

    if ( tail >= head )
    {
        // Is there enough room at the end for the entry?
        if ( (tail + entrySize) <= queueSize )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            if ( data )
                memcpy(&(entry->data), data, dataSize);
            else if ( callback )
                (*callback)(refcon, &(entry->data), dataSize);

            entry->size = dataSize;

            // The tail can be out of bound when the size of the new entry
            // exactly matches the available space at the end of the queue.
            // The tail can range from 0 to queueSize inclusive.

            OSAtomicAdd32Barrier(entrySize, (int32_t *)&dataQueue->tail);
        }
        else if ( head > entrySize )     // Is there enough room at the beginning?
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue);

            if ( data )
                memcpy(&(entry->data), data, dataSize);
            else if ( callback )
                (*callback)(refcon, &(entry->data), dataSize);

            // Wrap around to the beginning, but do not allow the tail to catch
            // up to the head.

            entry->size = dataSize;

            // We need to make sure that there is enough room to set the size before
            // doing this. The user client checks for this and will look for the size
            // at the beginning if there isn't room for it at the end.

            if ( ( queueSize - tail ) >= DATA_QUEUE_ENTRY_HEADER_SIZE )
            {
                // Leave a sentinel header at the old tail so the reader knows
                // this entry wrapped to offset 0.
                ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
            }

            OSAtomicCompareAndSwap32Barrier(dataQueue->tail, entrySize, (int32_t *)&dataQueue->tail);
        }
        else
        {
            retVal = kIOReturnOverrun;  // queue is full
        }
    }
    else
    {
        // Do not allow the tail to catch up to the head when the queue is full.
        // That's why the comparison uses a '>' rather than '>='.

        if ( (head - tail) > entrySize )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            if ( data )
                memcpy(&(entry->data), data, dataSize);
            else if ( callback )
                (*callback)(refcon, &(entry->data), dataSize);

            entry->size = dataSize;

            OSAtomicAdd32Barrier(entrySize, (int32_t *)&dataQueue->tail);
        }
        else
        {
            retVal = kIOReturnOverrun;  // queue is full
        }
    }

    // Send notification (via mach message) that data is available. Only
    // needed when the consumer could have been blocked: the queue was empty
    // before this enqueue, or was drained while it was in progress.

    if ( retVal == kIOReturnSuccess ) {
        if ( ( head == tail )                /* queue was empty prior to enqueue() */
        ||   ( dataQueue->head == tail ) )   /* queue was emptied during enqueue() */
        {
            retVal = _IODataQueueSendDataAvailableNotification(dataQueue);
        }
    }
    else if ( retVal == kIOReturnOverrun ) {
        // Send extra data available notification, this will fail and we will
        // get a send possible notification when the client starts responding
        (void) _IODataQueueSendDataAvailableNotification(dataQueue);
    }

    return retVal;
}

/*
 * Enqueues 'dataSize' bytes copied from 'data'.
 */
IOReturn
IODataQueueEnqueue(IODataQueueMemory *dataQueue, void *data, uint32_t dataSize)
{
    return __IODataQueueEnqueue(dataQueue, dataSize, data, NULL, NULL);
}


/*
 * Enqueues 'dataSize' bytes produced by 'callback' writing directly into the
 * queue entry (avoids an intermediate copy).
 */
IOReturn
_IODataQueueEnqueueWithReadCallback(IODataQueueMemory *dataQueue, uint32_t dataSize, IODataQueueClientEnqueueReadBytesCallback callback, void * refcon)
{
    return __IODataQueueEnqueue(dataQueue, dataSize, NULL, callback, refcon);
}


/*
 * Blocks in mach_msg() receive on 'notifyPort' until a data-available
 * notification arrives. Returns the mach_msg result, or kIOReturnBadArgument
 * for a NULL queue or MACH_PORT_NULL port. The message content is discarded;
 * only its arrival matters.
 */
IOReturn IODataQueueWaitForAvailableData(IODataQueueMemory *dataQueue, mach_port_t notifyPort)
{
    IOReturn kr;
    struct {
        mach_msg_header_t msgHdr;
//      OSNotificationHeader notifyHeader;
        mach_msg_trailer_t trailer;
    } msg;

    if (dataQueue && (notifyPort != MACH_PORT_NULL)) {
        kr = mach_msg(&msg.msgHdr, MACH_RCV_MSG, 0, sizeof(msg), notifyPort, 0, MACH_PORT_NULL);
    } else {
        kr = kIOReturnBadArgument;
    }

    return kr;
}

/*
 * Allocates a receive right suitable for IODataQueueSetNotificationPort and
 * limits its queue to a single message, since notifications are edge
 * triggers and extra ones carry no information.
 *
 * NOTE(review): the results of mach_port_allocate/get_attributes/
 * set_attributes are not checked; on allocation failure this returns
 * MACH_PORT_NULL (the initializer) after calling the attribute routines on a
 * null port name.
 */
mach_port_t IODataQueueAllocateNotificationPort()
{
    mach_port_t             port = MACH_PORT_NULL;
    mach_port_limits_t      limits;
    mach_msg_type_number_t  info_cnt;

    mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);

    info_cnt = MACH_PORT_LIMITS_INFO_COUNT;

    mach_port_get_attributes(mach_task_self(),
                             port,
                             MACH_PORT_LIMITS_INFO,
                             (mach_port_info_t)&limits,
                             &info_cnt);

    limits.mpl_qlimit = 1;  // Set queue to only 1 message

    mach_port_set_attributes(mach_task_self(),
                             port,
                             MACH_PORT_LIMITS_INFO,
                             (mach_port_info_t)&limits,
                             MACH_PORT_LIMITS_INFO_COUNT);

    return port;
}

/*
 * Records 'notifyPort' in the queue's appendix (located immediately past the
 * queue storage) by pre-building the Mach message header that
 * _IODataQueueSendDataAvailableNotification will copy and send. Passing
 * MACH_PORT_NULL disables notifications.
 */
IOReturn IODataQueueSetNotificationPort(IODataQueueMemory *dataQueue, mach_port_t notifyPort)
{
    IODataQueueAppendix *   appendix  = NULL;
    UInt32                  queueSize = 0;

    if ( !dataQueue )
        return kIOReturnBadArgument;

    queueSize = dataQueue->queueSize;

    // Appendix lives after the memory header and the queue storage itself.
    appendix = (IODataQueueAppendix *)((UInt8 *)dataQueue + queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE);

    appendix->msgh.msgh_bits        = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
    appendix->msgh.msgh_size        = sizeof(appendix->msgh);
    appendix->msgh.msgh_remote_port = notifyPort;
    appendix->msgh.msgh_local_port  = MACH_PORT_NULL;
    appendix->msgh.msgh_id          = 0;

    return kIOReturnSuccess;
}

/*
 * Sends the pre-built data-available message from the queue's appendix,
 * non-blocking (MACH_SEND_TIMEOUT with a zero timeout). Because the port's
 * queue limit is one message, MACH_SEND_TIMED_OUT means a notification is
 * already pending and is treated the same as success by callers inspecting
 * the switch below. Returns kIOReturnSuccess immediately when no port has
 * been set.
 */
IOReturn _IODataQueueSendDataAvailableNotification(IODataQueueMemory *dataQueue)
{
    IODataQueueAppendix *   appendix  = NULL;
    UInt32                  queueSize = 0;

    queueSize = dataQueue->queueSize;

    appendix = (IODataQueueAppendix *)((UInt8 *)dataQueue + queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE);

    if ( appendix->msgh.msgh_remote_port == MACH_PORT_NULL )
        return kIOReturnSuccess;  // return success if no port is declared

    kern_return_t kr;
    // Send a local copy so the shared template is never mutated by mach_msg.
    mach_msg_header_t msgh = appendix->msgh;

    kr = mach_msg(&msgh, MACH_SEND_MSG | MACH_SEND_TIMEOUT, msgh.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    switch(kr) {
        case MACH_SEND_TIMED_OUT:  // Notification already sent
        case MACH_MSG_SUCCESS:
            break;
        default:
            // perhaps add log here
            break;
    }

    return kr;
}