1/** 2 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. 3 * Copyright (c) 2010-2012 Broadcom. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions, and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. The names of the above-listed copyright holders may not be used 15 * to endorse or promote products derived from this software without 16 * specific prior written permission. 17 * 18 * ALTERNATIVELY, this software may be distributed under the terms of the 19 * GNU General Public License ("GPL") version 2, as published by the Free 20 * Software Foundation. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 23 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 26 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 27 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 29 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 30 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 31 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kmem.h>

#include "vchiq_core.h"
#include "vchiq_ioctl.h"
#include "vchiq_arm.h"
#include "vchiq_debugfs.h"

/* Name under which the character device is registered. */
#define DEVICE_NAME "vchiq"

/* Override the default prefix, which would be vchiq_arm (from the filename) */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX DEVICE_NAME "."

#define VCHIQ_MINOR 0

/* Some per-instance constants */
#define MAX_COMPLETIONS 128	/* completion ring size; indices are masked with
				 * (MAX_COMPLETIONS - 1), so must be a power of 2 */
#define MAX_SERVICES 64
#define MAX_ELEMENTS 8		/* upper bound on element count accepted by
				 * the QUEUE_MESSAGE ioctl */
#define MSG_QUEUE_SIZE 128	/* per-service message ring; indices are masked
				 * with (MSG_QUEUE_SIZE - 1), so power of 2 */

#define KEEPALIVE_VER 1
#define KEEPALIVE_VER_MIN KEEPALIVE_VER

MALLOC_DEFINE(M_VCHIQ, "vchiq_cdev", "VideoCore cdev memory");

/* Run time control of log level, based on KERN_XXX level. */
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;

#define SUSPEND_TIMER_TIMEOUT_MS 100
#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000

#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
static const char *const suspend_state_names[] = {
	"VC_SUSPEND_FORCE_CANCELED",
	"VC_SUSPEND_REJECTED",
	"VC_SUSPEND_FAILED",
	"VC_SUSPEND_IDLE",
	"VC_SUSPEND_REQUESTED",
	"VC_SUSPEND_IN_PROGRESS",
	"VC_SUSPEND_SUSPENDED"
};
#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
static const char *const resume_state_names[] = {
	"VC_RESUME_FAILED",
	"VC_RESUME_IDLE",
	"VC_RESUME_REQUESTED",
	"VC_RESUME_IN_PROGRESS",
	"VC_RESUME_RESUMED"
};
/* The number of times we allow force suspend to timeout before actually
** _forcing_ suspend.  This is to cater for SW which fails to release vchiq
** correctly - we don't want to prevent ARM suspend indefinitely in this case.
*/
#define FORCE_SUSPEND_FAIL_MAX 8

/* The time in ms allowed for videocore to go idle when force suspend has been
 * requested */
#define FORCE_SUSPEND_TIMEOUT_MS 200


static void suspend_timer_callback(unsigned long context);


/*
 * Per-service bookkeeping for a service created through this cdev.
 * Allocated in the CREATE_SERVICE ioctl and handed to the core as the
 * service userdata; freed by user_service_free() when the core releases
 * the service.
 */
typedef struct user_service_struct {
	VCHIQ_SERVICE_T *service;	/* underlying core service */
	void *userdata;			/* opaque value supplied by userland */
	VCHIQ_INSTANCE_T instance;	/* owning instance (one per open) */
	char is_vchi;			/* non-zero: headers are queued in
					 * msg_queue for DEQUEUE_MESSAGE */
	char dequeue_pending;		/* a DEQUEUE_MESSAGE is blocked waiting
					 * for a message */
	char close_pending;		/* CLOSED completion queued but not yet
					 * acknowledged via CLOSE_DELIVERED */
	int message_available_pos;	/* completion-ring position of the most
					 * recent MESSAGE_AVAILABLE */
	int msg_insert;			/* msg_queue insert index (masked) */
	int msg_remove;			/* msg_queue remove index (masked) */
	struct semaphore insert_event;	/* signalled when a message is queued */
	struct semaphore remove_event;	/* signalled when a message is consumed */
	struct semaphore close_event;	/* signalled by close_delivered() */
	VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
} USER_SERVICE_T;

/*
 * Remembers a blocking bulk transfer that returned VCHIQ_RETRY so a later
 * VCHIQ_BULK_MODE_WAITING call from the same pid can pick it up again.
 */
struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;			/* pid that started the transfer */
	struct list_head list;		/* linkage on instance->bulk_waiter_list */
};

/*
 * Per-open-file state; allocated in vchiq_open() and stored in fp->f_data.
 */
struct vchiq_instance_struct {
	VCHIQ_STATE_T *state;
	VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS]; /* completion ring */
	int completion_insert;		/* free-running insert index (masked) */
	int completion_remove;		/* free-running remove index (masked) */
	struct semaphore insert_event;	/* signalled when a completion is added */
	struct semaphore remove_event;	/* signalled when completions are consumed */
	struct mutex completion_mutex;

	int connected;			/* CONNECT ioctl has succeeded */
	int closing;			/* instance is shutting down */
	int pid;
	int mark;
	int use_close_delivered;	/* set by LIB_VERSION when the client
					 * library supports CLOSE_DELIVERED */
	int trace;

	struct list_head bulk_waiter_list;	/* saved bulk_waiter_nodes */
	struct mutex bulk_waiter_list_mutex;

	VCHIQ_DEBUGFS_NODE_T debugfs_node;
};

typedef struct dump_context_struct {
	char __user *buf;
	size_t actual;
	size_t space;
	loff_t offset;
} DUMP_CONTEXT_T;

VCHIQ_STATE_T g_state;
/* Protects every per-service msg_queue and its insert/remove indices. */
static DEFINE_SPINLOCK(msg_queue_spinlock);

/* Names for logging, indexed by ioctl number; order must match vchiq_ioctl.h */
static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

/* The logging table above must cover every ioctl code. */
vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
	(VCHIQ_IOC_MAX + 1));

static dev_type_open(vchiq_open);

/* Device switch entry: only open is handled here; all further operations go
 * through the fileops installed by fd_clone() in vchiq_open(). */
struct cdevsw vchiq_cdevsw = {
	.d_open = vchiq_open,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

extern struct cfdriver vchiq_cd;

static int	vchiq_ioctl(struct file *, u_long, void *);
static int	vchiq_close(struct file *);
static int	vchiq_read(struct file *, off_t *, struct uio *, kauth_cred_t,
			   int);

static const struct fileops vchiq_fileops = {
	.fo_name = "vchiq",
	.fo_read = vchiq_read,
	.fo_write = fbadop_write,
	.fo_ioctl = vchiq_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = vchiq_close,
	.fo_kqfilter = fnullop_kqfilter,
};

#if 0
static void
dump_phys_mem(void *virt_addr, uint32_t num_bytes);
#endif

/****************************************************************************
*
* add_completion
*
***************************************************************************/

/*
 * Append a completion record to the instance's completion ring for later
 * consumption by the AWAIT_COMPLETION ioctl.  Blocks (interruptibly) on
 * remove_event while the ring is full.  Returns VCHIQ_SUCCESS,
 * VCHIQ_RETRY if interrupted, or VCHIQ_SUCCESS early if the instance is
 * closing.
 */
static VCHIQ_STATUS_T
add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
	void *bulk_userdata)
{
	VCHIQ_COMPLETION_DATA_T *completion;
	int insert;
	DEBUG_INITIALISE(g_state.local)

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
			"add_completion - completion queue full");
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);

		if (down_interruptible(&instance->remove_event) != 0) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback interrupted");
			return VCHIQ_RETRY;
		}

		if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* Indices are free-running; mask to get the ring slot. */
	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/* Take an extra reference, to be held until
		   this CLOSED notification is delivered. */
		lock_service(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/* A write barrier is needed here to ensure that the entire completion
		record is written out before the insert point. */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	instance->completion_insert = ++insert;

	up(&instance->insert_event);

	return VCHIQ_SUCCESS;
}

/****************************************************************************
*
* service_callback
*
***************************************************************************/

/*
 * Callback installed on every service created through this cdev.  For vchi
 * services with a message header it queues the header on the per-service
 * msg_queue (blocking while the queue is full); otherwise, and for all
 * other reasons, it funnels the event into the completion ring via
 * add_completion().
 */
static VCHIQ_STATUS_T
service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
{
	/* How do we ensure the callback goes to the right client?
	** The service_user data points to a USER_SERVICE_T record containing
	** the original callback and the user state structure, which contains a
	** circular buffer for completion records.
	*/
	USER_SERVICE_T *user_service;
	VCHIQ_SERVICE_T *service;
	VCHIQ_INSTANCE_T instance;
	int skip_completion = 0;
	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	BUG_ON(!service);
	user_service = (USER_SERVICE_T *)service->base.userdata;
	instance = user_service->instance;

	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
		"service_callback - service %lx(%d,%p), reason %d, header %lx, "
		"instance %lx, bulk_userdata %lx",
		(unsigned long)user_service,
		service->localport, user_service->userdata,
		reason, (unsigned long)header,
		(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			/* Queue full: drop the lock, make sure the client
			** will be woken, then wait for space. */
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
				"service_callback - msg queue full");
			/* If there is no MESSAGE_AVAILABLE in the completion
			** queue, add one
			*/
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				VCHIQ_STATUS_T status;
				vchiq_log_info(vchiq_arm_log_level,
					"Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (down_interruptible(&user_service->remove_event)
				!= 0) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback interrupted");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback closing");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/* If there is a thread waiting in DEQUEUE_MESSAGE, or if
		** there is a MESSAGE_AVAILABLE in the completion queue then
		** bypass the completion queue.
		*/
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = 1;
		}

		spin_unlock(&msg_queue_spinlock);

		up(&user_service->insert_event);

		/* Header now owned by msg_queue; don't also queue it as a
		** completion payload. */
		header = NULL;
	}

	if (skip_completion) {
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		return VCHIQ_SUCCESS;
	}

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}

/****************************************************************************
*
* user_service_free
*
***************************************************************************/

/*
 * Destructor passed to vchiq_add_service_internal(); invoked when the core
 * releases the service.
 * NOTE(review): close_event is initialised in CREATE_SERVICE but is not
 * destroyed here - confirm whether a _sema_destroy(&close_event) is needed.
 */
static void
user_service_free(void *userdata)
{
	USER_SERVICE_T *user_service = userdata;

	_sema_destroy(&user_service->insert_event);
	_sema_destroy(&user_service->remove_event);

	kfree(user_service);
}

/****************************************************************************
*
* close_delivered
*
***************************************************************************/

/*
 * Userland has acknowledged a CLOSED completion (CLOSE_DELIVERED ioctl, or
 * implicitly when use_close_delivered is not set): release the extra
 * service reference taken in add_completion() and wake any waiter.
 */
static void close_delivered(USER_SERVICE_T *user_service)
{
	vchiq_log_info(vchiq_arm_log_level,
		"close_delivered(handle=%x)",
		user_service->service->handle);

	if (user_service->close_pending) {
		/* Allow the underlying service to be culled */
		unlock_service(user_service->service);

		/* Wake the
		   user-thread blocked in close_ or remove_service */
		up(&user_service->close_event);

		user_service->close_pending = 0;
	}
}

/****************************************************************************
*
* vchiq_ioctl
*
***************************************************************************/

/*
 * Handle all vchiq ioctls on an open instance (fp->f_data).  The Linux-style
 * negative-errno convention is used internally in "ret" and converted to a
 * positive BSD errno just before returning.  A non-NULL "service" at the
 * bottom of the switch is unlocked exactly once.
 */
static int
vchiq_ioctl(struct file *fp, u_long cmd, void *arg)
{
	VCHIQ_INSTANCE_T instance = fp->f_data;
	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
	VCHIQ_SERVICE_T *service = NULL;
	int ret = 0;
	int i, rc;
	DEBUG_INITIALISE(g_state.local)

/* XXXBSD: HACK! */
#define _IOC_NR(x) ((x) & 0xff)
#define _IOC_TYPE(x)	IOCGROUP(x)

	vchiq_log_trace(vchiq_arm_log_level,
		"vchiq_ioctl - instance %p, cmd %s, arg %p",
		instance,
		((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
		(_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
		ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);

	switch (cmd) {
	case VCHIQ_IOC_SHUTDOWN:
		if (!instance->connected)
			break;

		/* Remove all services */
		i = 0;
		while ((service = next_service_by_instance(instance->state,
			instance, &i)) != NULL) {
			status = vchiq_remove_service(service->handle);
			unlock_service(service);
			if (status != VCHIQ_SUCCESS)
				break;
		}
		service = NULL;

		if (status == VCHIQ_SUCCESS) {
			/* Wake the completion thread and ask it to exit */
			instance->closing = 1;
			up(&instance->insert_event);
		}

		break;

	case VCHIQ_IOC_CONNECT:
		if (instance->connected) {
			ret = -EINVAL;
			break;
		}
		rc = lmutex_lock_interruptible(&instance->state->mutex);
		if (rc != 0) {
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq: connect: could not lock mutex for "
				"state %d: %d",
				instance->state->id, rc);
			ret = -EINTR;
			break;
		}
		status = vchiq_connect_internal(instance->state, instance);
		lmutex_unlock(&instance->state->mutex);

		if (status == VCHIQ_SUCCESS)
			instance->connected = 1;
		else
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq: could not connect: %d", status);
		break;

	case VCHIQ_IOC_CREATE_SERVICE: {
		VCHIQ_CREATE_SERVICE_T *pargs = arg;
		VCHIQ_CREATE_SERVICE_T args = *pargs;
		USER_SERVICE_T *user_service = NULL;
		void *userdata;
		int srvstate;

		/* XXXNH kmalloc */
		user_service = kzalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
		if (!user_service) {
			ret = -ENOMEM;
			break;
		}

		if (args.is_open) {
			if (!instance->connected) {
				ret = -ENOTCONN;
				kfree(user_service);
				break;
			}
			srvstate = VCHIQ_SRVSTATE_OPENING;
		} else {
			srvstate =
				 instance->connected ?
				 VCHIQ_SRVSTATE_LISTENING :
				 VCHIQ_SRVSTATE_HIDDEN;
		}

		/* Substitute our callback/userdata; the caller's userdata is
		 * preserved in user_service->userdata and restored into each
		 * completion during AWAIT_COMPLETION. */
		userdata = args.params.userdata;
		pargs->params.callback = service_callback;
		pargs->params.userdata = user_service;
		args.params.callback = service_callback;
		args.params.userdata = user_service;
		service = vchiq_add_service_internal(
				instance->state,
				&pargs->params, srvstate,
				instance, user_service_free);

		if (service != NULL) {
			user_service->service = service;
			user_service->userdata = userdata;
			user_service->instance = instance;
			user_service->is_vchi = (args.is_vchi != 0);
			user_service->dequeue_pending = 0;
			user_service->close_pending = 0;
			user_service->message_available_pos =
				instance->completion_remove - 1;
			user_service->msg_insert = 0;
			user_service->msg_remove = 0;
			_sema_init(&user_service->insert_event, 0);
			_sema_init(&user_service->remove_event, 0);
			_sema_init(&user_service->close_event, 0);

			if (args.is_open) {
				status = vchiq_open_service_internal
					(service, instance->pid);
				if (status != VCHIQ_SUCCESS) {
					vchiq_remove_service(service->handle);
					service = NULL;
					ret = (status == VCHIQ_RETRY) ?
						-EINTR : -EIO;
					break;
				}
			}

#ifdef VCHIQ_IOCTL_DEBUG
			printf("%s: [CREATE SERVICE] handle = %08x\n", __func__, service->handle);
#endif
			pargs->handle = service->handle;

			/* Ownership passed to the core; don't unlock below. */
			service = NULL;
		} else {
			ret = -EEXIST;
			kfree(user_service);
		}
	} break;

	case VCHIQ_IOC_CLOSE_SERVICE: {
		VCHIQ_SERVICE_HANDLE_T handle = *(VCHIQ_SERVICE_HANDLE_T *)arg;

#ifdef VCHIQ_IOCTL_DEBUG
		printf("%s: [CLOSE SERVICE] handle = %08x\n", __func__, handle);
#endif

		service = find_service_for_instance(instance, handle);
		if (service != NULL) {
			USER_SERVICE_T *user_service =
				(USER_SERVICE_T *)service->base.userdata;
			/* close_pending is false on first entry, and when the
			   wait in vchiq_close_service has been interrupted. */
			if (!user_service->close_pending) {
				status = vchiq_close_service(service->handle);
				if (status != VCHIQ_SUCCESS)
					break;
			}

			/* close_pending is true once the underlying service
			   has been closed until the client library calls the
			   CLOSE_DELIVERED ioctl, signalling close_event. */
			if (user_service->close_pending &&
				down_interruptible(&user_service->close_event))
				status = VCHIQ_RETRY;
		}
		else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_REMOVE_SERVICE: {
		VCHIQ_SERVICE_HANDLE_T handle = *(VCHIQ_SERVICE_HANDLE_T *)arg;

#ifdef VCHIQ_IOCTL_DEBUG
		printf("%s: [REMOVE SERVICE] handle = %08x\n", __func__, handle);
#endif

		service = find_service_for_instance(instance, handle);
		if (service != NULL) {
			USER_SERVICE_T *user_service =
				(USER_SERVICE_T *)service->base.userdata;
			/* close_pending is false on first entry, and when the
			   wait in vchiq_close_service has been interrupted. */
			if (!user_service->close_pending) {
				status = vchiq_remove_service(service->handle);
				if (status != VCHIQ_SUCCESS)
					break;
			}

			/* close_pending is true once the underlying service
			   has been closed until the client library calls the
			   CLOSE_DELIVERED ioctl, signalling close_event. */
			if (user_service->close_pending &&
				down_interruptible(&user_service->close_event))
				status = VCHIQ_RETRY;
		}
		else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_USE_SERVICE:
	case VCHIQ_IOC_RELEASE_SERVICE: {
		VCHIQ_SERVICE_HANDLE_T handle = *(VCHIQ_SERVICE_HANDLE_T *)arg;

#ifdef VCHIQ_IOCTL_DEBUG
		printf("%s: [%s SERVICE] handle = %08x\n", __func__,
		    cmd == VCHIQ_IOC_USE_SERVICE ? "USE" : "RELEASE", handle);
#endif

		service = find_service_for_instance(instance, handle);
		if (service != NULL) {
			status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
				vchiq_use_service_internal(service) :
				vchiq_release_service_internal(service);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s: cmd %s returned error %d for "
					"service %c%c%c%c:%8x",
					__func__,
					(cmd == VCHIQ_IOC_USE_SERVICE) ?
						"VCHIQ_IOC_USE_SERVICE" :
						"VCHIQ_IOC_RELEASE_SERVICE",
					status,
					VCHIQ_FOURCC_AS_4CHARS(
						service->base.fourcc),
					service->client_id);
				ret = -EINVAL;
			}
		} else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_QUEUE_MESSAGE: {
		VCHIQ_QUEUE_MESSAGE_T *pargs = arg;
		VCHIQ_QUEUE_MESSAGE_T args = *pargs;

#ifdef VCHIQ_IOCTL_DEBUG
		printf("%s: [QUEUE MESSAGE] handle = %08x\n", __func__, args.handle);
#endif

		service = find_service_for_instance(instance, args.handle);

		if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
			/* Copy elements into kernel space */
			VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
			if (copy_from_user(elements, args.elements,
				args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
				status = vchiq_queue_message
					(args.handle,
					elements, args.count);
			else
				ret = -EFAULT;
		} else {
			ret = -EINVAL;
		}
	} break;

	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
		VCHIQ_QUEUE_BULK_TRANSFER_T *pargs = arg;
		VCHIQ_QUEUE_BULK_TRANSFER_T args = *pargs;
		struct bulk_waiter_node *waiter = NULL;
		VCHIQ_BULK_DIR_T dir =
			(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
			VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
			/* Fresh waiter for a new blocking transfer. */
			waiter = kzalloc(sizeof(struct bulk_waiter_node),
				GFP_KERNEL);
			if (!waiter) {
				ret = -ENOMEM;
				break;
			}
			args.userdata = &waiter->bulk_waiter;
		} else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
			/* Resume a transfer previously interrupted for this
			 * pid; the waiter was parked on bulk_waiter_list. */
			struct list_head *pos;
			lmutex_lock(&instance->bulk_waiter_list_mutex);
			list_for_each(pos, &instance->bulk_waiter_list) {
				if (list_entry(pos, struct bulk_waiter_node,
					list)->pid == current->l_proc->p_pid) {
					waiter = list_entry(pos,
						struct bulk_waiter_node,
						list);
					list_del(pos);
					break;
				}

			}
			lmutex_unlock(&instance->bulk_waiter_list_mutex);
			if (!waiter) {
				vchiq_log_error(vchiq_arm_log_level,
					"no bulk_waiter found for pid %d",
					current->l_proc->p_pid);
				ret = -ESRCH;
				break;
			}
			vchiq_log_info(vchiq_arm_log_level,
				"found bulk_waiter %p for pid %d",
				waiter, current->l_proc->p_pid);
			args.userdata = &waiter->bulk_waiter;
		}
		status = vchiq_bulk_transfer
			(args.handle,
			 VCHI_MEM_HANDLE_INVALID,
			 args.data, args.size,
			 args.userdata, args.mode,
			 dir);
		if (!waiter)
			break;
		if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
			!waiter->bulk_waiter.bulk) {
			if (waiter->bulk_waiter.bulk) {
				/* Cancel the signal when the transfer
				** completes. */
				spin_lock(&bulk_waiter_spinlock);
				waiter->bulk_waiter.bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
			_sema_destroy(&waiter->bulk_waiter.event);
			kfree(waiter);
		} else {
			/* Interrupted mid-transfer: park the waiter and tell
			 * userland to come back with BULK_MODE_WAITING. */
			const VCHIQ_BULK_MODE_T mode_waiting =
				VCHIQ_BULK_MODE_WAITING;
			waiter->pid = current->l_proc->p_pid;
			lmutex_lock(&instance->bulk_waiter_list_mutex);
			list_add(&waiter->list, &instance->bulk_waiter_list);
			lmutex_unlock(&instance->bulk_waiter_list_mutex);
			vchiq_log_info(vchiq_arm_log_level,
				"saved bulk_waiter %p for pid %d",
				waiter, current->l_proc->p_pid);

			pargs->mode = mode_waiting;
		}
	} break;

	case VCHIQ_IOC_AWAIT_COMPLETION: {
		VCHIQ_AWAIT_COMPLETION_T *pargs = arg;
		VCHIQ_AWAIT_COMPLETION_T args = *pargs;
		int count = 0;

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		if (!instance->connected) {
			ret = -ENOTCONN;
			break;
		}

		lmutex_lock(&instance->completion_mutex);

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		while ((instance->completion_remove ==
			instance->completion_insert)
			&& !instance->closing) {
			/* Ring empty: drop the mutex while sleeping so the
			 * callback side can make progress. */
			DEBUG_TRACE(AWAIT_COMPLETION_LINE);
			lmutex_unlock(&instance->completion_mutex);
			rc = down_interruptible(&instance->insert_event);
			lmutex_lock(&instance->completion_mutex);
			if (rc != 0) {
				DEBUG_TRACE(AWAIT_COMPLETION_LINE);
				vchiq_log_info(vchiq_arm_log_level,
					"AWAIT_COMPLETION interrupted");
				ret = -EINTR;
				break;
			}
		}
		DEBUG_TRACE(AWAIT_COMPLETION_LINE);

		if (ret == 0) {
			int msgbufcount = args.msgbufcount;
			int remove;

			remove = instance->completion_remove;

			for (count = 0; count < args.count; count++) {
				VCHIQ_COMPLETION_DATA_T *completion;
				VCHIQ_SERVICE_T *service1;
				USER_SERVICE_T *user_service;
				VCHIQ_HEADER_T *header;

				if (remove == instance->completion_insert)
					break;

				completion = &instance->completions[
					remove & (MAX_COMPLETIONS - 1)];

				/* A read memory barrier is needed to prevent
				** the prefetch of a stale completion record
				*/
				rmb();


				/* Restore the caller's original userdata
				 * (swapped out in CREATE_SERVICE). */
				service1 = completion->service_userdata;
				user_service = service1->base.userdata;
				completion->service_userdata =
					user_service->userdata;

				header = completion->header;
				if (header) {
					void __user *msgbuf;
					int msglen;

					msglen = header->size +
						sizeof(VCHIQ_HEADER_T);
					/* This must be a VCHIQ-style service */
					if (args.msgbufsize < msglen) {
						vchiq_log_error(
							vchiq_arm_log_level,
							"header %p: msgbufsize"
							" %x < msglen %x",
							header,
							args.msgbufsize,
							msglen);
						WARN(1, "invalid message "
							"size\n");
						if (count == 0)
							ret = -EMSGSIZE;
						break;
					}
					if (msgbufcount <= 0)
						/* Stall here for lack of a
						** buffer for the message. */
						break;
					/* Get the pointer from user space */
					msgbufcount--;
					if (copy_from_user(&msgbuf,
						(const void __user *)
						&args.msgbufs[msgbufcount],
						sizeof(msgbuf)) != 0) {
						if (count == 0)
							ret = -EFAULT;
						break;
					}

					/* Copy the message to user space */
					if (copy_to_user(msgbuf, header,
						msglen) != 0) {
						if (count == 0)
							ret = -EFAULT;
						break;
					}

					/* Now it has been copied, the message
					** can be released. */
					vchiq_release_message(service1->handle,
						header);

					/* The completion must point to the
					** msgbuf. */
					completion->header = msgbuf;
				}

				if ((completion->reason ==
					VCHIQ_SERVICE_CLOSED) &&
					!instance->use_close_delivered)
					unlock_service(service1);

				if (copy_to_user((void __user *)(
					(size_t)args.buf +
					count * sizeof(VCHIQ_COMPLETION_DATA_T)),
					completion,
					sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
					if (count == 0)
						ret = -EFAULT;
					break;
				}

				/* Ensure that the above copy has completed
				** before advancing the remove pointer. */
				mb();

				instance->completion_remove = ++remove;
			}

			pargs->msgbufcount = msgbufcount;
			pargs->count = count;
		}
		if (count != 0)
			up(&instance->remove_event);

		lmutex_unlock(&instance->completion_mutex);
		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	} break;

	case VCHIQ_IOC_DEQUEUE_MESSAGE: {
		VCHIQ_DEQUEUE_MESSAGE_T *pargs = arg;
		VCHIQ_DEQUEUE_MESSAGE_T args = *pargs;
		USER_SERVICE_T *user_service;
		VCHIQ_HEADER_T *header;

		DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}
		/* Only valid for vchi-style services (see service_callback). */
		user_service = (USER_SERVICE_T *)service->base.userdata;
		if (user_service->is_vchi == 0) {
			ret = -EINVAL;
			break;
		}

		spin_lock(&msg_queue_spinlock);
		if (user_service->msg_remove == user_service->msg_insert) {
			if (!args.blocking) {
				spin_unlock(&msg_queue_spinlock);
				DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
				ret = -EWOULDBLOCK;
				break;
			}
			user_service->dequeue_pending = 1;
			do {
				spin_unlock(&msg_queue_spinlock);
				DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
				if (down_interruptible(
					&user_service->insert_event) != 0) {
					vchiq_log_info(vchiq_arm_log_level,
						"DEQUEUE_MESSAGE interrupted");
					ret = -EINTR;
					break;
				}
				spin_lock(&msg_queue_spinlock);
			} while (user_service->msg_remove ==
				user_service->msg_insert);

			if (ret)
				break;
		}

		BUG_ON((int)(user_service->msg_insert -
			user_service->msg_remove) < 0);

		header = user_service->msg_queue[user_service->msg_remove &
			(MSG_QUEUE_SIZE - 1)];
		user_service->msg_remove++;
		spin_unlock(&msg_queue_spinlock);

		/* Tell a blocked service_callback that a slot is free. */
		up(&user_service->remove_event);
		if (header == NULL)
			ret = -ENOTCONN;
		else if (header->size <= args.bufsize) {
			/* Copy to user space if msgbuf is not NULL */
			if ((args.buf == NULL) ||
				(copy_to_user((void __user *)args.buf,
				header->data,
				header->size) == 0)) {
				pargs->bufsize = header->size;
				vchiq_release_message(
					service->handle,
					header);
			} else
				ret = -EFAULT;
		} else {
			vchiq_log_error(vchiq_arm_log_level,
				"header %p: bufsize %x < size %x",
				header, args.bufsize,
				header->size);
			WARN(1, "invalid size\n");
			ret = -EMSGSIZE;
		}
		DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
	} break;

	case VCHIQ_IOC_GET_CLIENT_ID: {
		VCHIQ_SERVICE_HANDLE_T handle = *(VCHIQ_SERVICE_HANDLE_T *)arg;

		ret = vchiq_get_client_id(handle);
	} break;

	case VCHIQ_IOC_GET_CONFIG: {
		VCHIQ_GET_CONFIG_T *pargs = arg;
		VCHIQ_GET_CONFIG_T args = *pargs;
		VCHIQ_CONFIG_T config;

		if (args.config_size > sizeof(config)) {
			ret = -EINVAL;
			break;
		}
		status = vchiq_get_config(instance, args.config_size, &config);
		if (status == VCHIQ_SUCCESS) {
			if (copy_to_user((void __user *)args.pconfig,
				&config, args.config_size) != 0) {
				ret = -EFAULT;
				break;
			}
		}
	} break;

	case VCHIQ_IOC_SET_SERVICE_OPTION: {
		VCHIQ_SET_SERVICE_OPTION_T *pargs = arg;
		VCHIQ_SET_SERVICE_OPTION_T args = *pargs;

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		status = vchiq_set_service_option(
				args.handle, args.option, args.value);
	} break;

	case VCHIQ_IOC_DUMP_PHYS_MEM: {
#if 0
		VCHIQ_DUMP_MEM_T  *pargs = arg;
#endif

		/* Not implemented in the NetBSD port. */
		printf("IMPLEMENT ME: %s:%d\n", __FILE__, __LINE__);
#if 0
		dump_phys_mem(pargs->virt_addr, pargs->num_bytes);
#endif
	} break;

	case VCHIQ_IOC_LIB_VERSION: {
		unsigned int lib_version = (uintptr_t)arg;

		if (lib_version < VCHIQ_VERSION_MIN)
			ret = -EINVAL;
		else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
			instance->use_close_delivered = 1;
	} break;

	case VCHIQ_IOC_CLOSE_DELIVERED: {
		VCHIQ_SERVICE_HANDLE_T handle = *(VCHIQ_SERVICE_HANDLE_T *)arg;

		service = find_closed_service_for_instance(instance, handle);
		if (service != NULL) {
			USER_SERVICE_T *user_service =
				(USER_SERVICE_T *)service->base.userdata;
			close_delivered(user_service);
		}
		else
			ret = -EINVAL;
	} break;

	default:
		ret = -ENOTTY;
		break;
	}

	if (service)
		unlock_service(service);

	/* Map a VCHIQ status onto an errno unless a case set one already. */
	if (ret == 0) {
		if (status == VCHIQ_ERROR)
			ret = -EIO;
		else if (status == VCHIQ_RETRY)
			ret = -EINTR;
	}

	if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
		(ret != -EWOULDBLOCK))
		vchiq_log_info(vchiq_arm_log_level,
			" ioctl instance %lx, cmd %s -> status %d, %d",
			(unsigned long)instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
			ioctl_names[_IOC_NR(cmd)] :
			"<invalid>",
			status, ret);
	else
		vchiq_log_trace(vchiq_arm_log_level,
			" ioctl instance %lx, cmd %s -> status %d, %d",
			(unsigned long)instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
			ioctl_names[_IOC_NR(cmd)] :
			"<invalid>",
			status, ret);

	/* XXXBSD: report BSD-style error to userland */
	if (ret < 0)
		ret = -ret;

	return ret;
}

/****************************************************************************
*
* vchiq_open
*
***************************************************************************/

/*
 * Open entry point from the cdevsw: allocate a per-open instance and attach
 * it to a cloned file descriptor whose operations are vchiq_fileops.
 * NOTE(review): the error paths mix Linux-style negative errno returns
 * (-ENOTCONN/-ENOMEM/-ENXIO) with NetBSD's positive-errno convention
 * (fd_allocfile's err is negated) - confirm what the devsw caller expects.
 */
static int
vchiq_open(dev_t dev, int flags, int mode, lwp_t *l)
{
	VCHIQ_INSTANCE_T instance = NULL;
	struct file *fp;
	int err, fd;

	vchiq_log_info(vchiq_arm_log_level, "vchiq_open");

	/* XXXBSD: do we really need this check? */
	if (device_lookup_private(&vchiq_cd, minor(dev)) != NULL) {
		VCHIQ_STATE_T *state;
		int i;

		/* Poll briefly for the VideoCore connection to appear. */
		for (i=0; i<10; ++i) {
			state = vchiq_get_state();
			if (state)
				break;
			delay(500);
		}

		if (!state) {
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq has no connection to VideoCore");
			return -ENOTCONN;
		}

		instance = kzalloc(sizeof(*instance), GFP_KERNEL);
		if (!instance)
			return -ENOMEM;

		err = fd_allocfile(&fp, &fd);
		if (err) {
			kfree(instance);
			return -err;
		}

		instance->state = state;
		instance->pid = current->l_proc->p_pid;

#ifdef notyet
		ret = vchiq_debugfs_add_instance(instance);
		if (ret != 0) {
			kfree(instance);
			return ret;
		}
#endif

		_sema_init(&instance->insert_event, 0);
		_sema_init(&instance->remove_event, 0);
		lmutex_init(&instance->completion_mutex);
		lmutex_init(&instance->bulk_waiter_list_mutex);
		INIT_LIST_HEAD(&instance->bulk_waiter_list);

	}
	else {
		vchiq_log_error(vchiq_arm_log_level,
			"Unknown minor device");
		return -ENXIO;
	}

	return fd_clone(fp, fd, flags, &vchiq_fileops, instance);
}

/****************************************************************************
*
* vchiq_release
*
***************************************************************************/

/*
 * Close entry point (fo_close): tear down the instance - mark it closing,
 * wake any waiters, terminate all of its services and drain their message
 * queues, then deliver/free outstanding completions.
 */
static int
vchiq_close(struct file *fp)
{
	int ret = 0;
	if (1) {
		VCHIQ_INSTANCE_T instance = fp->f_data;
		VCHIQ_STATE_T *state = vchiq_get_state();
		VCHIQ_SERVICE_T *service;
		int i;

		vchiq_log_info(vchiq_arm_log_level,
			"vchiq_release: instance=%lx",
			(unsigned long)instance);

		if (!state) {
			ret = -EPERM;
			goto out;
		}

		/* Ensure videocore is awake to allow termination.
*/ 1230 vchiq_use_internal(instance->state, NULL, 1231 USE_TYPE_VCHIQ); 1232 1233 lmutex_lock(&instance->completion_mutex); 1234 1235 /* Wake the completion thread and ask it to exit */ 1236 instance->closing = 1; 1237 up(&instance->insert_event); 1238 1239 lmutex_unlock(&instance->completion_mutex); 1240 1241 /* Wake the slot handler if the completion queue is full. */ 1242 up(&instance->remove_event); 1243 1244 /* Mark all services for termination... */ 1245 i = 0; 1246 while ((service = next_service_by_instance(state, instance, 1247 &i)) != NULL) { 1248 USER_SERVICE_T *user_service = service->base.userdata; 1249 1250 /* Wake the slot handler if the msg queue is full. */ 1251 up(&user_service->remove_event); 1252 1253 vchiq_terminate_service_internal(service); 1254 unlock_service(service); 1255 } 1256 1257 /* ...and wait for them to die */ 1258 i = 0; 1259 while ((service = next_service_by_instance(state, instance, &i)) 1260 != NULL) { 1261 USER_SERVICE_T *user_service = service->base.userdata; 1262 1263 down(&service->remove_event); 1264 1265 BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE); 1266 1267 spin_lock(&msg_queue_spinlock); 1268 1269 while (user_service->msg_remove != 1270 user_service->msg_insert) { 1271 VCHIQ_HEADER_T *header = user_service-> 1272 msg_queue[user_service->msg_remove & 1273 (MSG_QUEUE_SIZE - 1)]; 1274 user_service->msg_remove++; 1275 spin_unlock(&msg_queue_spinlock); 1276 1277 if (header) 1278 vchiq_release_message( 1279 service->handle, 1280 header); 1281 spin_lock(&msg_queue_spinlock); 1282 } 1283 1284 spin_unlock(&msg_queue_spinlock); 1285 1286 unlock_service(service); 1287 } 1288 1289 /* Release any closed services */ 1290 while (instance->completion_remove != 1291 instance->completion_insert) { 1292 VCHIQ_COMPLETION_DATA_T *completion; 1293 VCHIQ_SERVICE_T *service1; 1294 completion = &instance->completions[ 1295 instance->completion_remove & 1296 (MAX_COMPLETIONS - 1)]; 1297 service1 = completion->service_userdata; 1298 if 
(completion->reason == VCHIQ_SERVICE_CLOSED) 1299 { 1300 USER_SERVICE_T *user_service = 1301 service->base.userdata; 1302 1303 /* Wake any blocked user-thread */ 1304 if (instance->use_close_delivered) 1305 up(&user_service->close_event); 1306 unlock_service(service1); 1307 } 1308 instance->completion_remove++; 1309 } 1310 1311 /* Release the PEER service count. */ 1312 vchiq_release_internal(instance->state, NULL); 1313 1314 { 1315 struct list_head *pos, *next; 1316 list_for_each_safe(pos, next, 1317 &instance->bulk_waiter_list) { 1318 struct bulk_waiter_node *waiter; 1319 waiter = list_entry(pos, 1320 struct bulk_waiter_node, 1321 list); 1322 list_del(pos); 1323 vchiq_log_info(vchiq_arm_log_level, 1324 "bulk_waiter - cleaned up %p " 1325 "for pid %d", 1326 waiter, waiter->pid); 1327 _sema_destroy(&waiter->bulk_waiter.event); 1328 kfree(waiter); 1329 } 1330 } 1331 1332 } 1333 else { 1334 vchiq_log_error(vchiq_arm_log_level, 1335 "Unknown minor device"); 1336 ret = -ENXIO; 1337 } 1338 1339out: 1340 return ret; 1341} 1342 1343/**************************************************************************** 1344* 1345* vchiq_dump 1346* 1347***************************************************************************/ 1348 1349void 1350vchiq_dump(void *dump_context, const char *str, int len) 1351{ 1352 DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context; 1353 1354 if (context->actual < context->space) { 1355 int copy_bytes; 1356 if (context->offset > 0) { 1357 int skip_bytes = min(len, (int)context->offset); 1358 str += skip_bytes; 1359 len -= skip_bytes; 1360 context->offset -= skip_bytes; 1361 if (context->offset > 0) 1362 return; 1363 } 1364 copy_bytes = min(len, (int)(context->space - context->actual)); 1365 if (copy_bytes == 0) 1366 return; 1367 memcpy(context->buf + context->actual, str, copy_bytes); 1368 context->actual += copy_bytes; 1369 len -= copy_bytes; 1370 1371 /* If tne terminating NUL is included in the length, then it 1372 ** marks the end of a line 
and should be replaced with a 1373 ** carriage return. */ 1374 if ((len == 0) && (str[copy_bytes - 1] == '\0')) { 1375 char cr = '\n'; 1376 memcpy(context->buf + context->actual - 1, &cr, 1); 1377 } 1378 } 1379} 1380 1381/**************************************************************************** 1382* 1383* vchiq_dump_platform_instance_state 1384* 1385***************************************************************************/ 1386 1387void 1388vchiq_dump_platform_instances(void *dump_context) 1389{ 1390 VCHIQ_STATE_T *state = vchiq_get_state(); 1391 char buf[80]; 1392 int len; 1393 int i; 1394 1395 /* There is no list of instances, so instead scan all services, 1396 marking those that have been dumped. */ 1397 1398 for (i = 0; i < state->unused_service; i++) { 1399 VCHIQ_SERVICE_T *service = state->services[i]; 1400 VCHIQ_INSTANCE_T instance; 1401 1402 if (service && (service->base.callback == service_callback)) { 1403 instance = service->instance; 1404 if (instance) 1405 instance->mark = 0; 1406 } 1407 } 1408 1409 for (i = 0; i < state->unused_service; i++) { 1410 VCHIQ_SERVICE_T *service = state->services[i]; 1411 VCHIQ_INSTANCE_T instance; 1412 1413 if (service && (service->base.callback == service_callback)) { 1414 instance = service->instance; 1415 if (instance && !instance->mark) { 1416 len = snprintf(buf, sizeof(buf), 1417 "Instance %p: pid %d,%s completions " 1418 "%d/%d", 1419 instance, instance->pid, 1420 instance->connected ? 
" connected, " : 1421 "", 1422 instance->completion_insert - 1423 instance->completion_remove, 1424 MAX_COMPLETIONS); 1425 1426 vchiq_dump(dump_context, buf, len + 1); 1427 1428 instance->mark = 1; 1429 } 1430 } 1431 } 1432} 1433 1434/**************************************************************************** 1435* 1436* vchiq_dump_platform_service_state 1437* 1438***************************************************************************/ 1439 1440void 1441vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service) 1442{ 1443 USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata; 1444 char buf[80]; 1445 int len; 1446 1447 len = snprintf(buf, sizeof(buf), " instance %p", 1448 service->instance); 1449 1450 if ((service->base.callback == service_callback) && 1451 user_service->is_vchi) { 1452 len += snprintf(buf + len, sizeof(buf) - len, 1453 ", %d/%d messages", 1454 user_service->msg_insert - user_service->msg_remove, 1455 MSG_QUEUE_SIZE); 1456 1457 if (user_service->dequeue_pending) 1458 len += snprintf(buf + len, sizeof(buf) - len, 1459 " (dequeue pending)"); 1460 } 1461 1462 vchiq_dump(dump_context, buf, len + 1); 1463} 1464 1465#ifdef notyet 1466/**************************************************************************** 1467* 1468* dump_user_mem 1469* 1470***************************************************************************/ 1471 1472static void 1473dump_phys_mem(void *virt_addr, uint32_t num_bytes) 1474{ 1475 int rc; 1476 uint8_t *end_virt_addr = virt_addr + num_bytes; 1477 int num_pages; 1478 int offset; 1479 int end_offset; 1480 int page_idx; 1481 int prev_idx; 1482 struct page *page; 1483 struct page **pages; 1484 uint8_t *kmapped_virt_ptr; 1485 1486 /* Align virtAddr and endVirtAddr to 16 byte boundaries. 
*/ 1487 1488 virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL); 1489 end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) & 1490 ~0x0fuL); 1491 1492 offset = (int)(long)virt_addr & (PAGE_SIZE - 1); 1493 end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1); 1494 1495 num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE; 1496 1497 pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL); 1498 if (pages == NULL) { 1499 vchiq_log_error(vchiq_arm_log_level, 1500 "Unable to allocation memory for %d pages", 1501 num_pages); 1502 return; 1503 } 1504 1505 down_read(¤t->mm->mmap_sem); 1506 rc = get_user_pages(current, /* task */ 1507 current->mm, /* mm */ 1508 (unsigned long)virt_addr, /* start */ 1509 num_pages, /* len */ 1510 0, /* write */ 1511 0, /* force */ 1512 pages, /* pages (array of page pointers) */ 1513 NULL); /* vmas */ 1514 up_read(¤t->mm->mmap_sem); 1515 1516 prev_idx = -1; 1517 page = NULL; 1518 1519 while (offset < end_offset) { 1520 1521 int page_offset = offset % PAGE_SIZE; 1522 page_idx = offset / PAGE_SIZE; 1523 1524 if (page_idx != prev_idx) { 1525 1526 if (page != NULL) 1527 kunmap(page); 1528 page = pages[page_idx]; 1529 kmapped_virt_ptr = kmap(page); 1530 1531 prev_idx = page_idx; 1532 } 1533 1534 if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE) 1535 vchiq_log_dump_mem("ph", 1536 (uint32_t)(unsigned long)&kmapped_virt_ptr[ 1537 page_offset], 1538 &kmapped_virt_ptr[page_offset], 16); 1539 1540 offset += 16; 1541 } 1542 if (page != NULL) 1543 kunmap(page); 1544 1545 for (page_idx = 0; page_idx < num_pages; page_idx++) 1546 page_cache_release(pages[page_idx]); 1547 1548 kfree(pages); 1549} 1550#endif 1551 1552/**************************************************************************** 1553* 1554* vchiq_read 1555* 1556***************************************************************************/ 1557 1558static int 1559vchiq_read(struct file *file, off_t *ppos, struct uio *uio, kauth_cred_t cred, 1560 int flags) 1561{ 1562 int 
result;

	/* Render the global VCHIQ state dump into a page-sized scratch
	 * buffer, then copy the requested window out to the caller via
	 * uiomove(); the file offset tracks how much has been consumed. */
	char *buf = kmem_zalloc(PAGE_SIZE, KM_SLEEP);

	DUMP_CONTEXT_T context;
	context.buf = buf;
	context.actual = 0;
	context.space = PAGE_SIZE;
	context.offset = *ppos;

	vchiq_dump_state(&context, &g_state);

	*ppos += context.actual;

	result = uiomove(buf, context.actual, uio);
	kmem_free(buf, PAGE_SIZE);

	return result;
}

/*
 * Return a pointer to the global VCHIQ state, or NULL if the remote
 * (VideoCore) side has not yet initialised.  Logs the reason when the
 * state is not usable.
 */
VCHIQ_STATE_T *
vchiq_get_state(void)
{

	if (g_state.remote == NULL)
		printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
	else if (g_state.remote->initialised != 1)
		printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
			__func__, g_state.remote->initialised);

	return ((g_state.remote != NULL) &&
		(g_state.remote->initialised == 1)) ? &g_state : NULL;
}

/*
 * Autosuspend related functionality
 */

/*
 * Decide whether VideoCore is still wanted.  Returns non-zero when
 * autosuspend is unsupported, clients are blocked waiting, or the use
 * count is non-zero; otherwise defers to the platform unless resume is
 * currently blocked.
 *
 * NOTE(review): the unbraced if/else chain below relies on C's
 * dangling-else binding; the final "else" pairs with the
 * "!videocore_use_count" branch as intended, but braces would make
 * that explicit.
 */
int
vchiq_videocore_wanted(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	if (!arm_state)
		/* autosuspend not supported - always return wanted */
		return 1;
	else if (arm_state->blocked_count)
		return 1;
	else if (!arm_state->videocore_use_count)
		/* usage count zero - check for override unless we're forcing */
		if (arm_state->resume_blocked)
			return 0;
		else
			return vchiq_platform_videocore_wanted(state);
	else
		/* non-zero usage count - videocore still required */
		return 1;
}

/*
 * Service callback for the keep-alive service.  No callbacks are
 * expected on this service, so any invocation is logged as an error.
 */
static VCHIQ_STATUS_T
vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T service_user,
	void *bulk_user)
{
	vchiq_log_error(vchiq_susp_log_level,
		"%s callback reason %d", __func__, reason);
	return 0;
}

/*
 * Kernel thread that mirrors ARM-side use/release counts onto the
 * "KEEP" keep-alive service, so VideoCore knows when it is still in use.
 */
static int
vchiq_keepalive_thread_func(void *v)
{
	VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
	VCHIQ_ARM_STATE_T *arm_state =
vchiq_platform_get_arm_state(state); 1636 1637 VCHIQ_STATUS_T status; 1638 VCHIQ_INSTANCE_T instance; 1639 VCHIQ_SERVICE_HANDLE_T ka_handle; 1640 1641 VCHIQ_SERVICE_PARAMS_T params = { 1642 .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'), 1643 .callback = vchiq_keepalive_vchiq_callback, 1644 .version = KEEPALIVE_VER, 1645 .version_min = KEEPALIVE_VER_MIN 1646 }; 1647 1648 status = vchiq_initialise(&instance); 1649 if (status != VCHIQ_SUCCESS) { 1650 vchiq_log_error(vchiq_susp_log_level, 1651 "%s vchiq_initialise failed %d", __func__, status); 1652 goto exit; 1653 } 1654 1655 status = vchiq_connect(instance); 1656 if (status != VCHIQ_SUCCESS) { 1657 vchiq_log_error(vchiq_susp_log_level, 1658 "%s vchiq_connect failed %d", __func__, status); 1659 goto shutdown; 1660 } 1661 1662 status = vchiq_add_service(instance, ¶ms, &ka_handle); 1663 if (status != VCHIQ_SUCCESS) { 1664 vchiq_log_error(vchiq_susp_log_level, 1665 "%s vchiq_open_service failed %d", __func__, status); 1666 goto shutdown; 1667 } 1668 1669 while (1) { 1670 long rc = 0, uc = 0; 1671 if (wait_for_completion_interruptible(&arm_state->ka_evt) 1672 != 0) { 1673 vchiq_log_error(vchiq_susp_log_level, 1674 "%s interrupted", __func__); 1675 flush_signals(current); 1676 continue; 1677 } 1678 1679 /* read and clear counters. Do release_count then use_count to 1680 * prevent getting more releases than uses */ 1681 rc = atomic_xchg(&arm_state->ka_release_count, 0); 1682 uc = atomic_xchg(&arm_state->ka_use_count, 0); 1683 1684 /* Call use/release service the requisite number of times. 
1685 * Process use before release so use counts don't go negative */ 1686 while (uc--) { 1687 atomic_inc(&arm_state->ka_use_ack_count); 1688 status = vchiq_use_service(ka_handle); 1689 if (status != VCHIQ_SUCCESS) { 1690 vchiq_log_error(vchiq_susp_log_level, 1691 "%s vchiq_use_service error %d", 1692 __func__, status); 1693 } 1694 } 1695 while (rc--) { 1696 status = vchiq_release_service(ka_handle); 1697 if (status != VCHIQ_SUCCESS) { 1698 vchiq_log_error(vchiq_susp_log_level, 1699 "%s vchiq_release_service error %d", 1700 __func__, status); 1701 } 1702 } 1703 } 1704 1705shutdown: 1706 vchiq_shutdown(instance); 1707exit: 1708 return 0; 1709} 1710 1711 1712 1713VCHIQ_STATUS_T 1714vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state) 1715{ 1716 VCHIQ_STATUS_T status = VCHIQ_SUCCESS; 1717 1718 if (arm_state) { 1719 rwlock_init(&arm_state->susp_res_lock); 1720 1721 init_completion(&arm_state->ka_evt); 1722 atomic_set(&arm_state->ka_use_count, 0); 1723 atomic_set(&arm_state->ka_use_ack_count, 0); 1724 atomic_set(&arm_state->ka_release_count, 0); 1725 1726 init_completion(&arm_state->vc_suspend_complete); 1727 1728 init_completion(&arm_state->vc_resume_complete); 1729 /* Initialise to 'done' state. We only want to block on resume 1730 * completion while videocore is suspended. */ 1731 set_resume_state(arm_state, VC_RESUME_RESUMED); 1732 1733 init_completion(&arm_state->resume_blocker); 1734 /* Initialise to 'done' state. We only want to block on this 1735 * completion while resume is blocked */ 1736 complete_all(&arm_state->resume_blocker); 1737 1738 init_completion(&arm_state->blocked_blocker); 1739 /* Initialise to 'done' state. 
We only want to block on this 1740 * completion while things are waiting on the resume blocker */ 1741 complete_all(&arm_state->blocked_blocker); 1742 1743 arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS; 1744 arm_state->suspend_timer_running = 0; 1745 init_timer(&arm_state->suspend_timer); 1746 arm_state->suspend_timer.data = (unsigned long)(state); 1747 arm_state->suspend_timer.function = suspend_timer_callback; 1748 1749 arm_state->first_connect = 0; 1750 1751 } 1752 return status; 1753} 1754 1755/* 1756** Functions to modify the state variables; 1757** set_suspend_state 1758** set_resume_state 1759** 1760** There are more state variables than we might like, so ensure they remain in 1761** step. Suspend and resume state are maintained separately, since most of 1762** these state machines can operate independently. However, there are a few 1763** states where state transitions in one state machine cause a reset to the 1764** other state machine. In addition, there are some completion events which 1765** need to occur on state machine reset and end-state(s), so these are also 1766** dealt with in these functions. 1767** 1768** In all states we set the state variable according to the input, but in some 1769** cases we perform additional steps outlined below; 1770** 1771** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time. 1772** The suspend completion is completed after any suspend 1773** attempt. When we reset the state machine we also reset 1774** the completion. This reset occurs when videocore is 1775** resumed, and also if we initiate suspend after a suspend 1776** failure. 1777** 1778** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for 1779** suspend - ie from this point on we must try to suspend 1780** before resuming can occur. We therefore also reset the 1781** resume state machine to VC_RESUME_IDLE in this state. 1782** 1783** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. 
Also call 1784** complete_all on the suspend completion to notify 1785** anything waiting for suspend to happen. 1786** 1787** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also 1788** initiate resume, so no need to alter resume state. 1789** We call complete_all on the suspend completion to notify 1790** of suspend rejection. 1791** 1792** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the 1793** suspend completion and reset the resume state machine. 1794** 1795** VC_RESUME_IDLE - Initialise the resume completion at the same time. The 1796** resume completion is in it's 'done' state whenever 1797** videcore is running. Therfore, the VC_RESUME_IDLE state 1798** implies that videocore is suspended. 1799** Hence, any thread which needs to wait until videocore is 1800** running can wait on this completion - it will only block 1801** if videocore is suspended. 1802** 1803** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running. 1804** Call complete_all on the resume completion to unblock 1805** any threads waiting for resume. Also reset the suspend 1806** state machine to it's idle state. 1807** 1808** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists. 
*/

/*
 * Set the suspend state and perform the state-specific side effects
 * described in the block comment above (completing or re-initialising
 * the suspend/resume completions, and resetting the resume state
 * machine where required).  Callers must hold the susp_res write lock.
 */
inline void
set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
	enum vc_suspend_status new_state)
{
	/* set the state in all cases */
	arm_state->vc_suspend_state = new_state;

	/* state specific additional actions */
	switch (new_state) {
	case VC_SUSPEND_FORCE_CANCELED:
		complete_all(&arm_state->vc_suspend_complete);
		break;
	case VC_SUSPEND_REJECTED:
		complete_all(&arm_state->vc_suspend_complete);
		break;
	case VC_SUSPEND_FAILED:
		complete_all(&arm_state->vc_suspend_complete);
		/* Failed suspend implies videocore is still running; force
		 * the resume machine back to RESUMED and release waiters. */
		arm_state->vc_resume_state = VC_RESUME_RESUMED;
		complete_all(&arm_state->vc_resume_complete);
		break;
	case VC_SUSPEND_IDLE:
		reinit_completion(&arm_state->vc_suspend_complete);
		break;
	case VC_SUSPEND_REQUESTED:
		break;
	case VC_SUSPEND_IN_PROGRESS:
		/* Point of no return - reset the resume state machine */
		set_resume_state(arm_state, VC_RESUME_IDLE);
		break;
	case VC_SUSPEND_SUSPENDED:
		complete_all(&arm_state->vc_suspend_complete);
		break;
	default:
		BUG();
		break;
	}
}

/*
 * Counterpart of set_suspend_state() for the resume state machine;
 * see the block comment above for the per-state semantics.  Callers
 * must hold the susp_res write lock.
 */
inline void
set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
	enum vc_resume_status new_state)
{
	/* set the state in all cases */
	arm_state->vc_resume_state = new_state;

	/* state specific additional actions */
	switch (new_state) {
	case VC_RESUME_FAILED:
		break;
	case VC_RESUME_IDLE:
		reinit_completion(&arm_state->vc_resume_complete);
		break;
	case VC_RESUME_REQUESTED:
		break;
	case VC_RESUME_IN_PROGRESS:
		break;
	case VC_RESUME_RESUMED:
		/* Unblock resume waiters, then reset the suspend machine */
		complete_all(&arm_state->vc_resume_complete);
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);
		break;
	default:
		BUG();
		break;
	}
}


/* should be called with the write lock held */
inline void
start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
{
	del_timer(&arm_state->suspend_timer);
	arm_state->suspend_timer.expires = jiffies +
msecs_to_jiffies(arm_state-> 1884 suspend_timer_timeout); 1885 add_timer(&arm_state->suspend_timer); 1886 arm_state->suspend_timer_running = 1; 1887} 1888 1889/* should be called with the write lock held */ 1890static inline void 1891stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state) 1892{ 1893 if (arm_state->suspend_timer_running) { 1894 del_timer(&arm_state->suspend_timer); 1895 arm_state->suspend_timer_running = 0; 1896 } 1897} 1898 1899static inline int 1900need_resume(VCHIQ_STATE_T *state) 1901{ 1902 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 1903 return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) && 1904 (arm_state->vc_resume_state < VC_RESUME_REQUESTED) && 1905 vchiq_videocore_wanted(state); 1906} 1907 1908static int 1909block_resume(VCHIQ_ARM_STATE_T *arm_state) 1910{ 1911 int status = VCHIQ_SUCCESS; 1912 const unsigned long timeout_val = 1913 msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS); 1914 int resume_count = 0; 1915 1916 /* Allow any threads which were blocked by the last force suspend to 1917 * complete if they haven't already. 
Only give this one shot; if 1918 * blocked_count is incremented after blocked_blocker is completed 1919 * (which only happens when blocked_count hits 0) then those threads 1920 * will have to wait until next time around */ 1921 if (arm_state->blocked_count) { 1922 reinit_completion(&arm_state->blocked_blocker); 1923 write_unlock_bh(&arm_state->susp_res_lock); 1924 vchiq_log_info(vchiq_susp_log_level, "%s wait for previously " 1925 "blocked clients", __func__); 1926 if (wait_for_completion_interruptible_timeout( 1927 &arm_state->blocked_blocker, timeout_val) 1928 <= 0) { 1929 vchiq_log_error(vchiq_susp_log_level, "%s wait for " 1930 "previously blocked clients failed" , __func__); 1931 status = VCHIQ_ERROR; 1932 write_lock_bh(&arm_state->susp_res_lock); 1933 goto out; 1934 } 1935 vchiq_log_info(vchiq_susp_log_level, "%s previously blocked " 1936 "clients resumed", __func__); 1937 write_lock_bh(&arm_state->susp_res_lock); 1938 } 1939 1940 /* We need to wait for resume to complete if it's in process */ 1941 while (arm_state->vc_resume_state != VC_RESUME_RESUMED && 1942 arm_state->vc_resume_state > VC_RESUME_IDLE) { 1943 if (resume_count > 1) { 1944 status = VCHIQ_ERROR; 1945 vchiq_log_error(vchiq_susp_log_level, "%s waited too " 1946 "many times for resume" , __func__); 1947 goto out; 1948 } 1949 write_unlock_bh(&arm_state->susp_res_lock); 1950 vchiq_log_info(vchiq_susp_log_level, "%s wait for resume", 1951 __func__); 1952 if (wait_for_completion_interruptible_timeout( 1953 &arm_state->vc_resume_complete, timeout_val) 1954 <= 0) { 1955 vchiq_log_error(vchiq_susp_log_level, "%s wait for " 1956 "resume failed (%s)", __func__, 1957 resume_state_names[arm_state->vc_resume_state + 1958 VC_RESUME_NUM_OFFSET]); 1959 status = VCHIQ_ERROR; 1960 write_lock_bh(&arm_state->susp_res_lock); 1961 goto out; 1962 } 1963 vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__); 1964 write_lock_bh(&arm_state->susp_res_lock); 1965 resume_count++; 1966 } 1967 
reinit_completion(&arm_state->resume_blocker);
	arm_state->resume_blocked = 1;

out:
	return status;
}

/* Undo block_resume(): release the blocker so resume may proceed.
 * Should be called with the write lock held. */
static inline void
unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
{
	complete_all(&arm_state->resume_blocker);
	arm_state->resume_blocked = 0;
}

/* Initiate suspend via slot handler. Should be called with the write lock
 * held */
VCHIQ_STATUS_T
vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
{
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	status = VCHIQ_SUCCESS;


	/* NOTE: the cases below are deliberately ordered so that
	 * "unexpected state" falls through to REJECTED/FAILED cleanup,
	 * which in turn falls through to IDLE to request the suspend. */
	switch (arm_state->vc_suspend_state) {
	case VC_SUSPEND_REQUESTED:
		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
			"requested", __func__);
		break;
	case VC_SUSPEND_IN_PROGRESS:
		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
			"progress", __func__);
		break;

	default:
		/* We don't expect to be in other states, so log but continue
		 * anyway */
		vchiq_log_error(vchiq_susp_log_level,
			"%s unexpected suspend state %s", __func__,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		/* fall through */
	case VC_SUSPEND_REJECTED:
	case VC_SUSPEND_FAILED:
		/* Ensure any idle state actions have been run */
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);
		/* fall through */
	case VC_SUSPEND_IDLE:
		vchiq_log_info(vchiq_susp_log_level,
			"%s: suspending", __func__);
		set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
		/* kick the slot handler thread to initiate suspend */
		request_poll(state, NULL, 0);
		break;
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
	return status;
}

/* If a suspend has been requested and videocore is resumed, move to
 * IN_PROGRESS under the write lock and perform the platform suspend
 * outside the lock. */
void
vchiq_platform_check_suspend(VCHIQ_STATE_T
*state) 2035{ 2036 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2037 int susp = 0; 2038 2039 if (!arm_state) 2040 goto out; 2041 2042 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2043 2044 write_lock_bh(&arm_state->susp_res_lock); 2045 if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED && 2046 arm_state->vc_resume_state == VC_RESUME_RESUMED) { 2047 set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS); 2048 susp = 1; 2049 } 2050 write_unlock_bh(&arm_state->susp_res_lock); 2051 2052 if (susp) 2053 vchiq_platform_suspend(state); 2054 2055out: 2056 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__); 2057 return; 2058} 2059 2060 2061static void 2062output_timeout_error(VCHIQ_STATE_T *state) 2063{ 2064 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2065 char service_err[50] = ""; 2066 int vc_use_count = arm_state->videocore_use_count; 2067 int active_services = state->unused_service; 2068 int i; 2069 2070 if (!arm_state->videocore_use_count) { 2071 snprintf(service_err, 50, " Videocore usecount is 0"); 2072 goto output_msg; 2073 } 2074 for (i = 0; i < active_services; i++) { 2075 VCHIQ_SERVICE_T *service_ptr = state->services[i]; 2076 if (service_ptr && service_ptr->service_use_count && 2077 (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) { 2078 snprintf(service_err, 50, " %c%c%c%c(%8x) service has " 2079 "use count %d%s", VCHIQ_FOURCC_AS_4CHARS( 2080 service_ptr->base.fourcc), 2081 service_ptr->client_id, 2082 service_ptr->service_use_count, 2083 service_ptr->service_use_count == 2084 vc_use_count ? "" : " (+ more)"); 2085 break; 2086 } 2087 } 2088 2089output_msg: 2090 vchiq_log_error(vchiq_susp_log_level, 2091 "timed out waiting for vc suspend (%d).%s", 2092 arm_state->autosuspend_override, service_err); 2093 2094} 2095 2096/* Try to get videocore into suspended state, regardless of autosuspend state. 
** We don't actually force suspend, since videocore may get into a bad state
** if we force suspend at a bad time. Instead, we wait for autosuspend to
** determine a good point to suspend. If this doesn't happen within 100ms we
** report failure.
**
** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
*/
VCHIQ_STATUS_T
vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	long rc = 0;
	int repeat = -1;	/* -1 => the first timeout may be retried once */

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);

	/* Prevent anyone else resuming while we try to suspend */
	status = block_resume(arm_state);
	if (status != VCHIQ_SUCCESS)
		goto unlock;
	if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
		/* Already suspended - just block resume and exit */
		vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
			__func__);
		status = VCHIQ_SUCCESS;
		goto unlock;
	} else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
		/* initiate suspend immediately in the case that we're waiting
		 * for the timeout */
		stop_suspend_timer(arm_state);
		if (!vchiq_videocore_wanted(state)) {
			vchiq_log_info(vchiq_susp_log_level, "%s videocore "
				"idle, initiating suspend", __func__);
			status = vchiq_arm_vcsuspend(state);
		} else if (arm_state->autosuspend_override <
						FORCE_SUSPEND_FAIL_MAX) {
			vchiq_log_info(vchiq_susp_log_level, "%s letting "
				"videocore go idle", __func__);
			status = VCHIQ_SUCCESS;
		} else {
			/* Too many failed attempts to autosuspend - suspend
			 * even though something still wants videocore */
			vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
				"many times - attempting suspend", __func__);
			status = vchiq_arm_vcsuspend(state);
		}
	} else {
		vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
			"in progress - wait for completion", __func__);
		status = VCHIQ_SUCCESS;
	}

	/* Wait for suspend to happen due to system idle (not forced..) */
	if (status != VCHIQ_SUCCESS)
		goto unblock_resume;

	do {
		/* Drop the lock while sleeping; other paths advance the
		 * suspend state machine and signal vc_suspend_complete */
		write_unlock_bh(&arm_state->susp_res_lock);

		rc = wait_for_completion_interruptible_timeout(
				&arm_state->vc_suspend_complete,
				msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));

		write_lock_bh(&arm_state->susp_res_lock);
		if (rc < 0) {
			/* Interrupted by a signal */
			vchiq_log_warning(vchiq_susp_log_level, "%s "
				"interrupted waiting for suspend", __func__);
			status = VCHIQ_ERROR;
			goto unblock_resume;
		} else if (rc == 0) {
			/* Timed out */
			if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
				/* Repeat timeout once if in progress */
				if (repeat < 0) {
					repeat = 1;
					continue;
				}
			}
			/* Count the failure so repeated forced attempts can
			 * eventually escalate (see FORCE_SUSPEND_FAIL_MAX) */
			arm_state->autosuspend_override++;
			output_timeout_error(state);

			status = VCHIQ_RETRY;
			goto unblock_resume;
		}
	} while (0 < (repeat--));

	/* Check and report state in case we need to abort ARM suspend */
	if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
		status = VCHIQ_RETRY;
		vchiq_log_error(vchiq_susp_log_level,
			"%s videocore suspend failed (state %s)", __func__,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		/* Reset the state only if it's still in an error state.
		 * Something could have already initiated another suspend. */
		if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
			set_suspend_state(arm_state, VC_SUSPEND_IDLE);

		goto unblock_resume;
	}

	/* successfully suspended - unlock and exit */
	goto unlock;

/* NOTE: the label intentionally shares its name with the unblock_resume()
 * function called below; C keeps labels and ordinary identifiers in
 * separate namespaces, so this is legal (if easy to misread). */
unblock_resume:
	/* all error states need to unblock resume before exit */
	unblock_resume(arm_state);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
	return status;
}

/* Initiate a videocore suspend if the first connect has happened, nothing
 * currently wants videocore and it is not already suspended. Called from
 * suspend_timer_callback() when the suspend timer expires. Takes the
 * suspend/resume write lock internally. */
void
vchiq_check_suspend(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
			arm_state->first_connect &&
			!vchiq_videocore_wanted(state)) {
		vchiq_arm_vcsuspend(state);
	}
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
	return;
}


/* Undo a previous block_resume() and, if a resume is thereby triggered, wait
 * for it to complete. Returns 0 if videocore ended up resumed, -1 if it
 * remains suspended (or if the wait was interrupted / arm_state is NULL). */
int
vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int resume = 0;
	int ret = -1;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);
	unblock_resume(arm_state);
	resume = vchiq_check_resume(state);
	write_unlock_bh(&arm_state->susp_res_lock);

	if (resume) {
		if (wait_for_completion_interruptible(
			&arm_state->vc_resume_complete) < 0) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			/* failed, cannot accurately derive suspend
			 * state, so exit early. */
			goto out;
		}
	}

	read_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
		vchiq_log_info(vchiq_susp_log_level,
			"%s: Videocore remains suspended", __func__);
	} else {
		vchiq_log_info(vchiq_susp_log_level,
			"%s: Videocore resumed", __func__);
		ret = 0;
	}
	read_unlock_bh(&arm_state->susp_res_lock);
out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}

/* This function should be called with the write lock held.
 * If a resume is needed, moves the resume state machine to
 * VC_RESUME_REQUESTED and pokes the poll thread. Returns 1 if a resume was
 * requested, 0 otherwise. */
int
vchiq_check_resume(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int resume = 0;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	if (need_resume(state)) {
		set_resume_state(arm_state, VC_RESUME_REQUESTED);
		request_poll(state, NULL, 0);
		resume = 1;
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
	return resume;
}

#ifdef notyet
/* Platform hook (not yet enabled on this port): if a resume has been
 * requested and videocore is asleep (wake_address != 0), advance the state
 * machine to VC_RESUME_IN_PROGRESS and kick off the platform resume with the
 * lock dropped. */
void
vchiq_platform_check_resume(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int res = 0;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->wake_address == 0) {
		vchiq_log_info(vchiq_susp_log_level,
					"%s: already awake", __func__);
		goto unlock;
	}
	if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
		vchiq_log_info(vchiq_susp_log_level,
					"%s: already resuming", __func__);
		goto unlock;
	}

	if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
		set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
		res = 1;
	} else
		vchiq_log_trace(vchiq_susp_log_level,
				"%s: not resuming (resume state %s)", __func__,
				resume_state_names[arm_state->vc_resume_state +
							VC_RESUME_NUM_OFFSET]);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

	/* Call the platform resume outside the lock */
	if (res)
		vchiq_platform_resume(state);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
	return;

}
#endif



/* Take a videocore use count on behalf of a service (or of VCHIQ itself when
 * use_type == USE_TYPE_VCHIQ), resuming videocore if necessary. May block:
 * first on the resume blocker if a force suspend is in flight, then on
 * vc_resume_complete until videocore has resumed. Returns VCHIQ_SUCCESS or
 * VCHIQ_ERROR (NULL service, or a wait interrupted by a fatal signal). */
VCHIQ_STATUS_T
vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
		enum USE_TYPE_E use_type)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
	char entity[16];	/* human-readable tag for log messages */
	int *entity_uc;		/* per-entity use count to bump */
	int local_uc, local_entity_uc;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	if (use_type == USE_TYPE_VCHIQ) {
		snprintf(entity, sizeof(entity), "VCHIQ: ");
		entity_uc = &arm_state->peer_use_count;
	} else if (service) {
		snprintf(entity, sizeof(entity), "%c%c%c%c:%8x",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		vchiq_log_error(vchiq_susp_log_level, "%s null service "
				"ptr", __func__);
		ret = VCHIQ_ERROR;
		goto out;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	while (arm_state->resume_blocked) {
		/* If we call 'use' while force suspend is waiting for suspend,
		 * then we're about to block the thread which the force is
		 * waiting to complete, so we're bound to just time out. In this
		 * case, set the suspend state such that the wait will be
		 * canceled, so we can complete as quickly as possible. */
		if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
				VC_SUSPEND_IDLE) {
			set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
			break;
		}
		/* If suspend is already in progress then we need to block */
		if (!try_wait_for_completion(&arm_state->resume_blocker)) {
			/* Indicate that there are threads waiting on the resume
			 * blocker. These need to be allowed to complete before
			 * a _second_ call to force suspend can complete,
			 * otherwise low priority threads might never actually
			 * continue */
			arm_state->blocked_count++;
			write_unlock_bh(&arm_state->susp_res_lock);
			vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
				"blocked - waiting...", __func__, entity);
			if (wait_for_completion_killable(
					&arm_state->resume_blocker) != 0) {
				vchiq_log_error(vchiq_susp_log_level, "%s %s "
					"wait for resume blocker interrupted",
					__func__, entity);
				ret = VCHIQ_ERROR;
				/* Retake the lock just to undo blocked_count */
				write_lock_bh(&arm_state->susp_res_lock);
				arm_state->blocked_count--;
				write_unlock_bh(&arm_state->susp_res_lock);
				goto out;
			}
			vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
				"unblocked", __func__, entity);
			write_lock_bh(&arm_state->susp_res_lock);
			/* Last blocked thread out signals force suspend */
			if (--arm_state->blocked_count == 0)
				complete_all(&arm_state->blocked_blocker);
		}
	}

	/* Videocore is in use again - no point letting the timer fire */
	stop_suspend_timer(arm_state);

	local_uc = ++arm_state->videocore_use_count;
	local_entity_uc = ++(*entity_uc);

	/* If there's a pending request which hasn't yet been serviced then
	 * just clear it. If we're past VC_SUSPEND_REQUESTED state then
	 * vc_resume_complete will block until we either resume or fail to
	 * suspend */
	if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);

	if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
		set_resume_state(arm_state, VC_RESUME_REQUESTED);
		vchiq_log_info(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, local_entity_uc, local_uc);
		request_poll(state, NULL, 0);
	} else
		vchiq_log_trace(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, *entity_uc, local_uc);


	write_unlock_bh(&arm_state->susp_res_lock);

	/* Completion is in a done state when we're not suspended, so this won't
	 * block for the non-suspended case. */
	if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
		vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
			__func__, entity);
		if (wait_for_completion_killable(
				&arm_state->vc_resume_complete) != 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
				"resume interrupted", __func__, entity);
			ret = VCHIQ_ERROR;
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
			entity);
	}

	if (ret == VCHIQ_SUCCESS) {
		VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
		/* Drain the acks queued by the keep-alive thread; on a send
		 * failure put the unsent remainder back for a later retry. */
		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
		while (ack_cnt && (status == VCHIQ_SUCCESS)) {
			/* Send the use notify to videocore */
			status = vchiq_send_remote_use_active(state);
			if (status == VCHIQ_SUCCESS)
				ack_cnt--;
			else
				atomic_add(ack_cnt,
					&arm_state->ka_use_ack_count);
		}
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}

/* Drop a videocore use count taken by vchiq_use_internal() (for the given
 * service, or for the peer when service is NULL). When nothing wants
 * videocore any more, either start the suspend timer or suspend immediately.
 * Returns VCHIQ_ERROR on use-count underflow, VCHIQ_SUCCESS otherwise.
 * NOTE(review): the no-service tag here is "PEER: " while the use side uses
 * "VCHIQ: " - presumably intentional (log strings only); confirm. */
VCHIQ_STATUS_T
vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
	char entity[16];
	int *entity_uc;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	if (service) {
		snprintf(entity, sizeof(entity), "%c%c%c%c:%8x",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		snprintf(entity, sizeof(entity), "PEER: ");
		entity_uc = &arm_state->peer_use_count;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	if (!arm_state->videocore_use_count || !(*entity_uc)) {
		/* Don't use BUG_ON - don't allow user thread to crash kernel */
		WARN_ON(!arm_state->videocore_use_count);
		WARN_ON(!(*entity_uc));
		ret = VCHIQ_ERROR;
		goto unlock;
	}
	--arm_state->videocore_use_count;
	--(*entity_uc);

	if (!vchiq_videocore_wanted(state)) {
		if (vchiq_platform_use_suspend_timer() &&
				!arm_state->resume_blocked) {
			/* Only use the timer if we're not trying to force
			 * suspend (=> resume_blocked) */
			start_suspend_timer(arm_state);
		} else {
			vchiq_log_info(vchiq_susp_log_level,
				"%s %s count %d, state count %d - suspending",
				__func__, entity, *entity_uc,
				arm_state->videocore_use_count);
			vchiq_arm_vcsuspend(state);
		}
	} else
		vchiq_log_trace(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, *entity_uc,
			arm_state->videocore_use_count);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}

/* Remote (videocore) "use" notification: count it and wake the keep-alive
 * thread, which performs the actual use on its behalf. */
void
vchiq_on_remote_use(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	atomic_inc(&arm_state->ka_use_count);
	complete(&arm_state->ka_evt);
}

/* Remote (videocore) "release" notification: count it and wake the
 * keep-alive thread. */
void
vchiq_on_remote_release(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	atomic_inc(&arm_state->ka_release_count);
	complete(&arm_state->ka_evt);
}

/* Convenience wrapper: take a use count for a service (with resume). */
VCHIQ_STATUS_T
vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}

/* Convenience wrapper: release a service's use count. */
VCHIQ_STATUS_T
vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
{
	return vchiq_release_internal(service->state, service);
}

#ifdef notyet
/* Debugfs accessors (not yet wired up on this port). */
VCHIQ_DEBUGFS_NODE_T *
vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance)
{
	return &instance->debugfs_node;
}

/* Sum the use counts of every service belonging to this instance. */
int
vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
{
	VCHIQ_SERVICE_T *service;
	int use_count = 0, i;
	i = 0;
	while ((service = next_service_by_instance(instance->state,
		instance, &i)) != NULL) {
		use_count += service->service_use_count;
		unlock_service(service);
	}
	return use_count;
}

int
vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance)
{
	return instance->pid;
}

int
vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance)
{
	return instance->trace;
}

/* Propagate the trace flag to the instance and all of its services. */
void
vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
{
	VCHIQ_SERVICE_T *service;
	int i;
	i = 0;
	while ((service = next_service_by_instance(instance->state,
		instance, &i)) != NULL) {
		service->trace = trace;
		unlock_service(service);
	}
	instance->trace = (trace != 0);
}
#endif

/* Suspend timer handler - the context argument is the VCHIQ_STATE_T pointer
 * smuggled through the timer's unsigned long. Just rechecks whether
 * videocore can now be suspended. */
static void suspend_timer_callback(unsigned long context)
{
	VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	if (!arm_state)
		goto out;
	vchiq_log_info(vchiq_susp_log_level,
		"%s - suspend timer expired - check suspend", __func__);
	vchiq_check_suspend(state);
out:
	return;
}

/* Public API: take a use count on a service by handle, without triggering a
 * videocore resume. Returns VCHIQ_ERROR for an invalid handle. */
VCHIQ_STATUS_T
vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
{
	VCHIQ_STATUS_T ret = VCHIQ_ERROR;
	VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
	if (service) {
		ret = vchiq_use_internal(service->state, service,
				USE_TYPE_SERVICE_NO_RESUME);
		unlock_service(service);
	}
	return ret;
}

/* Public API: take a use count on a service by handle (resuming videocore if
 * needed). Returns VCHIQ_ERROR for an invalid handle. */
VCHIQ_STATUS_T
vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
{
	VCHIQ_STATUS_T ret = VCHIQ_ERROR;
	VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
	if (service) {
		ret = vchiq_use_internal(service->state, service,
				USE_TYPE_SERVICE);
		unlock_service(service);
	}
	return ret;
}

/* Public API: release a service's use count by handle. */
VCHIQ_STATUS_T
vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
{
	VCHIQ_STATUS_T ret = VCHIQ_ERROR;
	VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
	if (service) {
		ret = vchiq_release_internal(service->state, service);
		unlock_service(service);
	}
	return ret;
}

/* Dump the suspend/resume state and per-service use counts to the log.
 * Snapshots everything under the read lock into service_data[], then logs
 * after dropping the lock so the logging itself doesn't hold it. */
void
vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int i, j = 0;
	/* Only dump 64 services */
#define local_max_services 64
	/* If there's more than 64 services, only dump ones with
	 * non-zero counts */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	enum vc_suspend_status vc_suspend_state;
	enum vc_resume_status vc_resume_state;
	int peer_count;
	int vc_use_count;
	int active_services;
	struct service_data_struct {
		int fourcc;
		int clientid;
		int use_count;
	} service_data[local_max_services];

	if (!arm_state)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	vc_suspend_state = arm_state->vc_suspend_state;
	vc_resume_state = arm_state->vc_resume_state;
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > local_max_services)
		only_nonzero = 1;

	for (i = 0; (i < active_services) && (j < local_max_services); i++) {
		VCHIQ_SERVICE_T *service_ptr = state->services[i];
		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
			service_data[j].fourcc = service_ptr->base.fourcc;
			service_data[j].clientid = service_ptr->client_id;
			service_data[j++].use_count = service_ptr->
							service_use_count;
		}
	}

	read_unlock_bh(&arm_state->susp_res_lock);

	/* NOTE(review): "Videcore" typo is in the original format strings;
	 * left as-is since these are runtime log messages. */
	vchiq_log_warning(vchiq_susp_log_level,
		"-- Videcore suspend state: %s --",
		suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
	vchiq_log_warning(vchiq_susp_log_level,
		"-- Videcore resume state: %s --",
		resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);

	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level, "Too many active "
			"services (%d). Only dumping up to first %d services "
			"with non-zero use-count", active_services,
			local_max_services);

	for (i = 0; i < j; i++) {
		vchiq_log_warning(vchiq_susp_log_level,
			"----- %c%c%c%c:%d service count %d %s",
			VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
			service_data[i].clientid,
			service_data[i].use_count,
			service_data[i].use_count ? nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level,
		"----- VCHIQ use count count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level,
		"--- Overall vchiq instance use count %d", vc_use_count);

	vchiq_dump_platform_use_state(state);
}

/* Verify that a service currently holds a use count; returns VCHIQ_SUCCESS
 * if so, otherwise logs an error together with a full use-state dump and
 * returns VCHIQ_ERROR. */
VCHIQ_STATUS_T
vchiq_check_service(VCHIQ_SERVICE_T *service)
{
	VCHIQ_ARM_STATE_T *arm_state;
	VCHIQ_STATUS_T ret = VCHIQ_ERROR;

	if (!service || !service->state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	arm_state = vchiq_platform_get_arm_state(service->state);

	read_lock_bh(&arm_state->susp_res_lock);
	if (service->service_use_count)
		ret = VCHIQ_SUCCESS;
	read_unlock_bh(&arm_state->susp_res_lock);

	if (ret == VCHIQ_ERROR) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s ERROR - %c%c%c%c:%8x service count %d, "
			"state count %d, videocore suspend state %s", __func__,
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id, service->service_use_count,
			arm_state->videocore_use_count,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		vchiq_dump_service_use_state(service->state);
	}
out:
	return ret;
}

/* stub functions */
void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
{
	(void)state;
}

/* Connection state-change hook: on the first transition to CONNECTED, spawn
 * the keep-alive thread (once per state, guarded by first_connect under the
 * write lock). Later transitions only log.
 * NOTE(review): arm_state->ka_thread is assigned after the lock is dropped;
 * first_connect guarantees a single creator, but readers of ka_thread
 * elsewhere should not assume it is set the moment first_connect is -
 * confirm against its consumers. */
void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
	VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
		write_lock_bh(&arm_state->susp_res_lock);
		if (!arm_state->first_connect) {
			char threadname[10];
			arm_state->first_connect = 1;
			write_unlock_bh(&arm_state->susp_res_lock);
			snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
				state->id);
			arm_state->ka_thread = vchiq_thread_create(
				&vchiq_keepalive_thread_func,
				(void *)state,
				threadname);
			if (arm_state->ka_thread == NULL) {
				vchiq_log_error(vchiq_susp_log_level,
					"vchiq: FATAL: couldn't create thread %s",
					threadname);
			} else {
				wake_up_process(arm_state->ka_thread);
			}
		} else
			write_unlock_bh(&arm_state->susp_res_lock);
	}
}


/****************************************************************************
*
*   vchiq_init - called when the module is loaded.
*
***************************************************************************/

int __init vchiq_init(void);
int __init
vchiq_init(void)
{
	int err;

#ifdef notyet
	/* create proc entries */
	err = vchiq_proc_init();
	if (err != 0)
		goto failed_proc_init;
#endif

	spin_lock_init(&msg_queue_spinlock);

	err = vchiq_platform_init(&g_state);
	if (err != 0)
		goto failed_platform_init;

	vchiq_log_info(vchiq_arm_log_level,
		"vchiq: initialised - version %d (min %d)",
		VCHIQ_VERSION, VCHIQ_VERSION_MIN);

	return 0;

failed_platform_init:
	vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
	return err;
}

/****************************************************************************
*
*   vchiq_exit - called when the module is unloaded.
*
***************************************************************************/

void vchiq_exit(void);
void
vchiq_exit(void)
{
	vchiq_platform_exit(&g_state);
}