/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <asm/system.h>

#include "core.h"

/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION			4
#define FW_CDEV_VERSION_EVENT_REQUEST2		4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4

struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head phy_receiver_link;
	u64 phy_receiver_closure;

	struct list_head link;
	struct kref kref;
};

static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}

struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_card *card;
	struct fw_request *request;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[0];
};
struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	__be32 transaction_data[2];
	struct iso_resource_event *e_alloc, *e_dealloc;
};

static void release_iso_resource(struct client *, struct client_resource *);

static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
	client_get(r->client);
	if (!schedule_delayed_work(&r->work, delay))
		client_put(r->client);
}

static void schedule_if_iso_resource(struct client_resource *resource)
{
	if (resource->release == release_iso_resource)
		schedule_iso_resource(container_of(resource,
					struct iso_resource, resource), 0);
}

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
	struct event event;
	union {
		struct fw_cdev_event_request request;
		struct fw_cdev_event_request2 request2;
	} req;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_interrupt_mc_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt_mc interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
	struct event event;
	struct client *client;
	struct fw_packet p;
	struct fw_cdev_event_phy_packet phy_packet;
};

struct inbound_phy_packet_event {
	struct event event;
	struct fw_cdev_event_phy_packet phy_packet;
};

static inline void __user *u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64 uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}
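/*
 * Illustrative userspace counterpart (not part of this driver): pointers in
 * the cdev ABI travel as __u64 so that 32-bit and 64-bit userland share one
 * structure layout.  u64_to_uptr()/uptr_to_u64() above are the kernel-side
 * halves of that convention; a client typically defines the mirror image:
 *
 *	#include <stdint.h>
 *
 *	static inline uint64_t ptr_to_u64(const void *p)
 *	{
 *		return (uintptr_t)p;
 *	}
 *
 *	static inline void *u64_to_ptr(uint64_t v)
 *	{
 *		return (void *)(uintptr_t)v;
 *	}
 */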
static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	INIT_LIST_HEAD(&client->phy_receiver_link);
	kref_init(&client->kref);

	file->private_data = client;

	mutex_lock(&device->client_list_mutex);
	list_add_tail(&client->link, &device->client_list);
	mutex_unlock(&device->client_list_mutex);

	return nonseekable_open(inode, file);
}

static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
	    fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irq(&client->lock);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irq(&client->lock);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	spin_lock_irq(&card->lock);

	event->closure	     = client->bus_reset_closure;
	event->type          = FW_CDEV_EVENT_BUS_RESET;
	event->generation    = client->device->generation;
	event->node_id       = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = card->bm_node_id;
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;

	spin_unlock_irq(&card->lock);
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
	schedule_if_iso_resource(p);

	return 0;
}

static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL) {
		fw_notify("Out of memory when allocating event\n");
		return;
	}

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}
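/*
 * Illustrative userspace sketch (not part of this driver; the fd and buffer
 * size are assumptions of the example, and <unistd.h>, <stdio.h> and
 * <linux/firewire-cdev.h> are assumed included).  Each read() delivers
 * exactly one event, assembled by dequeue_event() above from up to two
 * source buffers; clients dispatch on the leading common.type field.  The
 * buffer must be large enough for the biggest expected event, since a short
 * read silently truncates:
 *
 *	char buf[16 * 1024];
 *	union fw_cdev_event *e = (union fw_cdev_event *)buf;
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *
 *	if (len >= (ssize_t)sizeof(e->common) &&
 *	    e->common.type == FW_CDEV_EVENT_BUS_RESET)
 *		printf("bus reset, generation %u\n",
 *		       e->bus_reset.generation);
 */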
union ioctl_arg {
	struct fw_cdev_get_info			get_info;
	struct fw_cdev_send_request		send_request;
	struct fw_cdev_allocate			allocate;
	struct fw_cdev_deallocate		deallocate;
	struct fw_cdev_send_response		send_response;
	struct fw_cdev_initiate_bus_reset	initiate_bus_reset;
	struct fw_cdev_add_descriptor		add_descriptor;
	struct fw_cdev_remove_descriptor	remove_descriptor;
	struct fw_cdev_create_iso_context	create_iso_context;
	struct fw_cdev_queue_iso		queue_iso;
	struct fw_cdev_start_iso		start_iso;
	struct fw_cdev_stop_iso			stop_iso;
	struct fw_cdev_get_cycle_timer		get_cycle_timer;
	struct fw_cdev_allocate_iso_resource	allocate_iso_resource;
	struct fw_cdev_send_stream_packet	send_stream_packet;
	struct fw_cdev_get_cycle_timer2		get_cycle_timer2;
	struct fw_cdev_send_phy_packet		send_phy_packet;
	struct fw_cdev_receive_phy_packets	receive_phy_packets;
	struct fw_cdev_set_iso_channels		set_iso_channels;
};

static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_info *a = &arg->get_info;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = a->version;
	a->version = FW_CDEV_KERNEL_VERSION;
	a->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (a->rom != 0) {
		size_t want = a->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(u64_to_uptr(a->rom),
				   client->device->config_rom, min(want, have));
	}
	a->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	client->bus_reset_closure = a->bus_reset_closure;
	if (a->bus_reset != 0) {
		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(u64_to_uptr(a->bus_reset),
				 &bus_reset, sizeof(bus_reset)))
			return -EFAULT;
	}

	return 0;
}

static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	if (ret >= 0) {
		client_get(client);
		schedule_if_iso_resource(resource);
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}
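/*
 * Illustrative userspace sketch of the ioctl_get_info() handler above (not
 * part of this driver; fd and error handling are assumptions of the
 * example).  The client announces its ABI version and optionally passes
 * buffers for the config ROM and for a snapshot of the current bus reset
 * state:
 *
 *	struct fw_cdev_event_bus_reset reset;
 *	uint32_t rom[256];
 *	struct fw_cdev_get_info info = {
 *		.version    = 4,
 *		.rom        = ptr_to_u64(rom),
 *		.rom_length = sizeof(rom),
 *		.bus_reset  = ptr_to_u64(&reset),
 *	};
 *
 *	if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) == 0)
 *		printf("kernel ABI %u, card %u, node 0x%x\n",
 *		       info.version, info.card, reset.node_id);
 */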
static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **return_resource)
{
	struct client_resource *resource;

	spin_lock_irq(&client->lock);
	if (client->in_shutdown)
		resource = NULL;
	else
		resource = idr_find(&client->resource_idr, handle);
	if (resource && resource->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irq(&client->lock);

	if (!(resource && resource->release == release))
		return -EINVAL;

	if (return_resource)
		*return_resource = resource;
	else
		resource->release(client, resource);

	client_put(client);

	return 0;
}

static void release_transaction(struct client *client,
				struct client_resource *resource)
{
	struct outbound_transaction_resource *r = container_of(resource,
			struct outbound_transaction_resource, resource);

	fw_cancel_transaction(client->device->card, &r->transaction);
}

static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct fw_cdev_event_response *rsp = &e->response;
	struct client *client = e->client;
	unsigned long flags;

	if (length < rsp->length)
		rsp->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(rsp->data, payload, rsp->length);

	spin_lock_irqsave(&client->lock, flags);
	/*
	 * 1. If called while in shutdown, the idr tree must be left untouched.
	 *    The idr handle will be removed and the client reference will be
	 *    dropped later.
	 * 2. If the call chain was release_client_resource ->
	 *    release_transaction -> complete_transaction (instead of a normal
	 *    conclusion of the transaction), i.e. if this resource was already
	 *    unregistered from the idr, the client reference will be dropped
	 *    by release_client_resource and we must not drop it here.
	 */
	if (!client->in_shutdown &&
	    idr_find(&client->resource_idr, e->r.resource.handle)) {
		idr_remove(&client->resource_idr, e->r.resource.handle);
		/* Drop the idr's reference */
		client_put(client);
	}
	spin_unlock_irqrestore(&client->lock, flags);

	rsp->type = FW_CDEV_EVENT_RESPONSE;
	rsp->rcode = rcode;

	/*
	 * In the case that sizeof(*rsp) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
		queue_event(client, &e->event, rsp, sizeof(*rsp),
			    rsp->data, rsp->length);
	else
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
			    NULL, 0);

	/* Drop the transaction callback's reference */
	client_put(client);
}
static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	int ret;

	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
	    request->length < 4)
		return -EINVAL;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(e->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	/* Get a reference for the transaction callback */
	client_get(client);

	fw_send_request(client->device->card, &e->r.transaction,
			request->tcode, destination_id, request->generation,
			speed, request->offset, e->response.data,
			request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}

static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
	switch (arg->send_request.tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, &arg->send_request, client->device->node_id,
			    client->device->max_speed);
}
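/*
 * Illustrative userspace sketch of ioctl_send_request() above (not part of
 * this driver; fd, buf, the generation value and error checks are
 * assumptions of the example; TCODE_*/RCODE_* come from
 * linux/firewire-constants.h).  A quadlet read of the remote node's Config
 * ROM header; the closure comes back verbatim in the matching
 * FW_CDEV_EVENT_RESPONSE event:
 *
 *	struct fw_cdev_send_request rq = {
 *		.tcode      = TCODE_READ_QUADLET_REQUEST,
 *		.length     = 4,
 *		.offset     = 0xfffff0000400ULL,
 *		.closure    = 0xdeadbeef,
 *		.generation = generation,
 *	};
 *	union fw_cdev_event *e = (union fw_cdev_event *)buf;
 *
 *	ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &rq);
 *	read(fd, buf, sizeof(buf));
 *	if (e->common.type == FW_CDEV_EVENT_RESPONSE &&
 *	    e->response.closure == 0xdeadbeef &&
 *	    e->response.rcode == RCODE_COMPLETE)
 *		rom_header = e->response.data[0];
 */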
static inline bool is_fcp_request(struct fw_request *request)
{
	return request == NULL;
}

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	if (is_fcp_request(r->request))
		kfree(r->data);
	else
		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

	fw_card_put(r->card);
	kfree(r);
}

static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	size_t event_size0;
	void *fcp_frame = NULL;
	int ret;

	/* card may be different from handler->client->device->card */
	fw_card_get(card);

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL) {
		fw_notify("Out of memory when allocating event\n");
		goto failed;
	}
	r->card    = card;
	r->request = request;
	r->data    = payload;
	r->length  = length;

	if (is_fcp_request(request)) {
		fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
		if (fcp_frame == NULL)
			goto failed;

		r->data = fcp_frame;
	}

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
		struct fw_cdev_event_request *req = &e->req.request;

		if (tcode & 0x10)
			tcode = TCODE_LOCK_REQUEST;

		req->type	= FW_CDEV_EVENT_REQUEST;
		req->tcode	= tcode;
		req->offset	= offset;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	} else {
		struct fw_cdev_event_request2 *req = &e->req.request2;

		req->type	= FW_CDEV_EVENT_REQUEST2;
		req->tcode	= tcode;
		req->offset	= offset;
		req->source_node_id	 = source;
		req->destination_node_id = destination;
		req->card	= card->index;
		req->generation	= generation;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	}

	queue_event(handler->client, &e->event,
		    &e->req, event_size0, r->data, length);
	return;

 failed:
	kfree(r);
	kfree(e);
	kfree(fcp_frame);

	if (!is_fcp_request(request))
		fw_send_response(card, request, RCODE_CONFLICT_ERROR);

	fw_card_put(card);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
	    container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}

static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_allocate *a = &arg->allocate;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = a->offset;
	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
		region.end = a->offset + a->length;
	else
		region.end = a->region_end;

	r->handler.length = a->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data = r;
	r->closure = a->closure;
	r->client = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}
	a->offset = r->handler.offset;

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	a->handle = r->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->deallocate.handle,
				       release_address_handler, NULL);
}
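/*
 * Illustrative userspace sketch of ioctl_allocate() above (not part of this
 * driver; fd, the chosen region, and request2_event_handle - taken from a
 * received FW_CDEV_EVENT_REQUEST2 event - are assumptions of the example).
 * With an ABI v4 client, region_end lets the kernel place the range
 * anywhere inside [offset, region_end); the actual start comes back in
 * .offset.  Inbound requests to the range must be answered with
 * FW_CDEV_IOC_SEND_RESPONSE:
 *
 *	struct fw_cdev_allocate alloc = {
 *		.offset     = 0x0000100000000000ULL,
 *		.closure    = 0,
 *		.length     = 4096,
 *		.region_end = 0x0000100000100000ULL,
 *	};
 *	ioctl(fd, FW_CDEV_IOC_ALLOCATE, &alloc);
 *
 *	struct fw_cdev_send_response rsp = {
 *		.rcode  = RCODE_COMPLETE,
 *		.length = 0,
 *		.handle = request2_event_handle,
 *	};
 *	ioctl(fd, FW_CDEV_IOC_SEND_RESPONSE, &rsp);
 */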
static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_response *a = &arg->send_response;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;
	int ret = 0;

	if (release_client_resource(client, a->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (is_fcp_request(r->request))
		goto out;

	if (a->length != fw_get_response_length(r->request)) {
		ret = -EINVAL;
		kfree(r->request);
		goto out;
	}
	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
		ret = -EFAULT;
		kfree(r->request);
		goto out;
	}
	fw_send_response(r->card, r->request, a->rcode);
 out:
	fw_card_put(r->card);
	kfree(r);

	return ret;
}

static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
	fw_schedule_bus_reset(client->device->card, true,
			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
	return 0;
}

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}

static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (a->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length    = a->length;
	r->descriptor.immediate = a->immediate;
	r->descriptor.key       = a->key;
	r->descriptor.data      = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	a->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}

static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->remove_descriptor.handle,
				       release_descriptor, NULL);
}
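/*
 * Illustrative userspace sketch of ioctl_add_descriptor() above (not part
 * of this driver; fd and the leaf contents are schematic placeholders, not
 * a meaningful ROM fragment).  length counts quadlets; key carries the root
 * directory pointer's key in its upper 8 bits, e.g. 0x81 for a textual
 * descriptor leaf, and the kernel's config ROM machinery fills in the CRC:
 *
 *	uint32_t leaf[] = {
 *		0x00040000,
 *		0x00000000, 0x00000000,
 *		0x44656d6f, 0x00000000,
 *	};
 *	struct fw_cdev_add_descriptor ad = {
 *		.immediate = 0,
 *		.key       = 0x81000000,
 *		.data      = ptr_to_u64(leaf),
 *		.length    = 5,
 *	};
 *	ioctl(fd, FW_CDEV_IOC_ADD_DESCRIPTOR, &ad);
 */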
static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL) {
		fw_notify("Out of memory when allocating event\n");
		return;
	}
	e->interrupt.type	   = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure	   = client->iso_closure;
	e->interrupt.cycle	   = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}

static void iso_mc_callback(struct fw_iso_context *context,
			    dma_addr_t completed, void *data)
{
	struct client *client = data;
	struct iso_interrupt_mc_event *e;

	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (e == NULL) {
		fw_notify("Out of memory when allocating event\n");
		return;
	}
	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
						      completed);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt), NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
	struct fw_iso_context *context;
	fw_iso_callback_t cb;

	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
					FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

	switch (a->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		if (a->speed > SCODE_3200 || a->channel > 63)
			return -EINVAL;

		cb = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		if (a->header_size < 4 || (a->header_size & 3) ||
		    a->channel > 63)
			return -EINVAL;

		cb = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		cb = (fw_iso_callback_t)iso_mc_callback;
		break;

	default:
		return -EINVAL;
	}

	context = fw_iso_context_create(client->device->card, a->type,
			a->channel, a->speed, a->header_size, cb, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	/* We only support one context at this time. */
	spin_lock_irq(&client->lock);
	if (client->iso_context != NULL) {
		spin_unlock_irq(&client->lock);
		fw_iso_context_destroy(context);
		return -EBUSY;
	}
	client->iso_closure = a->closure;
	client->iso_context = context;
	spin_unlock_irq(&client->lock);

	a->handle = 0;

	return 0;
}

static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
	struct fw_iso_context *ctx = client->iso_context;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_set_channels(ctx, &a->channels);
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
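/*
 * Userspace composes the control quadlet with the FW_CDEV_ISO_* macros from
 * linux/firewire-cdev.h, which mirror the GET_* decoders above.  A sketch
 * (the payload/header/tag values are arbitrary; for transmit contexts,
 * header_length bytes of user-supplied header follow the control word in
 * the packets array):
 *
 *	struct fw_cdev_iso_packet packet = {
 *		.control = FW_CDEV_ISO_PAYLOAD_LENGTH(512)
 *			 | FW_CDEV_ISO_HEADER_LENGTH(8)
 *			 | FW_CDEV_ISO_TAG(1)
 *			 | FW_CDEV_ISO_SY(0)
 *			 | FW_CDEV_ISO_INTERRUPT,
 *	};
 */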
static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_queue_iso *a = &arg->queue_iso;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, transmit_header_bytes = 0;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the a->data pointer is ignored.
	 */
	payload = (unsigned long)a->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (a->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
		return -EINVAL;

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
	if (!access_ok(VERIFY_READ, p, a->size))
		return -EFAULT;

	end = (void __user *)p + a->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		switch (ctx->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
			if (u.packet.header_length & 3)
				return -EINVAL;
			transmit_header_bytes = u.packet.header_length;
			break;

		case FW_ISO_CONTEXT_RECEIVE:
			if (u.packet.header_length == 0 ||
			    u.packet.header_length % ctx->header_size != 0)
				return -EINVAL;
			break;

		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			if (u.packet.payload_length == 0 ||
			    u.packet.payload_length & 3)
				return -EINVAL;
			break;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[transmit_header_bytes / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, transmit_header_bytes))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	a->size    -= uptr_to_u64(p) - a->packets;
	a->packets  = uptr_to_u64(p);
	a->data     = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_start_iso *a = &arg->start_iso;

	BUILD_BUG_ON(
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
		return -EINVAL;

	return fw_iso_context_start(client->iso_context,
				    a->cycle, a->sync, a->tags);
}

static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_stop_iso *a = &arg->stop_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}
static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
	struct fw_card *card = client->device->card;
	struct timespec ts = {0, 0};
	u32 cycle_time;
	int ret = 0;

	local_irq_disable();

	cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

	switch (a->clk_id) {
	case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
	case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
	case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
	default:
		ret = -EINVAL;
	}

	local_irq_enable();

	a->tv_sec      = ts.tv_sec;
	a->tv_nsec     = ts.tv_nsec;
	a->cycle_timer = cycle_time;

	return ret;
}

static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
	struct fw_cdev_get_cycle_timer2 ct2;

	ct2.clk_id = CLOCK_REALTIME;
	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

	a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
	a->cycle_timer = ct2.cycle_timer;

	return 0;
}
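/*
 * Illustrative decoding of the returned cycle_timer value, which has the
 * standard IEEE 1394 CYCLE_TIME register layout (7 bits cycleSeconds,
 * 13 bits cycleCount, 12 bits cycleOffset); a userspace sketch, assuming
 * ct2 was filled in by FW_CDEV_IOC_GET_CYCLE_TIMER2:
 *
 *	uint32_t seconds = ct2.cycle_timer >> 25;
 *	uint32_t cycles  = (ct2.cycle_timer >> 12) & 0x1fff;
 *	uint32_t offset  = ct2.cycle_timer & 0xfff;
 *
 * cycleCount runs 0..7999 (8000 cycles of 125us per second), cycleOffset
 * 0..3071 (24.576 MHz ticks within one cycle).
 */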
static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	spin_lock_irq(&client->lock);
	generation = client->device->generation;
	todo = r->todo;
	/* Allow 1000ms grace period for other reallocations. */
	if (todo == ISO_RES_ALLOC &&
	    time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
		skip = true;
	} else {
		/* We could be called twice within the same generation. */
		skip = todo == ISO_RES_REALLOC &&
		       r->generation == generation;
	}
	free = todo == ISO_RES_DEALLOC ||
	       todo == ISO_RES_ALLOC_ONCE ||
	       todo == ISO_RES_DEALLOC_ONCE;
	r->generation = generation;
	spin_unlock_irq(&client->lock);

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE,
			r->transaction_data);
	/*
	 * Is this generation outdated already?  As long as this resource
	 * sticks in the idr, it will be scheduled again for a newer
	 * generation or at shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	spin_lock_irq(&client->lock);
	/*
	 * Transit from allocation to reallocation, except if the client
	 * requested deallocation in the meantime.
	 */
	if (r->todo == ISO_RES_ALLOC)
		r->todo = ISO_RES_REALLOC;
	/*
	 * Allocation or reallocation failure?  Pull this resource out of the
	 * idr and prepare for deletion, unless the client is shutting down.
	 */
	if (r->todo == ISO_RES_REALLOC && !success &&
	    !client->in_shutdown &&
	    idr_find(&client->resource_idr, r->resource.handle)) {
		idr_remove(&client->resource_idr, r->resource.handle);
		client_put(client);
		free = true;
	}
	spin_unlock_irq(&client->lock);

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	if (todo == ISO_RES_REALLOC && success)
		goto out;

	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->iso_resource.handle    = r->resource.handle;
	e->iso_resource.channel   = channel;
	e->iso_resource.bandwidth = bandwidth;

	queue_event(client, &e->event,
		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}

static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r, 0);
	spin_unlock_irq(&client->lock);
}

static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
	    request->bandwidth < 0)
		return -EINVAL;

	r  = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client	= client;
	r->todo		= todo;
	r->generation	= -1;
	r->channels	= request->channels;
	r->bandwidth	= request->bandwidth;
	r->e_alloc	= e1;
	r->e_dealloc	= e2;

	e1->iso_resource.closure = request->closure;
	e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->iso_resource.closure = request->closure;
	e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r, 0);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}

static int ioctl_allocate_iso_resource(struct client *client,
				       union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client,
					 union ioctl_arg *arg)
{
	return release_client_resource(client,
			arg->deallocate.handle, release_iso_resource, NULL);
}
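/*
 * Illustrative userspace sketch of the allocation path above (not part of
 * this driver; fd, channel number, and bandwidth value are assumptions of
 * the example).  The ioctl only queues the work; the outcome arrives later
 * as an FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event, and the kernel keeps
 * reallocating after each bus reset until the client deallocates or closes
 * the file:
 *
 *	struct fw_cdev_allocate_iso_resource res = {
 *		.closure   = 0,
 *		.channels  = 1ULL << 5,
 *		.bandwidth = 2400,
 *	};
 *
 *	ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &res);
 */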
static int ioctl_allocate_iso_resource_once(struct client *client,
					    union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client,
					      union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}

/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
	return client->device->max_speed;
}

static int ioctl_send_broadcast_request(struct client *client,
					union ioctl_arg *arg)
{
	struct fw_cdev_send_request *a = &arg->send_request;

	switch (a->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}

static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
	struct fw_cdev_send_request request;
	int dest;

	if (a->speed > client->device->card->link_speed ||
	    a->length > 1024 << a->speed)
		return -EIO;

	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
		return -EINVAL;

	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
	request.tcode		= TCODE_STREAM_DATA;
	request.length		= a->length;
	request.closure		= a->closure;
	request.data		= a->data;
	request.generation	= a->generation;

	return init_request(client, &request, dest, a->speed);
}

static void outbound_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	struct outbound_phy_packet_event *e =
		container_of(packet, struct outbound_phy_packet_event, p);

	switch (status) {
	/* expected: */
	case ACK_COMPLETE:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	/* should never happen with PHY packets: */
	case ACK_PENDING:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:	e->phy_packet.rcode = RCODE_BUSY;	break;
	case ACK_DATA_ERROR:	e->phy_packet.rcode = RCODE_DATA_ERROR;	break;
	case ACK_TYPE_ERROR:	e->phy_packet.rcode = RCODE_TYPE_ERROR;	break;
	/* stale generation; cancelled; on certain controllers: no ack */
	default:		e->phy_packet.rcode = status;		break;
	}
	e->phy_packet.data[0] = packet->timestamp;

	queue_event(e->client, &e->event, &e->phy_packet,
		    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
	client_put(e->client);
}
static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
	struct fw_card *card = client->device->card;
	struct outbound_phy_packet_event *e;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	client_get(client);
	e->client		= client;
	e->p.speed		= SCODE_100;
	e->p.generation		= a->generation;
	e->p.header[0]		= a->data[0];
	e->p.header[1]		= a->data[1];
	e->p.header_length	= 8;
	e->p.callback		= outbound_phy_packet_callback;
	e->phy_packet.closure	= a->closure;
	e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_SENT;
	if (is_ping_packet(a->data))
		e->phy_packet.length = 4;

	card->driver->send_request(card, &e->p);

	return 0;
}

static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
	struct fw_card *card = client->device->card;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	spin_lock_irq(&card->lock);

	list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
	client->phy_receiver_closure = a->closure;

	spin_unlock_irq(&card->lock);

	return 0;
}

void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
	struct client *client;
	struct inbound_phy_packet_event *e;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
		if (e == NULL) {
			fw_notify("Out of memory when allocating event\n");
			break;
		}
		e->phy_packet.closure	= client->phy_receiver_closure;
		e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
		e->phy_packet.rcode	= RCODE_COMPLETE;
		e->phy_packet.length	= 8;
		e->phy_packet.data[0]	= p->header[1];
		e->phy_packet.data[1]	= p->header[2];
		queue_event(client, &e->event,
			    &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
	}

	spin_unlock_irqrestore(&card->lock, flags);
}

static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
	[0x00] = ioctl_get_info,
	[0x01] = ioctl_send_request,
	[0x02] = ioctl_allocate,
	[0x03] = ioctl_deallocate,
	[0x04] = ioctl_send_response,
	[0x05] = ioctl_initiate_bus_reset,
	[0x06] = ioctl_add_descriptor,
	[0x07] = ioctl_remove_descriptor,
	[0x08] = ioctl_create_iso_context,
	[0x09] = ioctl_queue_iso,
	[0x0a] = ioctl_start_iso,
	[0x0b] = ioctl_stop_iso,
	[0x0c] = ioctl_get_cycle_timer,
	[0x0d] = ioctl_allocate_iso_resource,
	[0x0e] = ioctl_deallocate_iso_resource,
	[0x0f] = ioctl_allocate_iso_resource_once,
	[0x10] = ioctl_deallocate_iso_resource_once,
	[0x11] = ioctl_get_speed,
	[0x12] = ioctl_send_broadcast_request,
	[0x13] = ioctl_send_stream_packet,
	[0x14] = ioctl_get_cycle_timer2,
	[0x15] = ioctl_send_phy_packet,
	[0x16] = ioctl_receive_phy_packets,
	[0x17] = ioctl_set_iso_channels,
};
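/*
 * The command numbers in linux/firewire-cdev.h encode '#' as _IOC_TYPE and
 * the table index above as _IOC_NR, e.g.
 *
 *	#define FW_CDEV_IOC_GET_INFO \
 *			_IOWR('#', 0x00, struct fw_cdev_get_info)
 *
 * so dispatch_ioctl() below can derive the handler, the argument size, and
 * the copy-in/copy-out direction from the command number alone.
 */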
static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	union ioctl_arg buffer;
	int ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
	    _IOC_SIZE(cmd) > sizeof(buffer))
		return -EINVAL;

	if (_IOC_DIR(cmd) == _IOC_READ)
		memset(&buffer, 0, _IOC_SIZE(cmd));

	if (_IOC_DIR(cmd) & _IOC_WRITE)
		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
			return -EFAULT;

	return ret;
}

static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
				      unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return ret;
}
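/*
 * Illustrative userspace sketch of the mmap interface above (not part of
 * this driver; fd and the mapping size are assumptions of the example).
 * The shared mapping provides the payload buffer for the iso context;
 * fw_cdev_queue_iso.data then points into this mapping, which
 * ioctl_queue_iso() converts to offsets relative to vm_start:
 *
 *	size_t size = 64 * 4096;
 *	void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 */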
static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;
	struct client *client = data;

	resource->release(client, resource);
	client_put(client);

	return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *event, *next_event;

	spin_lock_irq(&client->device->card->lock);
	list_del(&client->phy_receiver_link);
	spin_unlock_irq(&client->device->card->lock);

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(event, next_event, &client->event_list, link)
		kfree(event);

	client_put(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.mmap		= fw_device_op_mmap,
	.release	= fw_device_op_release,
	.poll		= fw_device_op_poll,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};