/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * This is a binary format reader.
 *
 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
 * Copyright (C) 2006 Pete Zaitcev (zaitcev@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/usb.h>
#include <linux/poll.h>
#include <linux/compat.h>
#include <linux/mm.h>

#include <asm/uaccess.h>

#include "usb_mon.h"

/*
 * Length of a control setup packet.
 * Defined by USB 2.0 clause 9.3, table 9.2.
 */
#define SETUP_LEN  8

/* ioctl macros */
#define MON_IOC_MAGIC 0x92

#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
#define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
#ifdef CONFIG_COMPAT
/* 32-bit layouts for the same command numbers, decoded in the compat path. */
#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
#endif

/*
 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
 * But it's all right. Just use a simple way to make sure the chunk is never
 * smaller than a page.
 *
 * N.B. An application does not know our chunk size.
 *
 * Woops, get_zeroed_page() returns a single page. I guess we're stuck with
 * page-sized chunks for the time being.
 */
#define CHUNK_SIZE PAGE_SIZE
#define CHUNK_ALIGN(x) (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))

/* Ring buffer size bounds and default, rounded up to whole chunks. */
#define BUFF_MAX  CHUNK_ALIGN(1200*1024)
#define BUFF_DFL   CHUNK_ALIGN(300*1024)
#define BUFF_MIN     CHUNK_ALIGN(8*1024)

/*
 * The per-event API header (2 per URB).
 *
 * This structure is seen in userland as defined by the documentation.
 */
struct mon_bin_hdr {
	u64 id;			/* URB ID - from submission to callback */
	unsigned char type;	/* Same as in text API; extensible. */
	unsigned char xfer_type; /* ISO, Intr, Control, Bulk */
	unsigned char epnum;	/* Endpoint number and transfer direction */
	unsigned char devnum;	/* Device address */
	unsigned short busnum;	/* Bus number */
	char flag_setup;	/* 0 if setup[] is valid, else a reason tag */
	char flag_data;		/* 0 if data was captured, else a reason tag */
	s64 ts_sec;		/* gettimeofday */
	s32 ts_usec;		/* gettimeofday */
	int status;
	unsigned int len_urb;	/* Length of data (submitted or actual) */
	unsigned int len_cap;	/* Delivered length */
	unsigned char setup[SETUP_LEN];	/* Only for Control S-type */
};

/* per file statistic, returned by MON_IOCG_STATS */
struct mon_bin_stats {
	u32 queued;	/* Events currently in the ring */
	u32 dropped;	/* Events lost since the previous query */
};

/* Argument of MON_IOCX_GET: fetch one event into user buffers. */
struct mon_bin_get {
	struct mon_bin_hdr __user *hdr;	/* Only 48 bytes, not 64. */
	void __user *data;
	size_t alloc;		/* Length of data (can be zero) */
};

/* Argument of MON_IOCX_MFETCH: batch fetch/flush for mmap users. */
struct mon_bin_mfetch {
	u32 __user *offvec;	/* Vector of events fetched */
	u32 nfetch;		/* Number of events to fetch (out: fetched) */
	u32 nflush;		/* Number of events to flush */
};

#ifdef CONFIG_COMPAT
/* 32-bit counterpart of mon_bin_get (pointers squeezed to u32). */
struct mon_bin_get32 {
	u32 hdr32;
	u32 data32;
	u32 alloc32;
};

/* 32-bit counterpart of mon_bin_mfetch. */
struct mon_bin_mfetch32 {
	u32 offvec32;
	u32 nfetch32;
	u32 nflush32;
};
#endif

/* Having these two values same prevents wrapping of the mon_bin_hdr */
#define PKT_ALIGN	64
#define PKT_SIZE	64

/* max number of USB bus supported */
#define MON_BIN_MAX_MINOR 128

/*
 * The buffer: map of used pages.
 * Each chunk is one page; ptr is the kernel mapping, pg backs mmap.
 */
struct mon_pgmap {
	struct page *pg;
	unsigned char *ptr;
};

/*
 * This gets associated with an open file struct.
 */
struct mon_reader_bin {
	/* The buffer: one per open. */
	spinlock_t b_lock;		/* Protect b_cnt, b_in */
	unsigned int b_size;		/* Current size of the buffer - bytes */
	unsigned int b_cnt;		/* Bytes used */
	unsigned int b_in, b_out;	/* Offsets into buffer - bytes */
	unsigned int b_read;		/* Amount of read data in curr. pkt. */
	struct mon_pgmap *b_vec;	/* The map array */
	wait_queue_head_t b_wait;	/* Wait for data here */

	struct mutex fetch_lock;	/* Protect b_read, b_out */
	int mmap_active;		/* Count of live mappings */

	/* A list of these is needed for "bus 0". Some time later. */
	struct mon_reader r;

	/* Stats */
	unsigned int cnt_lost;
};

/* Translate a byte offset in the ring into a header pointer. */
static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
    unsigned int offset)
{
	return (struct mon_bin_hdr *)
	    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
}

#define MON_RING_EMPTY(rp)	((rp)->b_cnt == 0)

static dev_t mon_bin_dev0;
static struct cdev mon_bin_cdev;

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size);
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
static int mon_alloc_buff(struct mon_pgmap *map, int npages);
static void mon_free_buff(struct mon_pgmap *map, int npages);

/*
 * This is a "chunked memcpy". It does not manipulate any counters.
 * But it returns the new offset for repeated application.
 * Not static: also used by the DMA-peek code elsewhere in the module.
 */
unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
    unsigned int off, const unsigned char *from, unsigned int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len: stop at the end of the current chunk.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers, wrapping at buffer end.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		memcpy(buf, from, step_len);
		if ((off += step_len) >= this->b_size)
			off = 0;
		from += step_len;
		length -= step_len;
	}
	return off;
}

/*
 * This is a little worse than the above because it's "chunked copy_to_user".
 * The return value is an error code, not an offset.
 */
static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
    char __user *to, int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len: stop at the end of the current chunk.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers, wrapping at buffer end.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		if (copy_to_user(to, buf, step_len))
			return -EINVAL;
		if ((off += step_len) >= this->b_size)
			off = 0;
		to += step_len;
		length -= step_len;
	}
	return 0;
}

/*
 * Allocate an (aligned) area in the buffer.
 * This is called under b_lock.
 * Returns ~0 on failure.
 */
static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	offset = rp->b_in;
	rp->b_cnt += size;
	if ((rp->b_in += size) >= rp->b_size)
		rp->b_in -= rp->b_size;
	return offset;
}

/*
 * This is the same thing as mon_buff_area_alloc, only it does not allow
 * buffers to wrap. This is needed by applications which pass references
 * into mmap-ed buffers up their stacks (libpcap can do that).
 *
 * Currently, we always have the header stuck with the data, although
 * it is not strictly speaking necessary.
 *
 * When a buffer would wrap, we place a filler packet to mark the space.
 */
static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;
	unsigned int fill_size;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	if (rp->b_in + size > rp->b_size) {
		/*
		 * This would wrap. Find if we still have space after
		 * skipping to the end of the buffer. If we do, place
		 * a filler packet and allocate a new packet.
		 */
		fill_size = rp->b_size - rp->b_in;
		if (rp->b_cnt + size + fill_size > rp->b_size)
			return ~0;
		mon_buff_area_fill(rp, rp->b_in, fill_size);

		offset = 0;
		rp->b_in = size;
		rp->b_cnt += size + fill_size;
	} else if (rp->b_in + size == rp->b_size) {
		/* Exact fit: allocation ends flush with the buffer end. */
		offset = rp->b_in;
		rp->b_in = 0;
		rp->b_cnt += size;
	} else {
		offset = rp->b_in;
		rp->b_in += size;
		rp->b_cnt += size;
	}
	return offset;
}

/*
 * Return a few (kilo-)bytes to the head of the buffer.
 * This is used if a DMA fetch fails.
 * Called under b_lock; undoes the most recent allocation.
 */
static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if (rp->b_in < size)
		rp->b_in += rp->b_size;
	rp->b_in -= size;
}

/*
 * This has to be called under both b_lock and fetch_lock, because
 * it accesses both b_cnt and b_out.
 */
static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if ((rp->b_out += size) >= rp->b_size)
		rp->b_out -= rp->b_size;
}

/*
 * Write a filler packet (type '@') covering `size` bytes at `offset`,
 * so mmap readers can skip the dead space at the end of the ring.
 */
static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size)
{
	struct mon_bin_hdr *ep;

	ep = MON_OFF2HDR(rp, offset);
	memset(ep, 0, PKT_SIZE);
	ep->type = '@';
	ep->len_cap = size - PKT_SIZE;
}

/*
 * Capture the 8-byte setup packet of a control URB into setupb.
 * Returns 0 on success, or a flag_setup tag:
 * '-' not applicable, 'Z' no setup packet, or mon_dmapeek's result.
 */
static inline char mon_bin_get_setup(unsigned char *setupb,
    const struct urb *urb, char ev_type)
{

	if (!usb_pipecontrol(urb->pipe) || ev_type != 'S')
		return '-';

	if (urb->dev->bus->uses_dma &&
	    (urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
		return mon_dmapeek(setupb, urb->setup_dma, SETUP_LEN);
	}
	if (urb->setup_packet == NULL)
		return 'Z';	/* '0' would be not as pretty. */

	memcpy(setupb, urb->setup_packet, SETUP_LEN);
	return 0;
}

/*
 * Copy `length` bytes of URB payload into the ring at `offset`.
 * Returns 0 on success, 'Z' if there is no transfer buffer.
 */
static char mon_bin_get_data(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, unsigned int length)
{

	if (urb->dev->bus->uses_dma &&
	    (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		mon_dmapeek_vec(rp, offset, urb->transfer_dma, length);
		return 0;
	}

	if (urb->transfer_buffer == NULL)
		return 'Z';

	mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
	return 0;
}

/*
 * Record one event ('S'ubmission or 'C'allback) for an URB into the ring.
 * Runs under b_lock with IRQs disabled; on ring-full the event is dropped
 * and cnt_lost is bumped.
 */
static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
    char ev_type)
{
	unsigned long flags;
	struct timeval ts;
	unsigned int urb_length;
	unsigned int offset;
	unsigned int length;
	struct mon_bin_hdr *ep;
	char data_tag = 0;

	do_gettimeofday(&ts);

	spin_lock_irqsave(&rp->b_lock, flags);

	/*
	 * Find the maximum allowable length, then allocate space.
	 */
	urb_length = (ev_type == 'S') ?
	    urb->transfer_buffer_length : urb->actual_length;
	length = urb_length;

	/* Cap one event's payload at a fifth of the ring. */
	if (length >= rp->b_size/5)
		length = rp->b_size/5;

	if (usb_pipein(urb->pipe)) {
		/* Input data is not yet available at submission time. */
		if (ev_type == 'S') {
			length = 0;
			data_tag = '<';
		}
	} else {
		/* Output data was already consumed by callback time. */
		if (ev_type == 'C') {
			length = 0;
			data_tag = '>';
		}
	}

	if (rp->mmap_active)
		offset = mon_buff_area_alloc_contiguous(rp, length + PKT_SIZE);
	else
		offset = mon_buff_area_alloc(rp, length + PKT_SIZE);
	if (offset == ~0) {
		rp->cnt_lost++;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);
	/* Advance past the header to where the data goes. */
	if ((offset += PKT_SIZE) >= rp->b_size)
		offset = 0;

	/*
	 * Fill the allocated area.
	 */
	memset(ep, 0, PKT_SIZE);
	ep->type = ev_type;
	ep->xfer_type = usb_pipetype(urb->pipe);
	/* We use the fact that usb_pipein() returns 0x80 */
	ep->epnum = usb_pipeendpoint(urb->pipe) | usb_pipein(urb->pipe);
	ep->devnum = usb_pipedevice(urb->pipe);
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->ts_sec = ts.tv_sec;
	ep->ts_usec = ts.tv_usec;
	ep->status = urb->status;
	ep->len_urb = urb_length;
	ep->len_cap = length;

	ep->flag_setup = mon_bin_get_setup(ep->setup, urb, ev_type);
	if (length != 0) {
		ep->flag_data = mon_bin_get_data(rp, offset, urb, length);
		if (ep->flag_data != 0) {	/* Yes, it's 0x00, not '0' */
			/* Data fetch failed: give the space back. */
			ep->len_cap = 0;
			mon_buff_area_shrink(rp, length);
		}
	} else {
		ep->flag_data = data_tag;
	}

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

/* mon_reader callback: URB submitted. */
static void mon_bin_submit(void *data, struct urb *urb)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'S');
}

/* mon_reader callback: URB completed. */
static void mon_bin_complete(void *data, struct urb *urb)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'C');
}

/*
 * mon_reader callback: submission error. Records a header-only 'E' event
 * carrying the error code in status; no payload and no setup capture.
 */
static void mon_bin_error(void *data, struct urb *urb, int error)
{
	struct mon_reader_bin *rp = data;
	unsigned long flags;
	unsigned int offset;
	struct mon_bin_hdr *ep;

	spin_lock_irqsave(&rp->b_lock, flags);

	offset = mon_buff_area_alloc(rp, PKT_SIZE);
	if (offset == ~0) {
		/* Not incrementing cnt_lost. Just because. */
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);

	memset(ep, 0, PKT_SIZE);
	ep->type = 'E';
	ep->xfer_type = usb_pipetype(urb->pipe);
	/* We use the fact that usb_pipein() returns 0x80 */
	ep->epnum = usb_pipeendpoint(urb->pipe) | usb_pipein(urb->pipe);
	ep->devnum = usb_pipedevice(urb->pipe);
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->status = error;

	ep->flag_setup = '-';
	ep->flag_data = 'E';

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

/*
 * open(): allocate a reader with a default-sized ring and register it
 * with the bus. Error paths unwind in reverse allocation order.
 */
static int mon_bin_open(struct inode *inode, struct file *file)
{
	struct mon_bus *mbus;
	struct mon_reader_bin *rp;
	size_t size;
	int rc;

	mutex_lock(&mon_lock);
	if ((mbus = mon_bus_lookup(iminor(inode))) == NULL) {
		mutex_unlock(&mon_lock);
		return -ENODEV;
	}
	if (mbus != &mon_bus0 && mbus->u_bus == NULL) {
		printk(KERN_ERR TAG ": consistency error on open\n");
		mutex_unlock(&mon_lock);
		return -ENODEV;
	}

	rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
	if (rp == NULL) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	spin_lock_init(&rp->b_lock);
	init_waitqueue_head(&rp->b_wait);
	mutex_init(&rp->fetch_lock);

	rp->b_size = BUFF_DFL;

	size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
	if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
		rc = -ENOMEM;
		goto err_allocvec;
	}

	if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
		goto err_allocbuff;

	rp->r.m_bus = mbus;
	rp->r.r_data = rp;
	rp->r.rnf_submit = mon_bin_submit;
	rp->r.rnf_error = mon_bin_error;
	rp->r.rnf_complete = mon_bin_complete;

	mon_reader_add(mbus, &rp->r);

	file->private_data = rp;
	mutex_unlock(&mon_lock);
	return 0;

err_allocbuff:
	kfree(rp->b_vec);
err_allocvec:
	kfree(rp);
err_alloc:
	mutex_unlock(&mon_lock);
	return rc;
}

/*
 * Extract an event from buffer and copy it to user space.
 * Wait if there is no event ready.
 * Returns zero or error.
 */
static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
    struct mon_bin_hdr __user *hdr, void __user *data, unsigned int nbytes)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	size_t step_len;
	unsigned int offset;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (copy_to_user(hdr, ep, sizeof(struct mon_bin_hdr))) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	/* Copy at most what the caller allocated. */
	step_len = min(ep->len_cap, nbytes);
	if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size)
		offset = 0;

	if (copy_from_buf(rp, offset, data, step_len)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;

	mutex_unlock(&rp->fetch_lock);
	return 0;
}

/*
 * release(): unregister the reader and free the ring.
 */
static int mon_bin_release(struct inode *inode, struct file *file)
{
	struct mon_reader_bin *rp = file->private_data;
	struct mon_bus* mbus = rp->r.m_bus;

	mutex_lock(&mon_lock);

	if (mbus->nreaders <= 0) {
		printk(KERN_ERR TAG ": consistency error on close\n");
		mutex_unlock(&mon_lock);
		return 0;
	}
	mon_reader_del(mbus, &rp->r);

	mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
	kfree(rp->b_vec);
	kfree(rp);

	mutex_unlock(&mon_lock);
	return 0;
}

/*
 * read(): byte-stream view of the ring. Each event is delivered as its
 * 48-byte header followed by len_cap data bytes; b_read tracks progress
 * within the current event across partial reads.
 */
static ssize_t mon_bin_read(struct file *file, char __user *buf,
    size_t nbytes, loff_t *ppos)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned long flags;
	struct mon_bin_hdr *ep;
	unsigned int offset;
	size_t step_len;
	char *ptr;
	ssize_t done = 0;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	/* First, the remainder of the header, if any. */
	if (rp->b_read < sizeof(struct mon_bin_hdr)) {
		step_len = min(nbytes, sizeof(struct mon_bin_hdr) - rp->b_read);
		ptr = ((char *)ep) + rp->b_read;
		if (step_len && copy_to_user(buf, ptr, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	/* Then, the captured data. */
	if (rp->b_read >= sizeof(struct mon_bin_hdr)) {
		step_len = min(nbytes, (size_t)ep->len_cap);
		offset = rp->b_out + PKT_SIZE;
		offset += rp->b_read - sizeof(struct mon_bin_hdr);
		if (offset >= rp->b_size)
			offset -= rp->b_size;
		if (copy_from_buf(rp, offset, buf, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	/*
	 * Check if whole packet was read, and if so, jump to the next one.
	 */
	if (rp->b_read >= sizeof(struct mon_bin_hdr) + ep->len_cap) {
		spin_lock_irqsave(&rp->b_lock, flags);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
		spin_unlock_irqrestore(&rp->b_lock, flags);
		rp->b_read = 0;
	}

	mutex_unlock(&rp->fetch_lock);
	return done;
}

/*
 * Remove at most nevents from chunked buffer.
 * Returns the number of removed events.
 */
static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	int i;

	/* Both locks: mon_buff_area_free touches b_cnt and b_out. */
	mutex_lock(&rp->fetch_lock);
	spin_lock_irqsave(&rp->b_lock, flags);
	for (i = 0; i < nevents; ++i) {
		if (MON_RING_EMPTY(rp))
			break;

		ep = MON_OFF2HDR(rp, rp->b_out);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;
	mutex_unlock(&rp->fetch_lock);
	return i;
}

/*
 * Fetch at most max event offsets into the buffer and put them into vec.
 * The events are usually freed later with mon_bin_flush.
 * Return the effective number of events fetched.
 */
static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
    u32 __user *vec, unsigned int max)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	/* Snapshot the fill level; new events may arrive concurrently. */
	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		if (nevents >= max)
			break;

		ep = MON_OFF2HDR(rp, cur_out);
		if (put_user(cur_out, &vec[nevents])) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

/*
 * Count events. This is almost the same as the above mon_bin_fetch,
 * only we do not store offsets into user vector, and we have no limit.
775 */ 776static int mon_bin_queued(struct mon_reader_bin *rp) 777{ 778 unsigned int cur_out; 779 unsigned int bytes, avail; 780 unsigned int size; 781 unsigned int nevents; 782 struct mon_bin_hdr *ep; 783 unsigned long flags; 784 785 mutex_lock(&rp->fetch_lock); 786 787 spin_lock_irqsave(&rp->b_lock, flags); 788 avail = rp->b_cnt; 789 spin_unlock_irqrestore(&rp->b_lock, flags); 790 791 cur_out = rp->b_out; 792 nevents = 0; 793 bytes = 0; 794 while (bytes < avail) { 795 ep = MON_OFF2HDR(rp, cur_out); 796 797 nevents++; 798 size = ep->len_cap + PKT_SIZE; 799 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); 800 if ((cur_out += size) >= rp->b_size) 801 cur_out -= rp->b_size; 802 bytes += size; 803 } 804 805 mutex_unlock(&rp->fetch_lock); 806 return nevents; 807} 808 809/* 810 */ 811static int mon_bin_ioctl(struct inode *inode, struct file *file, 812 unsigned int cmd, unsigned long arg) 813{ 814 struct mon_reader_bin *rp = file->private_data; 815 // struct mon_bus* mbus = rp->r.m_bus; 816 int ret = 0; 817 struct mon_bin_hdr *ep; 818 unsigned long flags; 819 820 switch (cmd) { 821 822 case MON_IOCQ_URB_LEN: 823 /* 824 * N.B. This only returns the size of data, without the header. 825 */ 826 spin_lock_irqsave(&rp->b_lock, flags); 827 if (!MON_RING_EMPTY(rp)) { 828 ep = MON_OFF2HDR(rp, rp->b_out); 829 ret = ep->len_cap; 830 } 831 spin_unlock_irqrestore(&rp->b_lock, flags); 832 break; 833 834 case MON_IOCQ_RING_SIZE: 835 ret = rp->b_size; 836 break; 837 838 case MON_IOCT_RING_SIZE: 839 /* 840 * Changing the buffer size will flush it's contents; the new 841 * buffer is allocated before releasing the old one to be sure 842 * the device will stay functional also in case of memory 843 * pressure. 
844 */ 845 { 846 int size; 847 struct mon_pgmap *vec; 848 849 if (arg < BUFF_MIN || arg > BUFF_MAX) 850 return -EINVAL; 851 852 size = CHUNK_ALIGN(arg); 853 if ((vec = kzalloc(sizeof(struct mon_pgmap) * (size/CHUNK_SIZE), 854 GFP_KERNEL)) == NULL) { 855 ret = -ENOMEM; 856 break; 857 } 858 859 ret = mon_alloc_buff(vec, size/CHUNK_SIZE); 860 if (ret < 0) { 861 kfree(vec); 862 break; 863 } 864 865 mutex_lock(&rp->fetch_lock); 866 spin_lock_irqsave(&rp->b_lock, flags); 867 mon_free_buff(rp->b_vec, size/CHUNK_SIZE); 868 kfree(rp->b_vec); 869 rp->b_vec = vec; 870 rp->b_size = size; 871 rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0; 872 rp->cnt_lost = 0; 873 spin_unlock_irqrestore(&rp->b_lock, flags); 874 mutex_unlock(&rp->fetch_lock); 875 } 876 break; 877 878 case MON_IOCH_MFLUSH: 879 ret = mon_bin_flush(rp, arg); 880 break; 881 882 case MON_IOCX_GET: 883 { 884 struct mon_bin_get getb; 885 886 if (copy_from_user(&getb, (void __user *)arg, 887 sizeof(struct mon_bin_get))) 888 return -EFAULT; 889 890 if (getb.alloc > 0x10000000) /* Want to cast to u32 */ 891 return -EINVAL; 892 ret = mon_bin_get_event(file, rp, 893 getb.hdr, getb.data, (unsigned int)getb.alloc); 894 } 895 break; 896 897#ifdef CONFIG_COMPAT 898 case MON_IOCX_GET32: { 899 struct mon_bin_get32 getb; 900 901 if (copy_from_user(&getb, (void __user *)arg, 902 sizeof(struct mon_bin_get32))) 903 return -EFAULT; 904 905 ret = mon_bin_get_event(file, rp, 906 compat_ptr(getb.hdr32), compat_ptr(getb.data32), 907 getb.alloc32); 908 } 909 break; 910#endif 911 912 case MON_IOCX_MFETCH: 913 { 914 struct mon_bin_mfetch mfetch; 915 struct mon_bin_mfetch __user *uptr; 916 917 uptr = (struct mon_bin_mfetch __user *)arg; 918 919 if (copy_from_user(&mfetch, uptr, sizeof(mfetch))) 920 return -EFAULT; 921 922 if (mfetch.nflush) { 923 ret = mon_bin_flush(rp, mfetch.nflush); 924 if (ret < 0) 925 return ret; 926 if (put_user(ret, &uptr->nflush)) 927 return -EFAULT; 928 } 929 ret = mon_bin_fetch(file, rp, mfetch.offvec, 
mfetch.nfetch); 930 if (ret < 0) 931 return ret; 932 if (put_user(ret, &uptr->nfetch)) 933 return -EFAULT; 934 ret = 0; 935 } 936 break; 937 938#ifdef CONFIG_COMPAT 939 case MON_IOCX_MFETCH32: 940 { 941 struct mon_bin_mfetch32 mfetch; 942 struct mon_bin_mfetch32 __user *uptr; 943 944 uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg); 945 946 if (copy_from_user(&mfetch, uptr, sizeof(mfetch))) 947 return -EFAULT; 948 949 if (mfetch.nflush32) { 950 ret = mon_bin_flush(rp, mfetch.nflush32); 951 if (ret < 0) 952 return ret; 953 if (put_user(ret, &uptr->nflush32)) 954 return -EFAULT; 955 } 956 ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32), 957 mfetch.nfetch32); 958 if (ret < 0) 959 return ret; 960 if (put_user(ret, &uptr->nfetch32)) 961 return -EFAULT; 962 ret = 0; 963 } 964 break; 965#endif 966 967 case MON_IOCG_STATS: { 968 struct mon_bin_stats __user *sp; 969 unsigned int nevents; 970 unsigned int ndropped; 971 972 spin_lock_irqsave(&rp->b_lock, flags); 973 ndropped = rp->cnt_lost; 974 rp->cnt_lost = 0; 975 spin_unlock_irqrestore(&rp->b_lock, flags); 976 nevents = mon_bin_queued(rp); 977 978 sp = (struct mon_bin_stats __user *)arg; 979 if (put_user(rp->cnt_lost, &sp->dropped)) 980 return -EFAULT; 981 if (put_user(nevents, &sp->queued)) 982 return -EFAULT; 983 984 } 985 break; 986 987 default: 988 return -ENOTTY; 989 } 990 991 return ret; 992} 993 994static unsigned int 995mon_bin_poll(struct file *file, struct poll_table_struct *wait) 996{ 997 struct mon_reader_bin *rp = file->private_data; 998 unsigned int mask = 0; 999 unsigned long flags; 1000 1001 if (file->f_mode & FMODE_READ) 1002 poll_wait(file, &rp->b_wait, wait); 1003 1004 spin_lock_irqsave(&rp->b_lock, flags); 1005 if (!MON_RING_EMPTY(rp)) 1006 mask |= POLLIN | POLLRDNORM; /* readable */ 1007 spin_unlock_irqrestore(&rp->b_lock, flags); 1008 return mask; 1009} 1010 1011/* 1012 * open and close: just keep track of how many times the device is 1013 * mapped, to use the proper memory 
allocation function. 1014 */ 1015static void mon_bin_vma_open(struct vm_area_struct *vma) 1016{ 1017 struct mon_reader_bin *rp = vma->vm_private_data; 1018 rp->mmap_active++; 1019} 1020 1021static void mon_bin_vma_close(struct vm_area_struct *vma) 1022{ 1023 struct mon_reader_bin *rp = vma->vm_private_data; 1024 rp->mmap_active--; 1025} 1026 1027/* 1028 * Map ring pages to user space. 1029 */ 1030struct page *mon_bin_vma_nopage(struct vm_area_struct *vma, 1031 unsigned long address, int *type) 1032{ 1033 struct mon_reader_bin *rp = vma->vm_private_data; 1034 unsigned long offset, chunk_idx; 1035 struct page *pageptr; 1036 1037 offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT); 1038 if (offset >= rp->b_size) 1039 return NOPAGE_SIGBUS; 1040 chunk_idx = offset / CHUNK_SIZE; 1041 pageptr = rp->b_vec[chunk_idx].pg; 1042 get_page(pageptr); 1043 if (type) 1044 *type = VM_FAULT_MINOR; 1045 return pageptr; 1046} 1047 1048struct vm_operations_struct mon_bin_vm_ops = { 1049 .open = mon_bin_vma_open, 1050 .close = mon_bin_vma_close, 1051 .nopage = mon_bin_vma_nopage, 1052}; 1053 1054int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma) 1055{ 1056 /* don't do anything here: "nopage" will set up page table entries */ 1057 vma->vm_ops = &mon_bin_vm_ops; 1058 vma->vm_flags |= VM_RESERVED; 1059 vma->vm_private_data = filp->private_data; 1060 mon_bin_vma_open(vma); 1061 return 0; 1062} 1063 1064struct file_operations mon_fops_binary = { 1065 .owner = THIS_MODULE, 1066 .open = mon_bin_open, 1067 .llseek = no_llseek, 1068 .read = mon_bin_read, 1069 /* .write = mon_text_write, */ 1070 .poll = mon_bin_poll, 1071 .ioctl = mon_bin_ioctl, 1072 .release = mon_bin_release, 1073}; 1074 1075static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp) 1076{ 1077 DECLARE_WAITQUEUE(waita, current); 1078 unsigned long flags; 1079 1080 add_wait_queue(&rp->b_wait, &waita); 1081 set_current_state(TASK_INTERRUPTIBLE); 1082 1083 spin_lock_irqsave(&rp->b_lock, 
flags); 1084 while (MON_RING_EMPTY(rp)) { 1085 spin_unlock_irqrestore(&rp->b_lock, flags); 1086 1087 if (file->f_flags & O_NONBLOCK) { 1088 set_current_state(TASK_RUNNING); 1089 remove_wait_queue(&rp->b_wait, &waita); 1090 return -EWOULDBLOCK; /* Same as EAGAIN in Linux */ 1091 } 1092 schedule(); 1093 if (signal_pending(current)) { 1094 remove_wait_queue(&rp->b_wait, &waita); 1095 return -EINTR; 1096 } 1097 set_current_state(TASK_INTERRUPTIBLE); 1098 1099 spin_lock_irqsave(&rp->b_lock, flags); 1100 } 1101 spin_unlock_irqrestore(&rp->b_lock, flags); 1102 1103 set_current_state(TASK_RUNNING); 1104 remove_wait_queue(&rp->b_wait, &waita); 1105 return 0; 1106} 1107 1108static int mon_alloc_buff(struct mon_pgmap *map, int npages) 1109{ 1110 int n; 1111 unsigned long vaddr; 1112 1113 for (n = 0; n < npages; n++) { 1114 vaddr = get_zeroed_page(GFP_KERNEL); 1115 if (vaddr == 0) { 1116 while (n-- != 0) 1117 free_page((unsigned long) map[n].ptr); 1118 return -ENOMEM; 1119 } 1120 map[n].ptr = (unsigned char *) vaddr; 1121 map[n].pg = virt_to_page(vaddr); 1122 } 1123 return 0; 1124} 1125 1126static void mon_free_buff(struct mon_pgmap *map, int npages) 1127{ 1128 int n; 1129 1130 for (n = 0; n < npages; n++) 1131 free_page((unsigned long) map[n].ptr); 1132} 1133 1134int __init mon_bin_init(void) 1135{ 1136 int rc; 1137 1138 rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon"); 1139 if (rc < 0) 1140 goto err_dev; 1141 1142 cdev_init(&mon_bin_cdev, &mon_fops_binary); 1143 mon_bin_cdev.owner = THIS_MODULE; 1144 1145 rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR); 1146 if (rc < 0) 1147 goto err_add; 1148 1149 return 0; 1150 1151err_add: 1152 unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR); 1153err_dev: 1154 return rc; 1155} 1156 1157void mon_bin_exit(void) 1158{ 1159 cdev_del(&mon_bin_cdev); 1160 unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR); 1161} 1162