1/* 2 * Driver for OHCI 1394 controllers 3 * 4 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net> 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software Foundation, 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 */ 20 21#include <linux/bug.h> 22#include <linux/compiler.h> 23#include <linux/delay.h> 24#include <linux/device.h> 25#include <linux/dma-mapping.h> 26#include <linux/firewire.h> 27#include <linux/firewire-constants.h> 28#include <linux/init.h> 29#include <linux/interrupt.h> 30#include <linux/io.h> 31#include <linux/kernel.h> 32#include <linux/list.h> 33#include <linux/mm.h> 34#include <linux/module.h> 35#include <linux/moduleparam.h> 36#include <linux/mutex.h> 37#include <linux/pci.h> 38#include <linux/pci_ids.h> 39#include <linux/slab.h> 40#include <linux/spinlock.h> 41#include <linux/string.h> 42#include <linux/time.h> 43 44#include <asm/byteorder.h> 45#include <asm/page.h> 46#include <asm/system.h> 47 48#ifdef CONFIG_PPC_PMAC 49#include <asm/pmac_feature.h> 50#endif 51 52#include "core.h" 53#include "ohci.h" 54 55#define DESCRIPTOR_OUTPUT_MORE 0 56#define DESCRIPTOR_OUTPUT_LAST (1 << 12) 57#define DESCRIPTOR_INPUT_MORE (2 << 12) 58#define DESCRIPTOR_INPUT_LAST (3 << 12) 59#define DESCRIPTOR_STATUS (1 << 11) 60#define DESCRIPTOR_KEY_IMMEDIATE (2 << 8) 61#define DESCRIPTOR_PING (1 << 7) 62#define DESCRIPTOR_YY (1 << 6) 63#define DESCRIPTOR_NO_IRQ (0 << 4) 64#define DESCRIPTOR_IRQ_ERROR (1 << 4) 65#define DESCRIPTOR_IRQ_ALWAYS (3 << 4) 66#define DESCRIPTOR_BRANCH_ALWAYS (3 << 2) 67#define DESCRIPTOR_WAIT (3 << 0) 68 69struct descriptor { 70 __le16 req_count; 71 __le16 control; 72 __le32 data_address; 73 __le32 branch_address; 74 __le16 res_count; 75 __le16 transfer_status; 76} __attribute__((aligned(16))); 77 78#define CONTROL_SET(regs) (regs) 79#define CONTROL_CLEAR(regs) ((regs) + 4) 80#define COMMAND_PTR(regs) ((regs) + 12) 81#define CONTEXT_MATCH(regs) ((regs) + 16) 82 83struct ar_buffer { 84 struct descriptor descriptor; 85 struct ar_buffer *next; 86 __le32 data[0]; 87}; 88 89struct ar_context { 90 struct fw_ohci *ohci; 91 struct ar_buffer *current_buffer; 92 struct ar_buffer *last_buffer; 93 void *pointer; 94 u32 regs; 95 struct tasklet_struct tasklet; 96}; 97 98struct context; 99 100typedef int (*descriptor_callback_t)(struct context *ctx, 101 struct descriptor *d, 102 struct descriptor *last); 103 104/* 105 * A buffer that contains a block of DMA-able coherent memory used for 106 * storing a portion of a DMA descriptor program. 107 */ 108struct descriptor_buffer { 109 struct list_head list; 110 dma_addr_t buffer_bus; 111 size_t buffer_size; 112 size_t used; 113 struct descriptor buffer[0]; 114}; 115 116struct context { 117 struct fw_ohci *ohci; 118 u32 regs; 119 int total_allocation; 120 121 /* 122 * List of page-sized buffers for storing DMA descriptors. 123 * Head of list contains buffers in use and tail of list contains 124 * free buffers. 125 */ 126 struct list_head buffer_list; 127 128 /* 129 * Pointer to a buffer inside buffer_list that contains the tail 130 * end of the current DMA program. 131 */ 132 struct descriptor_buffer *buffer_tail; 133 134 /* 135 * The descriptor containing the branch address of the first 136 * descriptor that has not yet been filled by the device. 137 */ 138 struct descriptor *last; 139 140 /* 141 * The last descriptor in the DMA program. It contains the branch 142 * address that must be updated upon appending a new descriptor. 143 */ 144 struct descriptor *prev; 145 146 descriptor_callback_t callback; 147 148 struct tasklet_struct tasklet; 149}; 150 151#define IT_HEADER_SY(v) ((v) << 0) 152#define IT_HEADER_TCODE(v) ((v) << 4) 153#define IT_HEADER_CHANNEL(v) ((v) << 8) 154#define IT_HEADER_TAG(v) ((v) << 14) 155#define IT_HEADER_SPEED(v) ((v) << 16) 156#define IT_HEADER_DATA_LENGTH(v) ((v) << 16) 157 158struct iso_context { 159 struct fw_iso_context base; 160 struct context context; 161 int excess_bytes; 162 void *header; 163 size_t header_length; 164}; 165 166#define CONFIG_ROM_SIZE 1024 167 168struct fw_ohci { 169 struct fw_card card; 170 171 __iomem char *registers; 172 int node_id; 173 int generation; 174 int request_generation; /* for timestamping incoming requests */ 175 unsigned quirks; 176 unsigned int pri_req_max; 177 u32 bus_time; 178 bool is_root; 179 bool csr_state_setclear_abdicate; 180 181 /* 182 * Spinlock for accessing fw_ohci data. Never call out of 183 * this driver with this lock held. 184 */ 185 spinlock_t lock; 186 187 struct mutex phy_reg_mutex; 188 189 struct ar_context ar_request_ctx; 190 struct ar_context ar_response_ctx; 191 struct context at_request_ctx; 192 struct context at_response_ctx; 193 194 u32 it_context_mask; /* unoccupied IT contexts */ 195 struct iso_context *it_context_list; 196 u64 ir_context_channels; /* unoccupied channels */ 197 u32 ir_context_mask; /* unoccupied IR contexts */ 198 struct iso_context *ir_context_list; 199 u64 mc_channels; /* channels in use by the multichannel IR context */ 200 bool mc_allocated; 201 202 __be32 *config_rom; 203 dma_addr_t config_rom_bus; 204 __be32 *next_config_rom; 205 dma_addr_t next_config_rom_bus; 206 __be32 next_header; 207 208 __le32 *self_id_cpu; 209 dma_addr_t self_id_bus; 210 struct tasklet_struct bus_reset_tasklet; 211 212 u32 self_id_buffer[512]; 213}; 214 215static inline struct fw_ohci *fw_ohci(struct fw_card *card) 216{ 217 return container_of(card, struct fw_ohci, card); 218} 219 220#define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000 221#define IR_CONTEXT_BUFFER_FILL 0x80000000 222#define IR_CONTEXT_ISOCH_HEADER 0x40000000 223#define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000 224#define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000 225#define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000 226 227#define CONTEXT_RUN 0x8000 228#define CONTEXT_WAKE 0x1000 229#define CONTEXT_DEAD 0x0800 230#define CONTEXT_ACTIVE 0x0400 231 232#define OHCI1394_MAX_AT_REQ_RETRIES 0xf 233#define OHCI1394_MAX_AT_RESP_RETRIES 0x2 234#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8 235 236#define OHCI1394_REGISTER_SIZE 0x800 237#define OHCI_LOOP_COUNT 500 238#define OHCI1394_PCI_HCI_Control 0x40 239#define SELF_ID_BUF_SIZE 0x800 240#define OHCI_TCODE_PHY_PACKET 0x0e 241#define OHCI_VERSION_1_1 0x010010 242 243static char ohci_driver_name[] = KBUILD_MODNAME; 244 245#define PCI_DEVICE_ID_AGERE_FW643 0x5901 246#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 247#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 248 249#define QUIRK_CYCLE_TIMER 1 250#define QUIRK_RESET_PACKET 2 251#define QUIRK_BE_HEADERS 4 252#define QUIRK_NO_1394A 8 253#define QUIRK_NO_MSI 16 254 255/* In case of multiple matches in ohci_quirks[], only the first one is used. */ 256static const struct { 257 unsigned short vendor, device, revision, flags; 258} ohci_quirks[] = { 259 {PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID, 260 QUIRK_CYCLE_TIMER}, 261 262 {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID, 263 QUIRK_BE_HEADERS}, 264 265 {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, 266 QUIRK_NO_MSI}, 267 268 {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID, 269 QUIRK_NO_MSI}, 270 271 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID, 272 QUIRK_CYCLE_TIMER}, 273 274 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, 275 QUIRK_CYCLE_TIMER}, 276 277 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID, 278 QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A}, 279 280 {PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID, 281 QUIRK_RESET_PACKET}, 282 283 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID, 284 QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, 285}; 286 287/* This overrides anything that was found in ohci_quirks[]. */ 288static int param_quirks; 289module_param_named(quirks, param_quirks, int, 0644); 290MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" 291 ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER) 292 ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) 293 ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) 294 ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A) 295 ", disable MSI = " __stringify(QUIRK_NO_MSI) 296 ")"); 297 298#define OHCI_PARAM_DEBUG_AT_AR 1 299#define OHCI_PARAM_DEBUG_SELFIDS 2 300#define OHCI_PARAM_DEBUG_IRQS 4 301#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */ 302 303#ifdef CONFIG_FIREWIRE_OHCI_DEBUG 304 305static int param_debug; 306module_param_named(debug, param_debug, int, 0644); 307MODULE_PARM_DESC(debug, "Verbose logging (default = 0" 308 ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR) 309 ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS) 310 ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS) 311 ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS) 312 ", or a combination, or all = -1)"); 313 314static void log_irqs(u32 evt) 315{ 316 if (likely(!(param_debug & 317 (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS)))) 318 return; 319 320 if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) && 321 !(evt & OHCI1394_busReset)) 322 return; 323 324 fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, 325 evt & OHCI1394_selfIDComplete ? " selfID" : "", 326 evt & OHCI1394_RQPkt ? " AR_req" : "", 327 evt & OHCI1394_RSPkt ? " AR_resp" : "", 328 evt & OHCI1394_reqTxComplete ? " AT_req" : "", 329 evt & OHCI1394_respTxComplete ? " AT_resp" : "", 330 evt & OHCI1394_isochRx ? " IR" : "", 331 evt & OHCI1394_isochTx ? " IT" : "", 332 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", 333 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", 334 evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "", 335 evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", 336 evt & OHCI1394_regAccessFail ? " regAccessFail" : "", 337 evt & OHCI1394_busReset ? " busReset" : "", 338 evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt | 339 OHCI1394_RSPkt | OHCI1394_reqTxComplete | 340 OHCI1394_respTxComplete | OHCI1394_isochRx | 341 OHCI1394_isochTx | OHCI1394_postedWriteErr | 342 OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds | 343 OHCI1394_cycleInconsistent | 344 OHCI1394_regAccessFail | OHCI1394_busReset) 345 ? " ?" : ""); 346} 347 348static const char *speed[] = { 349 [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta", 350}; 351static const char *power[] = { 352 [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W", 353 [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W", 354}; 355static const char port[] = { '.', '-', 'p', 'c', }; 356 357static char _p(u32 *s, int shift) 358{ 359 return port[*s >> shift & 3]; 360} 361 362static void log_selfids(int node_id, int generation, int self_id_count, u32 *s) 363{ 364 if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS))) 365 return; 366 367 fw_notify("%d selfIDs, generation %d, local node ID %04x\n", 368 self_id_count, generation, node_id); 369 370 for (; self_id_count--; ++s) 371 if ((*s & 1 << 23) == 0) 372 fw_notify("selfID 0: %08x, phy %d [%c%c%c] " 373 "%s gc=%d %s %s%s%s\n", 374 *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2), 375 speed[*s >> 14 & 3], *s >> 16 & 63, 376 power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "", 377 *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : ""); 378 else 379 fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n", 380 *s, *s >> 24 & 63, 381 _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10), 382 _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2)); 383} 384 385static const char *evts[] = { 386 [0x00] = "evt_no_status", [0x01] = "-reserved-", 387 [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack", 388 [0x04] = "evt_underrun", [0x05] = "evt_overrun", 389 [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read", 390 [0x08] = "evt_data_write", [0x09] = "evt_bus_reset", 391 [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err", 392 [0x0c] = "-reserved-", [0x0d] = "-reserved-", 393 [0x0e] = "evt_unknown", [0x0f] = "evt_flushed", 394 [0x10] = "-reserved-", [0x11] = "ack_complete", 395 [0x12] = "ack_pending ", [0x13] = "-reserved-", 396 [0x14] = "ack_busy_X", [0x15] = "ack_busy_A", 397 [0x16] = "ack_busy_B", [0x17] = "-reserved-", 398 [0x18] = "-reserved-", [0x19] = "-reserved-", 399 [0x1a] = "-reserved-", [0x1b] = "ack_tardy", 400 [0x1c] = "-reserved-", [0x1d] = "ack_data_error", 401 [0x1e] = "ack_type_error", [0x1f] = "-reserved-", 402 [0x20] = "pending/cancelled", 403}; 404static const char *tcodes[] = { 405 [0x0] = "QW req", [0x1] = "BW req", 406 [0x2] = "W resp", [0x3] = "-reserved-", 407 [0x4] = "QR req", [0x5] = "BR req", 408 [0x6] = "QR resp", [0x7] = "BR resp", 409 [0x8] = "cycle start", [0x9] = "Lk req", 410 [0xa] = "async stream packet", [0xb] = "Lk resp", 411 [0xc] = "-reserved-", [0xd] = "-reserved-", 412 [0xe] = "link internal", [0xf] = "-reserved-", 413}; 414static const char *phys[] = { 415 [0x0] = "phy config packet", [0x1] = "link-on packet", 416 [0x2] = "self-id packet", [0x3] = "-reserved-", 417}; 418 419static void log_ar_at_event(char dir, int speed, u32 *header, int evt) 420{ 421 int tcode = header[0] >> 4 & 0xf; 422 char specific[12]; 423 424 if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR))) 425 return; 426 427 if (unlikely(evt >= ARRAY_SIZE(evts))) 428 evt = 0x1f; 429 430 if (evt == OHCI1394_evt_bus_reset) { 431 fw_notify("A%c evt_bus_reset, generation %d\n", 432 dir, (header[2] >> 16) & 0xff); 433 return; 434 } 435 436 if (header[0] == ~header[1]) { 437 fw_notify("A%c %s, %s, %08x\n", 438 dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]); 439 return; 440 } 441 442 switch (tcode) { 443 case 0x0: case 0x6: case 0x8: 444 snprintf(specific, sizeof(specific), " = %08x", 445 be32_to_cpu((__force __be32)header[3])); 446 break; 447 case 0x1: case 0x5: case 0x7: case 0x9: case 0xb: 448 snprintf(specific, sizeof(specific), " %x,%x", 449 header[3] >> 16, header[3] & 0xffff); 450 break; 451 default: 452 specific[0] = '\0'; 453 } 454 455 switch (tcode) { 456 case 0xe: case 0xa: 457 fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]); 458 break; 459 case 0x0: case 0x1: case 0x4: case 0x5: case 0x9: 460 fw_notify("A%c spd %x tl %02x, " 461 "%04x -> %04x, %s, " 462 "%s, %04x%08x%s\n", 463 dir, speed, header[0] >> 10 & 0x3f, 464 header[1] >> 16, header[0] >> 16, evts[evt], 465 tcodes[tcode], header[1] & 0xffff, header[2], specific); 466 break; 467 default: 468 fw_notify("A%c spd %x tl %02x, " 469 "%04x -> %04x, %s, " 470 "%s%s\n", 471 dir, speed, header[0] >> 10 & 0x3f, 472 header[1] >> 16, header[0] >> 16, evts[evt], 473 tcodes[tcode], specific); 474 } 475} 476 477#else 478 479#define param_debug 0 480static inline void log_irqs(u32 evt) {} 481static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {} 482static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {} 483 484#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */ 485 486static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data) 487{ 488 writel(data, ohci->registers + offset); 489} 490 491static inline u32 reg_read(const struct fw_ohci *ohci, int offset) 492{ 493 return readl(ohci->registers + offset); 494} 495 496static inline void flush_writes(const struct fw_ohci *ohci) 497{ 498 /* Do a dummy read to flush writes. */ 499 reg_read(ohci, OHCI1394_Version); 500} 501 502static int read_phy_reg(struct fw_ohci *ohci, int addr) 503{ 504 u32 val; 505 int i; 506 507 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); 508 for (i = 0; i < 3 + 100; i++) { 509 val = reg_read(ohci, OHCI1394_PhyControl); 510 if (val & OHCI1394_PhyControl_ReadDone) 511 return OHCI1394_PhyControl_ReadData(val); 512 513 /* 514 * Try a few times without waiting. Sleeping is necessary 515 * only when the link/PHY interface is busy. 516 */ 517 if (i >= 3) 518 msleep(1); 519 } 520 fw_error("failed to read phy reg\n"); 521 522 return -EBUSY; 523} 524 525static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val) 526{ 527 int i; 528 529 reg_write(ohci, OHCI1394_PhyControl, 530 OHCI1394_PhyControl_Write(addr, val)); 531 for (i = 0; i < 3 + 100; i++) { 532 val = reg_read(ohci, OHCI1394_PhyControl); 533 if (!(val & OHCI1394_PhyControl_WritePending)) 534 return 0; 535 536 if (i >= 3) 537 msleep(1); 538 } 539 fw_error("failed to write phy reg\n"); 540 541 return -EBUSY; 542} 543 544static int update_phy_reg(struct fw_ohci *ohci, int addr, 545 int clear_bits, int set_bits) 546{ 547 int ret = read_phy_reg(ohci, addr); 548 if (ret < 0) 549 return ret; 550 551 /* 552 * The interrupt status bits are cleared by writing a one bit. 553 * Avoid clearing them unless explicitly requested in set_bits. 554 */ 555 if (addr == 5) 556 clear_bits |= PHY_INT_STATUS_BITS; 557 558 return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits); 559} 560 561static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr) 562{ 563 int ret; 564 565 ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5); 566 if (ret < 0) 567 return ret; 568 569 return read_phy_reg(ohci, addr); 570} 571 572static int ohci_read_phy_reg(struct fw_card *card, int addr) 573{ 574 struct fw_ohci *ohci = fw_ohci(card); 575 int ret; 576 577 mutex_lock(&ohci->phy_reg_mutex); 578 ret = read_phy_reg(ohci, addr); 579 mutex_unlock(&ohci->phy_reg_mutex); 580 581 return ret; 582} 583 584static int ohci_update_phy_reg(struct fw_card *card, int addr, 585 int clear_bits, int set_bits) 586{ 587 struct fw_ohci *ohci = fw_ohci(card); 588 int ret; 589 590 mutex_lock(&ohci->phy_reg_mutex); 591 ret = update_phy_reg(ohci, addr, clear_bits, set_bits); 592 mutex_unlock(&ohci->phy_reg_mutex); 593 594 return ret; 595} 596 597static int ar_context_add_page(struct ar_context *ctx) 598{ 599 struct device *dev = ctx->ohci->card.device; 600 struct ar_buffer *ab; 601 dma_addr_t uninitialized_var(ab_bus); 602 size_t offset; 603 604 ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC); 605 if (ab == NULL) 606 return -ENOMEM; 607 608 ab->next = NULL; 609 memset(&ab->descriptor, 0, sizeof(ab->descriptor)); 610 ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | 611 DESCRIPTOR_STATUS | 612 DESCRIPTOR_BRANCH_ALWAYS); 613 offset = offsetof(struct ar_buffer, data); 614 ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset); 615 ab->descriptor.data_address = cpu_to_le32(ab_bus + offset); 616 ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset); 617 ab->descriptor.branch_address = 0; 618 619 wmb(); /* finish init of new descriptors before branch_address update */ 620 ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1); 621 ctx->last_buffer->next = ab; 622 ctx->last_buffer = ab; 623 624 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); 625 flush_writes(ctx->ohci); 626 627 return 0; 628} 629 630static void ar_context_release(struct ar_context *ctx) 631{ 632 struct ar_buffer *ab, *ab_next; 633 size_t offset; 634 dma_addr_t ab_bus; 635 636 for (ab = ctx->current_buffer; ab; ab = ab_next) { 637 ab_next = ab->next; 638 offset = offsetof(struct ar_buffer, data); 639 ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset; 640 dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE, 641 ab, ab_bus); 642 } 643} 644 645#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) 646#define cond_le32_to_cpu(v) \ 647 (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v)) 648#else 649#define cond_le32_to_cpu(v) le32_to_cpu(v) 650#endif 651 652static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) 653{ 654 struct fw_ohci *ohci = ctx->ohci; 655 struct fw_packet p; 656 u32 status, length, tcode; 657 int evt; 658 659 p.header[0] = cond_le32_to_cpu(buffer[0]); 660 p.header[1] = cond_le32_to_cpu(buffer[1]); 661 p.header[2] = cond_le32_to_cpu(buffer[2]); 662 663 tcode = (p.header[0] >> 4) & 0x0f; 664 switch (tcode) { 665 case TCODE_WRITE_QUADLET_REQUEST: 666 case TCODE_READ_QUADLET_RESPONSE: 667 p.header[3] = (__force __u32) buffer[3]; 668 p.header_length = 16; 669 p.payload_length = 0; 670 break; 671 672 case TCODE_READ_BLOCK_REQUEST : 673 p.header[3] = cond_le32_to_cpu(buffer[3]); 674 p.header_length = 16; 675 p.payload_length = 0; 676 break; 677 678 case TCODE_WRITE_BLOCK_REQUEST: 679 case TCODE_READ_BLOCK_RESPONSE: 680 case TCODE_LOCK_REQUEST: 681 case TCODE_LOCK_RESPONSE: 682 p.header[3] = cond_le32_to_cpu(buffer[3]); 683 p.header_length = 16; 684 p.payload_length = p.header[3] >> 16; 685 break; 686 687 case TCODE_WRITE_RESPONSE: 688 case TCODE_READ_QUADLET_REQUEST: 689 case OHCI_TCODE_PHY_PACKET: 690 p.header_length = 12; 691 p.payload_length = 0; 692 break; 693 694 default: 695 p.header_length = 0; 696 p.payload_length = 0; 697 } 698 699 p.payload = (void *) buffer + p.header_length; 700 701 length = (p.header_length + p.payload_length + 3) / 4; 702 status = cond_le32_to_cpu(buffer[length]); 703 evt = (status >> 16) & 0x1f; 704 705 p.ack = evt - 16; 706 p.speed = (status >> 21) & 0x7; 707 p.timestamp = status & 0xffff; 708 p.generation = ohci->request_generation; 709 710 log_ar_at_event('R', p.speed, p.header, evt); 711 712 /* 713 * Several controllers, notably from NEC and VIA, forget to 714 * write ack_complete status at PHY packet reception. 715 */ 716 if (evt == OHCI1394_evt_no_status && 717 (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4)) 718 p.ack = ACK_COMPLETE; 719 720 /* 721 * The OHCI bus reset handler synthesizes a PHY packet with 722 * the new generation number when a bus reset happens (see 723 * section 8.4.2.3). This helps us determine when a request 724 * was received and make sure we send the response in the same 725 * generation. We only need this for requests; for responses 726 * we use the unique tlabel for finding the matching 727 * request. 728 * 729 * Alas some chips sometimes emit bus reset packets with a 730 * wrong generation. We set the correct generation for these 731 * at a slightly incorrect time (in bus_reset_tasklet). 732 */ 733 if (evt == OHCI1394_evt_bus_reset) { 734 if (!(ohci->quirks & QUIRK_RESET_PACKET)) 735 ohci->request_generation = (p.header[2] >> 16) & 0xff; 736 } else if (ctx == &ohci->ar_request_ctx) { 737 fw_core_handle_request(&ohci->card, &p); 738 } else { 739 fw_core_handle_response(&ohci->card, &p); 740 } 741 742 return buffer + length + 1; 743} 744 745static void ar_context_tasklet(unsigned long data) 746{ 747 struct ar_context *ctx = (struct ar_context *)data; 748 struct fw_ohci *ohci = ctx->ohci; 749 struct ar_buffer *ab; 750 struct descriptor *d; 751 void *buffer, *end; 752 753 ab = ctx->current_buffer; 754 d = &ab->descriptor; 755 756 if (d->res_count == 0) { 757 size_t size, size2, rest, pktsize, size3, offset; 758 dma_addr_t start_bus; 759 void *start; 760 761 /* 762 * This descriptor is finished and we may have a 763 * packet split across this and the next buffer. We 764 * reuse the page for reassembling the split packet. 765 */ 766 767 offset = offsetof(struct ar_buffer, data); 768 start = ab; 769 start_bus = le32_to_cpu(ab->descriptor.data_address) - offset; 770 buffer = ab->data; 771 772 ab = ab->next; 773 d = &ab->descriptor; 774 size = start + PAGE_SIZE - ctx->pointer; 775 /* valid buffer data in the next page */ 776 rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count); 777 /* what actually fits in this page */ 778 size2 = min(rest, (size_t)PAGE_SIZE - offset - size); 779 memmove(buffer, ctx->pointer, size); 780 memcpy(buffer + size, ab->data, size2); 781 782 while (size > 0) { 783 void *next = handle_ar_packet(ctx, buffer); 784 pktsize = next - buffer; 785 if (pktsize >= size) { 786 /* 787 * We have handled all the data that was 788 * originally in this page, so we can now 789 * continue in the next page. 790 */ 791 buffer = next; 792 break; 793 } 794 /* move the next packet to the start of the buffer */ 795 memmove(buffer, next, size + size2 - pktsize); 796 size -= pktsize; 797 /* fill up this page again */ 798 size3 = min(rest - size2, 799 (size_t)PAGE_SIZE - offset - size - size2); 800 memcpy(buffer + size + size2, 801 (void *) ab->data + size2, size3); 802 size2 += size3; 803 } 804 805 if (rest > 0) { 806 /* handle the packets that are fully in the next page */ 807 buffer = (void *) ab->data + 808 (buffer - (start + offset + size)); 809 end = (void *) ab->data + rest; 810 811 while (buffer < end) 812 buffer = handle_ar_packet(ctx, buffer); 813 814 ctx->current_buffer = ab; 815 ctx->pointer = end; 816 817 dma_free_coherent(ohci->card.device, PAGE_SIZE, 818 start, start_bus); 819 ar_context_add_page(ctx); 820 } else { 821 ctx->pointer = start + PAGE_SIZE; 822 } 823 } else { 824 buffer = ctx->pointer; 825 ctx->pointer = end = 826 (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count); 827 828 while (buffer < end) 829 buffer = handle_ar_packet(ctx, buffer); 830 } 831} 832 833static int ar_context_init(struct ar_context *ctx, 834 struct fw_ohci *ohci, u32 regs) 835{ 836 struct ar_buffer ab; 837 838 ctx->regs = regs; 839 ctx->ohci = ohci; 840 ctx->last_buffer = &ab; 841 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx); 842 843 ar_context_add_page(ctx); 844 ar_context_add_page(ctx); 845 ctx->current_buffer = ab.next; 846 ctx->pointer = ctx->current_buffer->data; 847 848 return 0; 849} 850 851static void ar_context_run(struct ar_context *ctx) 852{ 853 struct ar_buffer *ab = ctx->current_buffer; 854 dma_addr_t ab_bus; 855 size_t offset; 856 857 offset = offsetof(struct ar_buffer, data); 858 ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset; 859 860 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1); 861 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN); 862 flush_writes(ctx->ohci); 863} 864 865static struct descriptor *find_branch_descriptor(struct descriptor *d, int z) 866{ 867 int b, key; 868 869 b = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2; 870 key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8; 871 872 /* figure out which descriptor the branch address goes in */ 873 if (z == 2 && (b == 3 || key == 2)) 874 return d; 875 else 876 return d + z - 1; 877} 878 879static void context_tasklet(unsigned long data) 880{ 881 struct context *ctx = (struct context *) data; 882 struct descriptor *d, *last; 883 u32 address; 884 int z; 885 struct descriptor_buffer *desc; 886 887 desc = list_entry(ctx->buffer_list.next, 888 struct descriptor_buffer, list); 889 last = ctx->last; 890 while (last->branch_address != 0) { 891 struct descriptor_buffer *old_desc = desc; 892 address = le32_to_cpu(last->branch_address); 893 z = address & 0xf; 894 address &= ~0xf; 895 896 /* If the branch address points to a buffer outside of the 897 * current buffer, advance to the next buffer. */ 898 if (address < desc->buffer_bus || 899 address >= desc->buffer_bus + desc->used) 900 desc = list_entry(desc->list.next, 901 struct descriptor_buffer, list); 902 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d); 903 last = find_branch_descriptor(d, z); 904 905 if (!ctx->callback(ctx, d, last)) 906 break; 907 908 if (old_desc != desc) { 909 /* If we've advanced to the next buffer, move the 910 * previous buffer to the free list. */ 911 unsigned long flags; 912 old_desc->used = 0; 913 spin_lock_irqsave(&ctx->ohci->lock, flags); 914 list_move_tail(&old_desc->list, &ctx->buffer_list); 915 spin_unlock_irqrestore(&ctx->ohci->lock, flags); 916 } 917 ctx->last = last; 918 } 919} 920 921/* 922 * Allocate a new buffer and add it to the list of free buffers for this 923 * context. Must be called with ohci->lock held. 924 */ 925static int context_add_buffer(struct context *ctx) 926{ 927 struct descriptor_buffer *desc; 928 dma_addr_t uninitialized_var(bus_addr); 929 int offset; 930 931 /* 932 * 16MB of descriptors should be far more than enough for any DMA 933 * program. This will catch run-away userspace or DoS attacks. 934 */ 935 if (ctx->total_allocation >= 16*1024*1024) 936 return -ENOMEM; 937 938 desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, 939 &bus_addr, GFP_ATOMIC); 940 if (!desc) 941 return -ENOMEM; 942 943 offset = (void *)&desc->buffer - (void *)desc; 944 desc->buffer_size = PAGE_SIZE - offset; 945 desc->buffer_bus = bus_addr + offset; 946 desc->used = 0; 947 948 list_add_tail(&desc->list, &ctx->buffer_list); 949 ctx->total_allocation += PAGE_SIZE; 950 951 return 0; 952} 953 954static int context_init(struct context *ctx, struct fw_ohci *ohci, 955 u32 regs, descriptor_callback_t callback) 956{ 957 ctx->ohci = ohci; 958 ctx->regs = regs; 959 ctx->total_allocation = 0; 960 961 INIT_LIST_HEAD(&ctx->buffer_list); 962 if (context_add_buffer(ctx) < 0) 963 return -ENOMEM; 964 965 ctx->buffer_tail = list_entry(ctx->buffer_list.next, 966 struct descriptor_buffer, list); 967 968 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx); 969 ctx->callback = callback; 970 971 /* 972 * We put a dummy descriptor in the buffer that has a NULL 973 * branch address and looks like it's been sent. That way we 974 * have a descriptor to append DMA programs to. 975 */ 976 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer)); 977 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST); 978 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011); 979 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer); 980 ctx->last = ctx->buffer_tail->buffer; 981 ctx->prev = ctx->buffer_tail->buffer; 982 983 return 0; 984} 985 986static void context_release(struct context *ctx) 987{ 988 struct fw_card *card = &ctx->ohci->card; 989 struct descriptor_buffer *desc, *tmp; 990 991 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) 992 dma_free_coherent(card->device, PAGE_SIZE, desc, 993 desc->buffer_bus - 994 ((void *)&desc->buffer - (void *)desc)); 995} 996 997/* Must be called with ohci->lock held */ 998static struct descriptor *context_get_descriptors(struct context *ctx, 999 int z, dma_addr_t *d_bus) 1000{ 1001 struct descriptor *d = NULL; 1002 struct descriptor_buffer *desc = ctx->buffer_tail; 1003 1004 if (z * sizeof(*d) > desc->buffer_size) 1005 return NULL; 1006 1007 if (z * sizeof(*d) > desc->buffer_size - desc->used) { 1008 /* No room for the descriptor in this buffer, so advance to the 1009 * next one. */ 1010 1011 if (desc->list.next == &ctx->buffer_list) { 1012 /* If there is no free buffer next in the list, 1013 * allocate one. */ 1014 if (context_add_buffer(ctx) < 0) 1015 return NULL; 1016 } 1017 desc = list_entry(desc->list.next, 1018 struct descriptor_buffer, list); 1019 ctx->buffer_tail = desc; 1020 } 1021 1022 d = desc->buffer + desc->used / sizeof(*d); 1023 memset(d, 0, z * sizeof(*d)); 1024 *d_bus = desc->buffer_bus + desc->used; 1025 1026 return d; 1027} 1028 1029static void context_run(struct context *ctx, u32 extra) 1030{ 1031 struct fw_ohci *ohci = ctx->ohci; 1032 1033 reg_write(ohci, COMMAND_PTR(ctx->regs), 1034 le32_to_cpu(ctx->last->branch_address)); 1035 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0); 1036 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra); 1037 flush_writes(ohci); 1038} 1039 1040static void context_append(struct context *ctx, 1041 struct descriptor *d, int z, int extra) 1042{ 1043 dma_addr_t d_bus; 1044 struct descriptor_buffer *desc = ctx->buffer_tail; 1045 1046 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); 1047 1048 desc->used += (z + extra) * sizeof(*d); 1049 1050 wmb(); /* finish init of new descriptors before branch_address update */ 1051 ctx->prev->branch_address = cpu_to_le32(d_bus | z); 1052 ctx->prev = find_branch_descriptor(d, z); 1053 1054 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); 1055 flush_writes(ctx->ohci); 1056} 1057 1058static void context_stop(struct context *ctx) 1059{ 1060 u32 reg; 1061 int i; 1062 1063 reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); 1064 flush_writes(ctx->ohci); 1065 1066 for (i = 0; i < 10; i++) { 1067 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs)); 1068 if ((reg & CONTEXT_ACTIVE) == 0) 1069 return; 1070 1071 mdelay(1); 1072 } 1073 fw_error("Error: DMA context still active (0x%08x)\n", reg); 1074} 1075 1076struct driver_data { 1077 struct fw_packet *packet; 1078}; 1079 1080/* 1081 * This function apppends a packet to the DMA queue for transmission. 1082 * Must always be called with the ochi->lock held to ensure proper 1083 * generation handling and locking around packet queue manipulation. 1084 */ 1085static int at_context_queue_packet(struct context *ctx, 1086 struct fw_packet *packet) 1087{ 1088 struct fw_ohci *ohci = ctx->ohci; 1089 dma_addr_t d_bus, uninitialized_var(payload_bus); 1090 struct driver_data *driver_data; 1091 struct descriptor *d, *last; 1092 __le32 *header; 1093 int z, tcode; 1094 u32 reg; 1095 1096 d = context_get_descriptors(ctx, 4, &d_bus); 1097 if (d == NULL) { 1098 packet->ack = RCODE_SEND_ERROR; 1099 return -1; 1100 } 1101 1102 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); 1103 d[0].res_count = cpu_to_le16(packet->timestamp); 1104 1105 /* 1106 * The DMA format for asyncronous link packets is different 1107 * from the IEEE1394 layout, so shift the fields around 1108 * accordingly. If header_length is 8, it's a PHY packet, to 1109 * which we need to prepend an extra quadlet. 1110 */ 1111 1112 header = (__le32 *) &d[1]; 1113 switch (packet->header_length) { 1114 case 16: 1115 case 12: 1116 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | 1117 (packet->speed << 16)); 1118 header[1] = cpu_to_le32((packet->header[1] & 0xffff) | 1119 (packet->header[0] & 0xffff0000)); 1120 header[2] = cpu_to_le32(packet->header[2]); 1121 1122 tcode = (packet->header[0] >> 4) & 0x0f; 1123 if (TCODE_IS_BLOCK_PACKET(tcode)) 1124 header[3] = cpu_to_le32(packet->header[3]); 1125 else 1126 header[3] = (__force __le32) packet->header[3]; 1127 1128 d[0].req_count = cpu_to_le16(packet->header_length); 1129 break; 1130 1131 case 8: 1132 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | 1133 (packet->speed << 16)); 1134 header[1] = cpu_to_le32(packet->header[0]); 1135 header[2] = cpu_to_le32(packet->header[1]); 1136 d[0].req_count = cpu_to_le16(12); 1137 1138 if (is_ping_packet(packet->header)) 1139 d[0].control |= cpu_to_le16(DESCRIPTOR_PING); 1140 break; 1141 1142 case 4: 1143 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | 1144 (packet->speed << 16)); 1145 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); 1146 d[0].req_count = cpu_to_le16(8); 1147 break; 1148 1149 default: 1150 /* BUG(); */ 1151 packet->ack = RCODE_SEND_ERROR; 1152 return -1; 1153 } 1154 1155 driver_data = (struct driver_data *) &d[3]; 1156 driver_data->packet = packet; 1157 packet->driver_data = driver_data; 1158 1159 if (packet->payload_length > 0) { 1160 payload_bus = 1161 dma_map_single(ohci->card.device, packet->payload, 1162 packet->payload_length, DMA_TO_DEVICE); 1163 if (dma_mapping_error(ohci->card.device, payload_bus)) { 1164 packet->ack = RCODE_SEND_ERROR; 1165 return -1; 1166 } 1167 packet->payload_bus = payload_bus; 1168 packet->payload_mapped = true; 1169 1170 d[2].req_count = cpu_to_le16(packet->payload_length); 1171 d[2].data_address = cpu_to_le32(payload_bus); 1172 last = &d[2]; 1173 z = 3; 1174 } else { 1175 last = &d[0]; 1176 z = 2; 1177 } 1178 1179 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | 1180 DESCRIPTOR_IRQ_ALWAYS | 1181 DESCRIPTOR_BRANCH_ALWAYS); 1182 1183 if (ohci->generation != packet->generation || 1184 reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) { 1185 if (packet->payload_mapped) 1186 dma_unmap_single(ohci->card.device, payload_bus, 1187 packet->payload_length, DMA_TO_DEVICE); 1188 packet->ack = RCODE_GENERATION; 1189 return -1; 1190 } 1191 1192 context_append(ctx, d, z, 4 - z); 1193 1194 /* If the context isn't already running, start it up. */ 1195 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs)); 1196 if ((reg & CONTEXT_RUN) == 0) 1197 context_run(ctx, 0); 1198 1199 return 0; 1200} 1201 1202static int handle_at_packet(struct context *context, 1203 struct descriptor *d, 1204 struct descriptor *last) 1205{ 1206 struct driver_data *driver_data; 1207 struct fw_packet *packet; 1208 struct fw_ohci *ohci = context->ohci; 1209 int evt; 1210 1211 if (last->transfer_status == 0) 1212 /* This descriptor isn't done yet, stop iteration. */ 1213 return 0; 1214 1215 driver_data = (struct driver_data *) &d[3]; 1216 packet = driver_data->packet; 1217 if (packet == NULL) 1218 /* This packet was cancelled, just continue. */ 1219 return 1; 1220 1221 if (packet->payload_mapped) 1222 dma_unmap_single(ohci->card.device, packet->payload_bus, 1223 packet->payload_length, DMA_TO_DEVICE); 1224 1225 evt = le16_to_cpu(last->transfer_status) & 0x1f; 1226 packet->timestamp = le16_to_cpu(last->res_count); 1227 1228 log_ar_at_event('T', packet->speed, packet->header, evt); 1229 1230 switch (evt) { 1231 case OHCI1394_evt_timeout: 1232 /* Async response transmit timed out. */ 1233 packet->ack = RCODE_CANCELLED; 1234 break; 1235 1236 case OHCI1394_evt_flushed: 1237 /* 1238 * The packet was flushed should give same error as 1239 * when we try to use a stale generation count. 1240 */ 1241 packet->ack = RCODE_GENERATION; 1242 break; 1243 1244 case OHCI1394_evt_missing_ack: 1245 /* 1246 * Using a valid (current) generation count, but the 1247 * node is not on the bus or not sending acks. 1248 */ 1249 packet->ack = RCODE_NO_ACK; 1250 break; 1251 1252 case ACK_COMPLETE + 0x10: 1253 case ACK_PENDING + 0x10: 1254 case ACK_BUSY_X + 0x10: 1255 case ACK_BUSY_A + 0x10: 1256 case ACK_BUSY_B + 0x10: 1257 case ACK_DATA_ERROR + 0x10: 1258 case ACK_TYPE_ERROR + 0x10: 1259 packet->ack = evt - 0x10; 1260 break; 1261 1262 default: 1263 packet->ack = RCODE_SEND_ERROR; 1264 break; 1265 } 1266 1267 packet->callback(packet, &ohci->card, packet->ack); 1268 1269 return 1; 1270} 1271 1272#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff) 1273#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f) 1274#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff) 1275#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) 1276#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) 1277 1278static void handle_local_rom(struct fw_ohci *ohci, 1279 struct fw_packet *packet, u32 csr) 1280{ 1281 struct fw_packet response; 1282 int tcode, length, i; 1283 1284 tcode = HEADER_GET_TCODE(packet->header[0]); 1285 if (TCODE_IS_BLOCK_PACKET(tcode)) 1286 length = HEADER_GET_DATA_LENGTH(packet->header[3]); 1287 else 1288 length = 4; 1289 1290 i = csr - CSR_CONFIG_ROM; 1291 if (i + length > CONFIG_ROM_SIZE) { 1292 fw_fill_response(&response, packet->header, 1293 RCODE_ADDRESS_ERROR, NULL, 0); 1294 } else if (!TCODE_IS_READ_REQUEST(tcode)) { 1295 fw_fill_response(&response, packet->header, 1296 RCODE_TYPE_ERROR, NULL, 0); 1297 } else { 1298 fw_fill_response(&response, packet->header, RCODE_COMPLETE, 1299 (void *) ohci->config_rom + i, length); 1300 } 1301 1302 fw_core_handle_response(&ohci->card, &response); 1303} 1304 1305static void handle_local_lock(struct fw_ohci *ohci, 1306 struct fw_packet *packet, u32 csr) 1307{ 1308 struct fw_packet response; 1309 int tcode, length, ext_tcode, sel, try; 1310 __be32 *payload, lock_old; 1311 u32 lock_arg, lock_data; 1312 1313 tcode = HEADER_GET_TCODE(packet->header[0]); 1314 length = HEADER_GET_DATA_LENGTH(packet->header[3]); 1315 payload = packet->payload; 1316 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]); 1317 1318 if (tcode == TCODE_LOCK_REQUEST && 1319 ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) { 1320 lock_arg = be32_to_cpu(payload[0]); 1321 lock_data = be32_to_cpu(payload[1]); 1322 } else if (tcode == TCODE_READ_QUADLET_REQUEST) { 1323 lock_arg = 0; 1324 lock_data = 0; 1325 } else { 1326 fw_fill_response(&response, packet->header, 1327 RCODE_TYPE_ERROR, NULL, 0); 1328 goto out; 1329 } 1330 1331 sel = (csr - CSR_BUS_MANAGER_ID) / 4; 1332 reg_write(ohci, OHCI1394_CSRData, lock_data); 1333 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg); 1334 reg_write(ohci, OHCI1394_CSRControl, sel); 1335 1336 for (try = 0; try < 20; try++) 1337 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) { 1338 lock_old = cpu_to_be32(reg_read(ohci, 1339 OHCI1394_CSRData)); 1340 fw_fill_response(&response, packet->header, 1341 RCODE_COMPLETE, 1342 &lock_old, sizeof(lock_old)); 1343 goto out; 1344 } 1345 1346 fw_error("swap not done (CSR lock timeout)\n"); 1347 fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); 1348 1349 out: 1350 fw_core_handle_response(&ohci->card, &response); 1351} 1352 1353static void handle_local_request(struct context *ctx, struct fw_packet *packet) 1354{ 1355 u64 offset, csr; 1356 1357 if (ctx == &ctx->ohci->at_request_ctx) { 1358 packet->ack = ACK_PENDING; 1359 packet->callback(packet, &ctx->ohci->card, packet->ack); 1360 } 1361 1362 offset = 1363 ((unsigned long long) 1364 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) | 1365 packet->header[2]; 1366 csr = offset - CSR_REGISTER_BASE; 1367 1368 /* Handle config rom reads. */ 1369 if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END) 1370 handle_local_rom(ctx->ohci, packet, csr); 1371 else switch (csr) { 1372 case CSR_BUS_MANAGER_ID: 1373 case CSR_BANDWIDTH_AVAILABLE: 1374 case CSR_CHANNELS_AVAILABLE_HI: 1375 case CSR_CHANNELS_AVAILABLE_LO: 1376 handle_local_lock(ctx->ohci, packet, csr); 1377 break; 1378 default: 1379 if (ctx == &ctx->ohci->at_request_ctx) 1380 fw_core_handle_request(&ctx->ohci->card, packet); 1381 else 1382 fw_core_handle_response(&ctx->ohci->card, packet); 1383 break; 1384 } 1385 1386 if (ctx == &ctx->ohci->at_response_ctx) { 1387 packet->ack = ACK_COMPLETE; 1388 packet->callback(packet, &ctx->ohci->card, packet->ack); 1389 } 1390} 1391 1392static void at_context_transmit(struct context *ctx, struct fw_packet *packet) 1393{ 1394 unsigned long flags; 1395 int ret; 1396 1397 spin_lock_irqsave(&ctx->ohci->lock, flags); 1398 1399 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id && 1400 ctx->ohci->generation == packet->generation) { 1401 spin_unlock_irqrestore(&ctx->ohci->lock, flags); 1402 handle_local_request(ctx, packet); 1403 return; 1404 } 1405 1406 ret = at_context_queue_packet(ctx, packet); 1407 spin_unlock_irqrestore(&ctx->ohci->lock, flags); 1408 1409 if (ret < 0) 1410 packet->callback(packet, &ctx->ohci->card, packet->ack); 1411 1412} 1413 1414static u32 cycle_timer_ticks(u32 cycle_timer) 1415{ 1416 u32 ticks; 1417 1418 ticks = cycle_timer & 0xfff; 1419 ticks += 3072 * ((cycle_timer >> 12) & 0x1fff); 1420 ticks += (3072 * 8000) * (cycle_timer >> 25); 1421 1422 return ticks; 1423} 1424 1425/* 1426 * Some controllers exhibit one or more of the following bugs when updating the 1427 * iso cycle timer register: 1428 * - When the lowest six bits are wrapping around to zero, a read that happens 1429 * at the same time will return garbage in the lowest ten bits. 1430 * - When the cycleOffset field wraps around to zero, the cycleCount field is 1431 * not incremented for about 60 ns. 1432 * - Occasionally, the entire register reads zero. 1433 * 1434 * To catch these, we read the register three times and ensure that the 1435 * difference between each two consecutive reads is approximately the same, i.e. 1436 * less than twice the other. Furthermore, any negative difference indicates an 1437 * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to 1438 * execute, so we have enough precision to compute the ratio of the differences.) 1439 */ 1440static u32 get_cycle_time(struct fw_ohci *ohci) 1441{ 1442 u32 c0, c1, c2; 1443 u32 t0, t1, t2; 1444 s32 diff01, diff12; 1445 int i; 1446 1447 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1448 1449 if (ohci->quirks & QUIRK_CYCLE_TIMER) { 1450 i = 0; 1451 c1 = c2; 1452 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1453 do { 1454 c0 = c1; 1455 c1 = c2; 1456 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1457 t0 = cycle_timer_ticks(c0); 1458 t1 = cycle_timer_ticks(c1); 1459 t2 = cycle_timer_ticks(c2); 1460 diff01 = t1 - t0; 1461 diff12 = t2 - t1; 1462 } while ((diff01 <= 0 || diff12 <= 0 || 1463 diff01 / diff12 >= 2 || diff12 / diff01 >= 2) 1464 && i++ < 20); 1465 } 1466 1467 return c2; 1468} 1469 1470/* 1471 * This function has to be called at least every 64 seconds. The bus_time 1472 * field stores not only the upper 25 bits of the BUS_TIME register but also 1473 * the most significant bit of the cycle timer in bit 6 so that we can detect 1474 * changes in this bit. 1475 */ 1476static u32 update_bus_time(struct fw_ohci *ohci) 1477{ 1478 u32 cycle_time_seconds = get_cycle_time(ohci) >> 25; 1479 1480 if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40)) 1481 ohci->bus_time += 0x40; 1482 1483 return ohci->bus_time | cycle_time_seconds; 1484} 1485 1486static void bus_reset_tasklet(unsigned long data) 1487{ 1488 struct fw_ohci *ohci = (struct fw_ohci *)data; 1489 int self_id_count, i, j, reg; 1490 int generation, new_generation; 1491 unsigned long flags; 1492 void *free_rom = NULL; 1493 dma_addr_t free_rom_bus = 0; 1494 bool is_new_root; 1495 1496 reg = reg_read(ohci, OHCI1394_NodeID); 1497 if (!(reg & OHCI1394_NodeID_idValid)) { 1498 fw_notify("node ID not valid, new bus reset in progress\n"); 1499 return; 1500 } 1501 if ((reg & OHCI1394_NodeID_nodeNumber) == 63) { 1502 fw_notify("malconfigured bus\n"); 1503 return; 1504 } 1505 ohci->node_id = reg & (OHCI1394_NodeID_busNumber | 1506 OHCI1394_NodeID_nodeNumber); 1507 1508 is_new_root = (reg & OHCI1394_NodeID_root) != 0; 1509 if (!(ohci->is_root && is_new_root)) 1510 reg_write(ohci, OHCI1394_LinkControlSet, 1511 OHCI1394_LinkControl_cycleMaster); 1512 ohci->is_root = is_new_root; 1513 1514 reg = reg_read(ohci, OHCI1394_SelfIDCount); 1515 if (reg & OHCI1394_SelfIDCount_selfIDError) { 1516 fw_notify("inconsistent self IDs\n"); 1517 return; 1518 } 1519 /* 1520 * The count in the SelfIDCount register is the number of 1521 * bytes in the self ID receive buffer. Since we also receive 1522 * the inverted quadlets and a header quadlet, we shift one 1523 * bit extra to get the actual number of self IDs. 1524 */ 1525 self_id_count = (reg >> 3) & 0xff; 1526 if (self_id_count == 0 || self_id_count > 252) { 1527 fw_notify("inconsistent self IDs\n"); 1528 return; 1529 } 1530 generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; 1531 rmb(); 1532 1533 for (i = 1, j = 0; j < self_id_count; i += 2, j++) { 1534 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) { 1535 fw_notify("inconsistent self IDs\n"); 1536 return; 1537 } 1538 ohci->self_id_buffer[j] = 1539 cond_le32_to_cpu(ohci->self_id_cpu[i]); 1540 } 1541 rmb(); 1542 1543 /* 1544 * Check the consistency of the self IDs we just read. The 1545 * problem we face is that a new bus reset can start while we 1546 * read out the self IDs from the DMA buffer. If this happens, 1547 * the DMA buffer will be overwritten with new self IDs and we 1548 * will read out inconsistent data. The OHCI specification 1549 * (section 11.2) recommends a technique similar to 1550 * linux/seqlock.h, where we remember the generation of the 1551 * self IDs in the buffer before reading them out and compare 1552 * it to the current generation after reading them out. If 1553 * the two generations match we know we have a consistent set 1554 * of self IDs. 1555 */ 1556 1557 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; 1558 if (new_generation != generation) { 1559 fw_notify("recursive bus reset detected, " 1560 "discarding self ids\n"); 1561 return; 1562 } 1563 1564 spin_lock_irqsave(&ohci->lock, flags); 1565 1566 ohci->generation = generation; 1567 context_stop(&ohci->at_request_ctx); 1568 context_stop(&ohci->at_response_ctx); 1569 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); 1570 1571 if (ohci->quirks & QUIRK_RESET_PACKET) 1572 ohci->request_generation = generation; 1573 1574 /* 1575 * This next bit is unrelated to the AT context stuff but we 1576 * have to do it under the spinlock also. If a new config rom 1577 * was set up before this reset, the old one is now no longer 1578 * in use and we can free it. Update the config rom pointers 1579 * to point to the current config rom and clear the 1580 * next_config_rom pointer so a new update can take place. 1581 */ 1582 1583 if (ohci->next_config_rom != NULL) { 1584 if (ohci->next_config_rom != ohci->config_rom) { 1585 free_rom = ohci->config_rom; 1586 free_rom_bus = ohci->config_rom_bus; 1587 } 1588 ohci->config_rom = ohci->next_config_rom; 1589 ohci->config_rom_bus = ohci->next_config_rom_bus; 1590 ohci->next_config_rom = NULL; 1591 1592 /* 1593 * Restore config_rom image and manually update 1594 * config_rom registers. Writing the header quadlet 1595 * will indicate that the config rom is ready, so we 1596 * do that last. 1597 */ 1598 reg_write(ohci, OHCI1394_BusOptions, 1599 be32_to_cpu(ohci->config_rom[2])); 1600 ohci->config_rom[0] = ohci->next_header; 1601 reg_write(ohci, OHCI1394_ConfigROMhdr, 1602 be32_to_cpu(ohci->next_header)); 1603 } 1604 1605#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA 1606 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0); 1607 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0); 1608#endif 1609 1610 spin_unlock_irqrestore(&ohci->lock, flags); 1611 1612 if (free_rom) 1613 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1614 free_rom, free_rom_bus); 1615 1616 log_selfids(ohci->node_id, generation, 1617 self_id_count, ohci->self_id_buffer); 1618 1619 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, 1620 self_id_count, ohci->self_id_buffer, 1621 ohci->csr_state_setclear_abdicate); 1622 ohci->csr_state_setclear_abdicate = false; 1623} 1624 1625static irqreturn_t irq_handler(int irq, void *data) 1626{ 1627 struct fw_ohci *ohci = data; 1628 u32 event, iso_event; 1629 int i; 1630 1631 event = reg_read(ohci, OHCI1394_IntEventClear); 1632 1633 if (!event || !~event) 1634 return IRQ_NONE; 1635 1636 /* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */ 1637 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset); 1638 log_irqs(event); 1639 1640 if (event & OHCI1394_selfIDComplete) 1641 tasklet_schedule(&ohci->bus_reset_tasklet); 1642 1643 if (event & OHCI1394_RQPkt) 1644 tasklet_schedule(&ohci->ar_request_ctx.tasklet); 1645 1646 if (event & OHCI1394_RSPkt) 1647 tasklet_schedule(&ohci->ar_response_ctx.tasklet); 1648 1649 if (event & OHCI1394_reqTxComplete) 1650 tasklet_schedule(&ohci->at_request_ctx.tasklet); 1651 1652 if (event & OHCI1394_respTxComplete) 1653 tasklet_schedule(&ohci->at_response_ctx.tasklet); 1654 1655 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear); 1656 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event); 1657 1658 while (iso_event) { 1659 i = ffs(iso_event) - 1; 1660 tasklet_schedule(&ohci->ir_context_list[i].context.tasklet); 1661 iso_event &= ~(1 << i); 1662 } 1663 1664 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear); 1665 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event); 1666 1667 while (iso_event) { 1668 i = ffs(iso_event) - 1; 1669 tasklet_schedule(&ohci->it_context_list[i].context.tasklet); 1670 iso_event &= ~(1 << i); 1671 } 1672 1673 if (unlikely(event & OHCI1394_regAccessFail)) 1674 fw_error("Register access failure - " 1675 "please notify linux1394-devel@lists.sf.net\n"); 1676 1677 if (unlikely(event & OHCI1394_postedWriteErr)) 1678 fw_error("PCI posted write error\n"); 1679 1680 if (unlikely(event & OHCI1394_cycleTooLong)) { 1681 if (printk_ratelimit()) 1682 fw_notify("isochronous cycle too long\n"); 1683 reg_write(ohci, OHCI1394_LinkControlSet, 1684 OHCI1394_LinkControl_cycleMaster); 1685 } 1686 1687 if (unlikely(event & OHCI1394_cycleInconsistent)) { 1688 if (printk_ratelimit()) 1689 fw_notify("isochronous cycle inconsistent\n"); 1690 } 1691 1692 if (event & OHCI1394_cycle64Seconds) { 1693 spin_lock(&ohci->lock); 1694 update_bus_time(ohci); 1695 spin_unlock(&ohci->lock); 1696 } 1697 1698 return IRQ_HANDLED; 1699} 1700 1701static int software_reset(struct fw_ohci *ohci) 1702{ 1703 int i; 1704 1705 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset); 1706 1707 for (i = 0; i < OHCI_LOOP_COUNT; i++) { 1708 if ((reg_read(ohci, OHCI1394_HCControlSet) & 1709 OHCI1394_HCControl_softReset) == 0) 1710 return 0; 1711 msleep(1); 1712 } 1713 1714 return -EBUSY; 1715} 1716 1717static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length) 1718{ 1719 size_t size = length * 4; 1720 1721 memcpy(dest, src, size); 1722 if (size < CONFIG_ROM_SIZE) 1723 memset(&dest[length], 0, CONFIG_ROM_SIZE - size); 1724} 1725 1726static int configure_1394a_enhancements(struct fw_ohci *ohci) 1727{ 1728 bool enable_1394a; 1729 int ret, clear, set, offset; 1730 1731 /* Check if the driver should configure link and PHY. */ 1732 if (!(reg_read(ohci, OHCI1394_HCControlSet) & 1733 OHCI1394_HCControl_programPhyEnable)) 1734 return 0; 1735 1736 /* Paranoia: check whether the PHY supports 1394a, too. */ 1737 enable_1394a = false; 1738 ret = read_phy_reg(ohci, 2); 1739 if (ret < 0) 1740 return ret; 1741 if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) { 1742 ret = read_paged_phy_reg(ohci, 1, 8); 1743 if (ret < 0) 1744 return ret; 1745 if (ret >= 1) 1746 enable_1394a = true; 1747 } 1748 1749 if (ohci->quirks & QUIRK_NO_1394A) 1750 enable_1394a = false; 1751 1752 /* Configure PHY and link consistently. */ 1753 if (enable_1394a) { 1754 clear = 0; 1755 set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; 1756 } else { 1757 clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; 1758 set = 0; 1759 } 1760 ret = update_phy_reg(ohci, 5, clear, set); 1761 if (ret < 0) 1762 return ret; 1763 1764 if (enable_1394a) 1765 offset = OHCI1394_HCControlSet; 1766 else 1767 offset = OHCI1394_HCControlClear; 1768 reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable); 1769 1770 /* Clean up: configuration has been taken care of. */ 1771 reg_write(ohci, OHCI1394_HCControlClear, 1772 OHCI1394_HCControl_programPhyEnable); 1773 1774 return 0; 1775} 1776 1777static int ohci_enable(struct fw_card *card, 1778 const __be32 *config_rom, size_t length) 1779{ 1780 struct fw_ohci *ohci = fw_ohci(card); 1781 struct pci_dev *dev = to_pci_dev(card->device); 1782 u32 lps, seconds, version, irqs; 1783 int i, ret; 1784 1785 if (software_reset(ohci)) { 1786 fw_error("Failed to reset ohci card.\n"); 1787 return -EBUSY; 1788 } 1789 1790 /* 1791 * Now enable LPS, which we need in order to start accessing 1792 * most of the registers. In fact, on some cards (ALI M5251), 1793 * accessing registers in the SClk domain without LPS enabled 1794 * will lock up the machine. Wait 50msec to make sure we have 1795 * full link enabled. However, with some cards (well, at least 1796 * a JMicron PCIe card), we have to try again sometimes. 1797 */ 1798 reg_write(ohci, OHCI1394_HCControlSet, 1799 OHCI1394_HCControl_LPS | 1800 OHCI1394_HCControl_postedWriteEnable); 1801 flush_writes(ohci); 1802 1803 for (lps = 0, i = 0; !lps && i < 3; i++) { 1804 msleep(50); 1805 lps = reg_read(ohci, OHCI1394_HCControlSet) & 1806 OHCI1394_HCControl_LPS; 1807 } 1808 1809 if (!lps) { 1810 fw_error("Failed to set Link Power Status\n"); 1811 return -EIO; 1812 } 1813 1814 reg_write(ohci, OHCI1394_HCControlClear, 1815 OHCI1394_HCControl_noByteSwapData); 1816 1817 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); 1818 reg_write(ohci, OHCI1394_LinkControlSet, 1819 OHCI1394_LinkControl_rcvSelfID | 1820 OHCI1394_LinkControl_rcvPhyPkt | 1821 OHCI1394_LinkControl_cycleTimerEnable | 1822 OHCI1394_LinkControl_cycleMaster); 1823 1824 reg_write(ohci, OHCI1394_ATRetries, 1825 OHCI1394_MAX_AT_REQ_RETRIES | 1826 (OHCI1394_MAX_AT_RESP_RETRIES << 4) | 1827 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) | 1828 (200 << 16)); 1829 1830 seconds = lower_32_bits(get_seconds()); 1831 reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25); 1832 ohci->bus_time = seconds & ~0x3f; 1833 1834 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 1835 if (version >= OHCI_VERSION_1_1) { 1836 reg_write(ohci, OHCI1394_InitialChannelsAvailableHi, 1837 0xfffffffe); 1838 card->broadcast_channel_auto_allocated = true; 1839 } 1840 1841 /* Get implemented bits of the priority arbitration request counter. */ 1842 reg_write(ohci, OHCI1394_FairnessControl, 0x3f); 1843 ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f; 1844 reg_write(ohci, OHCI1394_FairnessControl, 0); 1845 card->priority_budget_implemented = ohci->pri_req_max != 0; 1846 1847 ar_context_run(&ohci->ar_request_ctx); 1848 ar_context_run(&ohci->ar_response_ctx); 1849 1850 reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000); 1851 reg_write(ohci, OHCI1394_IntEventClear, ~0); 1852 reg_write(ohci, OHCI1394_IntMaskClear, ~0); 1853 1854 ret = configure_1394a_enhancements(ohci); 1855 if (ret < 0) 1856 return ret; 1857 1858 /* Activate link_on bit and contender bit in our self ID packets.*/ 1859 ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER); 1860 if (ret < 0) 1861 return ret; 1862 1863 1864 if (config_rom) { 1865 ohci->next_config_rom = 1866 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1867 &ohci->next_config_rom_bus, 1868 GFP_KERNEL); 1869 if (ohci->next_config_rom == NULL) 1870 return -ENOMEM; 1871 1872 copy_config_rom(ohci->next_config_rom, config_rom, length); 1873 } else { 1874 /* 1875 * In the suspend case, config_rom is NULL, which 1876 * means that we just reuse the old config rom. 1877 */ 1878 ohci->next_config_rom = ohci->config_rom; 1879 ohci->next_config_rom_bus = ohci->config_rom_bus; 1880 } 1881 1882 ohci->next_header = ohci->next_config_rom[0]; 1883 ohci->next_config_rom[0] = 0; 1884 reg_write(ohci, OHCI1394_ConfigROMhdr, 0); 1885 reg_write(ohci, OHCI1394_BusOptions, 1886 be32_to_cpu(ohci->next_config_rom[2])); 1887 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); 1888 1889 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); 1890 1891 if (!(ohci->quirks & QUIRK_NO_MSI)) 1892 pci_enable_msi(dev); 1893 if (request_irq(dev->irq, irq_handler, 1894 pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, 1895 ohci_driver_name, ohci)) { 1896 fw_error("Failed to allocate interrupt %d.\n", dev->irq); 1897 pci_disable_msi(dev); 1898 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1899 ohci->config_rom, ohci->config_rom_bus); 1900 return -EIO; 1901 } 1902 1903 irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete | 1904 OHCI1394_RQPkt | OHCI1394_RSPkt | 1905 OHCI1394_isochTx | OHCI1394_isochRx | 1906 OHCI1394_postedWriteErr | 1907 OHCI1394_selfIDComplete | 1908 OHCI1394_regAccessFail | 1909 OHCI1394_cycle64Seconds | 1910 OHCI1394_cycleInconsistent | OHCI1394_cycleTooLong | 1911 OHCI1394_masterIntEnable; 1912 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) 1913 irqs |= OHCI1394_busReset; 1914 reg_write(ohci, OHCI1394_IntMaskSet, irqs); 1915 1916 reg_write(ohci, OHCI1394_HCControlSet, 1917 OHCI1394_HCControl_linkEnable | 1918 OHCI1394_HCControl_BIBimageValid); 1919 flush_writes(ohci); 1920 1921 /* We are ready to go, reset bus to finish initialization. */ 1922 fw_schedule_bus_reset(&ohci->card, false, true); 1923 1924 return 0; 1925} 1926 1927static int ohci_set_config_rom(struct fw_card *card, 1928 const __be32 *config_rom, size_t length) 1929{ 1930 struct fw_ohci *ohci; 1931 unsigned long flags; 1932 int ret = -EBUSY; 1933 __be32 *next_config_rom; 1934 dma_addr_t uninitialized_var(next_config_rom_bus); 1935 1936 ohci = fw_ohci(card); 1937 1938 1939 next_config_rom = 1940 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1941 &next_config_rom_bus, GFP_KERNEL); 1942 if (next_config_rom == NULL) 1943 return -ENOMEM; 1944 1945 spin_lock_irqsave(&ohci->lock, flags); 1946 1947 if (ohci->next_config_rom == NULL) { 1948 ohci->next_config_rom = next_config_rom; 1949 ohci->next_config_rom_bus = next_config_rom_bus; 1950 1951 copy_config_rom(ohci->next_config_rom, config_rom, length); 1952 1953 ohci->next_header = config_rom[0]; 1954 ohci->next_config_rom[0] = 0; 1955 1956 reg_write(ohci, OHCI1394_ConfigROMmap, 1957 ohci->next_config_rom_bus); 1958 ret = 0; 1959 } 1960 1961 spin_unlock_irqrestore(&ohci->lock, flags); 1962 1963 /* 1964 * Now initiate a bus reset to have the changes take 1965 * effect. We clean up the old config rom memory and DMA 1966 * mappings in the bus reset tasklet, since the OHCI 1967 * controller could need to access it before the bus reset 1968 * takes effect. 1969 */ 1970 if (ret == 0) 1971 fw_schedule_bus_reset(&ohci->card, true, true); 1972 else 1973 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1974 next_config_rom, next_config_rom_bus); 1975 1976 return ret; 1977} 1978 1979static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) 1980{ 1981 struct fw_ohci *ohci = fw_ohci(card); 1982 1983 at_context_transmit(&ohci->at_request_ctx, packet); 1984} 1985 1986static void ohci_send_response(struct fw_card *card, struct fw_packet *packet) 1987{ 1988 struct fw_ohci *ohci = fw_ohci(card); 1989 1990 at_context_transmit(&ohci->at_response_ctx, packet); 1991} 1992 1993static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) 1994{ 1995 struct fw_ohci *ohci = fw_ohci(card); 1996 struct context *ctx = &ohci->at_request_ctx; 1997 struct driver_data *driver_data = packet->driver_data; 1998 int ret = -ENOENT; 1999 2000 tasklet_disable(&ctx->tasklet); 2001 2002 if (packet->ack != 0) 2003 goto out; 2004 2005 if (packet->payload_mapped) 2006 dma_unmap_single(ohci->card.device, packet->payload_bus, 2007 packet->payload_length, DMA_TO_DEVICE); 2008 2009 log_ar_at_event('T', packet->speed, packet->header, 0x20); 2010 driver_data->packet = NULL; 2011 packet->ack = RCODE_CANCELLED; 2012 packet->callback(packet, &ohci->card, packet->ack); 2013 ret = 0; 2014 out: 2015 tasklet_enable(&ctx->tasklet); 2016 2017 return ret; 2018} 2019 2020static int ohci_enable_phys_dma(struct fw_card *card, 2021 int node_id, int generation) 2022{ 2023#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA 2024 return 0; 2025#else 2026 struct fw_ohci *ohci = fw_ohci(card); 2027 unsigned long flags; 2028 int n, ret = 0; 2029 2030 2031 spin_lock_irqsave(&ohci->lock, flags); 2032 2033 if (ohci->generation != generation) { 2034 ret = -ESTALE; 2035 goto out; 2036 } 2037 2038 /* 2039 * Note, if the node ID contains a non-local bus ID, physical DMA is 2040 * enabled for _all_ nodes on remote buses. 2041 */ 2042 2043 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63; 2044 if (n < 32) 2045 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n); 2046 else 2047 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32)); 2048 2049 flush_writes(ohci); 2050 out: 2051 spin_unlock_irqrestore(&ohci->lock, flags); 2052 2053 return ret; 2054#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ 2055} 2056 2057static u32 ohci_read_csr(struct fw_card *card, int csr_offset) 2058{ 2059 struct fw_ohci *ohci = fw_ohci(card); 2060 unsigned long flags; 2061 u32 value; 2062 2063 switch (csr_offset) { 2064 case CSR_STATE_CLEAR: 2065 case CSR_STATE_SET: 2066 if (ohci->is_root && 2067 (reg_read(ohci, OHCI1394_LinkControlSet) & 2068 OHCI1394_LinkControl_cycleMaster)) 2069 value = CSR_STATE_BIT_CMSTR; 2070 else 2071 value = 0; 2072 if (ohci->csr_state_setclear_abdicate) 2073 value |= CSR_STATE_BIT_ABDICATE; 2074 2075 return value; 2076 2077 case CSR_NODE_IDS: 2078 return reg_read(ohci, OHCI1394_NodeID) << 16; 2079 2080 case CSR_CYCLE_TIME: 2081 return get_cycle_time(ohci); 2082 2083 case CSR_BUS_TIME: 2084 /* 2085 * We might be called just after the cycle timer has wrapped 2086 * around but just before the cycle64Seconds handler, so we 2087 * better check here, too, if the bus time needs to be updated. 2088 */ 2089 spin_lock_irqsave(&ohci->lock, flags); 2090 value = update_bus_time(ohci); 2091 spin_unlock_irqrestore(&ohci->lock, flags); 2092 return value; 2093 2094 case CSR_BUSY_TIMEOUT: 2095 value = reg_read(ohci, OHCI1394_ATRetries); 2096 return (value >> 4) & 0x0ffff00f; 2097 2098 case CSR_PRIORITY_BUDGET: 2099 return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) | 2100 (ohci->pri_req_max << 8); 2101 2102 default: 2103 WARN_ON(1); 2104 return 0; 2105 } 2106} 2107 2108static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) 2109{ 2110 struct fw_ohci *ohci = fw_ohci(card); 2111 unsigned long flags; 2112 2113 switch (csr_offset) { 2114 case CSR_STATE_CLEAR: 2115 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { 2116 reg_write(ohci, OHCI1394_LinkControlClear, 2117 OHCI1394_LinkControl_cycleMaster); 2118 flush_writes(ohci); 2119 } 2120 if (value & CSR_STATE_BIT_ABDICATE) 2121 ohci->csr_state_setclear_abdicate = false; 2122 break; 2123 2124 case CSR_STATE_SET: 2125 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { 2126 reg_write(ohci, OHCI1394_LinkControlSet, 2127 OHCI1394_LinkControl_cycleMaster); 2128 flush_writes(ohci); 2129 } 2130 if (value & CSR_STATE_BIT_ABDICATE) 2131 ohci->csr_state_setclear_abdicate = true; 2132 break; 2133 2134 case CSR_NODE_IDS: 2135 reg_write(ohci, OHCI1394_NodeID, value >> 16); 2136 flush_writes(ohci); 2137 break; 2138 2139 case CSR_CYCLE_TIME: 2140 reg_write(ohci, OHCI1394_IsochronousCycleTimer, value); 2141 reg_write(ohci, OHCI1394_IntEventSet, 2142 OHCI1394_cycleInconsistent); 2143 flush_writes(ohci); 2144 break; 2145 2146 case CSR_BUS_TIME: 2147 spin_lock_irqsave(&ohci->lock, flags); 2148 ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f); 2149 spin_unlock_irqrestore(&ohci->lock, flags); 2150 break; 2151 2152 case CSR_BUSY_TIMEOUT: 2153 value = (value & 0xf) | ((value & 0xf) << 4) | 2154 ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4); 2155 reg_write(ohci, OHCI1394_ATRetries, value); 2156 flush_writes(ohci); 2157 break; 2158 2159 case CSR_PRIORITY_BUDGET: 2160 reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f); 2161 flush_writes(ohci); 2162 break; 2163 2164 default: 2165 WARN_ON(1); 2166 break; 2167 } 2168} 2169 2170static void copy_iso_headers(struct iso_context *ctx, void *p) 2171{ 2172 int i = ctx->header_length; 2173 2174 if (i + ctx->base.header_size > PAGE_SIZE) 2175 return; 2176 2177 /* 2178 * The iso header is byteswapped to little endian by 2179 * the controller, but the remaining header quadlets 2180 * are big endian. We want to present all the headers 2181 * as big endian, so we have to swap the first quadlet. 2182 */ 2183 if (ctx->base.header_size > 0) 2184 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); 2185 if (ctx->base.header_size > 4) 2186 *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p); 2187 if (ctx->base.header_size > 8) 2188 memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8); 2189 ctx->header_length += ctx->base.header_size; 2190} 2191 2192static int handle_ir_packet_per_buffer(struct context *context, 2193 struct descriptor *d, 2194 struct descriptor *last) 2195{ 2196 struct iso_context *ctx = 2197 container_of(context, struct iso_context, context); 2198 struct descriptor *pd; 2199 __le32 *ir_header; 2200 void *p; 2201 2202 for (pd = d; pd <= last; pd++) 2203 if (pd->transfer_status) 2204 break; 2205 if (pd > last) 2206 /* Descriptor(s) not done yet, stop iteration */ 2207 return 0; 2208 2209 p = last + 1; 2210 copy_iso_headers(ctx, p); 2211 2212 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { 2213 ir_header = (__le32 *) p; 2214 ctx->base.callback.sc(&ctx->base, 2215 le32_to_cpu(ir_header[0]) & 0xffff, 2216 ctx->header_length, ctx->header, 2217 ctx->base.callback_data); 2218 ctx->header_length = 0; 2219 } 2220 2221 return 1; 2222} 2223 2224/* d == last because each descriptor block is only a single descriptor. */ 2225static int handle_ir_buffer_fill(struct context *context, 2226 struct descriptor *d, 2227 struct descriptor *last) 2228{ 2229 struct iso_context *ctx = 2230 container_of(context, struct iso_context, context); 2231 2232 if (!last->transfer_status) 2233 /* Descriptor(s) not done yet, stop iteration */ 2234 return 0; 2235 2236 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) 2237 ctx->base.callback.mc(&ctx->base, 2238 le32_to_cpu(last->data_address) + 2239 le16_to_cpu(last->req_count) - 2240 le16_to_cpu(last->res_count), 2241 ctx->base.callback_data); 2242 2243 return 1; 2244} 2245 2246static int handle_it_packet(struct context *context, 2247 struct descriptor *d, 2248 struct descriptor *last) 2249{ 2250 struct iso_context *ctx = 2251 container_of(context, struct iso_context, context); 2252 int i; 2253 struct descriptor *pd; 2254 2255 for (pd = d; pd <= last; pd++) 2256 if (pd->transfer_status) 2257 break; 2258 if (pd > last) 2259 /* Descriptor(s) not done yet, stop iteration */ 2260 return 0; 2261 2262 i = ctx->header_length; 2263 if (i + 4 < PAGE_SIZE) { 2264 /* Present this value as big-endian to match the receive code */ 2265 *(__be32 *)(ctx->header + i) = cpu_to_be32( 2266 ((u32)le16_to_cpu(pd->transfer_status) << 16) | 2267 le16_to_cpu(pd->res_count)); 2268 ctx->header_length += 4; 2269 } 2270 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { 2271 ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count), 2272 ctx->header_length, ctx->header, 2273 ctx->base.callback_data); 2274 ctx->header_length = 0; 2275 } 2276 return 1; 2277} 2278 2279static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels) 2280{ 2281 u32 hi = channels >> 32, lo = channels; 2282 2283 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi); 2284 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo); 2285 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi); 2286 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo); 2287 mmiowb(); 2288 ohci->mc_channels = channels; 2289} 2290 2291static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, 2292 int type, int channel, size_t header_size) 2293{ 2294 struct fw_ohci *ohci = fw_ohci(card); 2295 struct iso_context *uninitialized_var(ctx); 2296 descriptor_callback_t uninitialized_var(callback); 2297 u64 *uninitialized_var(channels); 2298 u32 *uninitialized_var(mask), uninitialized_var(regs); 2299 unsigned long flags; 2300 int index, ret = -EBUSY; 2301 2302 spin_lock_irqsave(&ohci->lock, flags); 2303 2304 switch (type) { 2305 case FW_ISO_CONTEXT_TRANSMIT: 2306 mask = &ohci->it_context_mask; 2307 callback = handle_it_packet; 2308 index = ffs(*mask) - 1; 2309 if (index >= 0) { 2310 *mask &= ~(1 << index); 2311 regs = OHCI1394_IsoXmitContextBase(index); 2312 ctx = &ohci->it_context_list[index]; 2313 } 2314 break; 2315 2316 case FW_ISO_CONTEXT_RECEIVE: 2317 channels = &ohci->ir_context_channels; 2318 mask = &ohci->ir_context_mask; 2319 callback = handle_ir_packet_per_buffer; 2320 index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; 2321 if (index >= 0) { 2322 *channels &= ~(1ULL << channel); 2323 *mask &= ~(1 << index); 2324 regs = OHCI1394_IsoRcvContextBase(index); 2325 ctx = &ohci->ir_context_list[index]; 2326 } 2327 break; 2328 2329 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 2330 mask = &ohci->ir_context_mask; 2331 callback = handle_ir_buffer_fill; 2332 index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1; 2333 if (index >= 0) { 2334 ohci->mc_allocated = true; 2335 *mask &= ~(1 << index); 2336 regs = OHCI1394_IsoRcvContextBase(index); 2337 ctx = &ohci->ir_context_list[index]; 2338 } 2339 break; 2340 2341 default: 2342 index = -1; 2343 ret = -ENOSYS; 2344 } 2345 2346 spin_unlock_irqrestore(&ohci->lock, flags); 2347 2348 if (index < 0) 2349 return ERR_PTR(ret); 2350 2351 memset(ctx, 0, sizeof(*ctx)); 2352 ctx->header_length = 0; 2353 ctx->header = (void *) __get_free_page(GFP_KERNEL); 2354 if (ctx->header == NULL) { 2355 ret = -ENOMEM; 2356 goto out; 2357 } 2358 ret = context_init(&ctx->context, ohci, regs, callback); 2359 if (ret < 0) 2360 goto out_with_header; 2361 2362 if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) 2363 set_multichannel_mask(ohci, 0); 2364 2365 return &ctx->base; 2366 2367 out_with_header: 2368 free_page((unsigned long)ctx->header); 2369 out: 2370 spin_lock_irqsave(&ohci->lock, flags); 2371 2372 switch (type) { 2373 case FW_ISO_CONTEXT_RECEIVE: 2374 *channels |= 1ULL << channel; 2375 break; 2376 2377 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 2378 ohci->mc_allocated = false; 2379 break; 2380 } 2381 *mask |= 1 << index; 2382 2383 spin_unlock_irqrestore(&ohci->lock, flags); 2384 2385 return ERR_PTR(ret); 2386} 2387 2388static int ohci_start_iso(struct fw_iso_context *base, 2389 s32 cycle, u32 sync, u32 tags) 2390{ 2391 struct iso_context *ctx = container_of(base, struct iso_context, base); 2392 struct fw_ohci *ohci = ctx->context.ohci; 2393 u32 control = IR_CONTEXT_ISOCH_HEADER, match; 2394 int index; 2395 2396 switch (ctx->base.type) { 2397 case FW_ISO_CONTEXT_TRANSMIT: 2398 index = ctx - ohci->it_context_list; 2399 match = 0; 2400 if (cycle >= 0) 2401 match = IT_CONTEXT_CYCLE_MATCH_ENABLE | 2402 (cycle & 0x7fff) << 16; 2403 2404 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index); 2405 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); 2406 context_run(&ctx->context, match); 2407 break; 2408 2409 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 2410 control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE; 2411 /* fall through */ 2412 case FW_ISO_CONTEXT_RECEIVE: 2413 index = ctx - ohci->ir_context_list; 2414 match = (tags << 28) | (sync << 8) | ctx->base.channel; 2415 if (cycle >= 0) { 2416 match |= (cycle & 0x07fff) << 12; 2417 control |= IR_CONTEXT_CYCLE_MATCH_ENABLE; 2418 } 2419 2420 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index); 2421 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); 2422 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); 2423 context_run(&ctx->context, control); 2424 break; 2425 } 2426 2427 return 0; 2428} 2429 2430static int ohci_stop_iso(struct fw_iso_context *base) 2431{ 2432 struct fw_ohci *ohci = fw_ohci(base->card); 2433 struct iso_context *ctx = container_of(base, struct iso_context, base); 2434 int index; 2435 2436 switch (ctx->base.type) { 2437 case FW_ISO_CONTEXT_TRANSMIT: 2438 index = ctx - ohci->it_context_list; 2439 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index); 2440 break; 2441 2442 case FW_ISO_CONTEXT_RECEIVE: 2443 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 2444 index = ctx - ohci->ir_context_list; 2445 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index); 2446 break; 2447 } 2448 flush_writes(ohci); 2449 context_stop(&ctx->context); 2450 2451 return 0; 2452} 2453 2454static void ohci_free_iso_context(struct fw_iso_context *base) 2455{ 2456 struct fw_ohci *ohci = fw_ohci(base->card); 2457 struct iso_context *ctx = container_of(base, struct iso_context, base); 2458 unsigned long flags; 2459 int index; 2460 2461 ohci_stop_iso(base); 2462 context_release(&ctx->context); 2463 free_page((unsigned long)ctx->header); 2464 2465 spin_lock_irqsave(&ohci->lock, flags); 2466 2467 switch (base->type) { 2468 case FW_ISO_CONTEXT_TRANSMIT: 2469 index = ctx - ohci->it_context_list; 2470 ohci->it_context_mask |= 1 << index; 2471 break; 2472 2473 case FW_ISO_CONTEXT_RECEIVE: 2474 index = ctx - ohci->ir_context_list; 2475 ohci->ir_context_mask |= 1 << index; 2476 ohci->ir_context_channels |= 1ULL << base->channel; 2477 break; 2478 2479 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 2480 index = ctx - ohci->ir_context_list; 2481 ohci->ir_context_mask |= 1 << index; 2482 ohci->ir_context_channels |= ohci->mc_channels; 2483 ohci->mc_channels = 0; 2484 ohci->mc_allocated = false; 2485 break; 2486 } 2487 2488 spin_unlock_irqrestore(&ohci->lock, flags); 2489} 2490 2491static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels) 2492{ 2493 struct fw_ohci *ohci = fw_ohci(base->card); 2494 unsigned long flags; 2495 int ret; 2496 2497 switch (base->type) { 2498 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 2499 2500 spin_lock_irqsave(&ohci->lock, flags); 2501 2502 /* Don't allow multichannel to grab other contexts' channels. */ 2503 if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) { 2504 *channels = ohci->ir_context_channels; 2505 ret = -EBUSY; 2506 } else { 2507 set_multichannel_mask(ohci, *channels); 2508 ret = 0; 2509 } 2510 2511 spin_unlock_irqrestore(&ohci->lock, flags); 2512 2513 break; 2514 default: 2515 ret = -EINVAL; 2516 } 2517 2518 return ret; 2519} 2520 2521static int queue_iso_transmit(struct iso_context *ctx, 2522 struct fw_iso_packet *packet, 2523 struct fw_iso_buffer *buffer, 2524 unsigned long payload) 2525{ 2526 struct descriptor *d, *last, *pd; 2527 struct fw_iso_packet *p; 2528 __le32 *header; 2529 dma_addr_t d_bus, page_bus; 2530 u32 z, header_z, payload_z, irq; 2531 u32 payload_index, payload_end_index, next_page_index; 2532 int page, end_page, i, length, offset; 2533 2534 p = packet; 2535 payload_index = payload; 2536 2537 if (p->skip) 2538 z = 1; 2539 else 2540 z = 2; 2541 if (p->header_length > 0) 2542 z++; 2543 2544 /* Determine the first page the payload isn't contained in. */ 2545 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT; 2546 if (p->payload_length > 0) 2547 payload_z = end_page - (payload_index >> PAGE_SHIFT); 2548 else 2549 payload_z = 0; 2550 2551 z += payload_z; 2552 2553 /* Get header size in number of descriptors. */ 2554 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d)); 2555 2556 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus); 2557 if (d == NULL) 2558 return -ENOMEM; 2559 2560 if (!p->skip) { 2561 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); 2562 d[0].req_count = cpu_to_le16(8); 2563 d[0].branch_address = cpu_to_le32(d_bus | z); 2564 2565 header = (__le32 *) &d[1]; 2566 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | 2567 IT_HEADER_TAG(p->tag) | 2568 IT_HEADER_TCODE(TCODE_STREAM_DATA) | 2569 IT_HEADER_CHANNEL(ctx->base.channel) | 2570 IT_HEADER_SPEED(ctx->base.speed)); 2571 header[1] = 2572 cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length + 2573 p->payload_length)); 2574 } 2575 2576 if (p->header_length > 0) { 2577 d[2].req_count = cpu_to_le16(p->header_length); 2578 d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d)); 2579 memcpy(&d[z], p->header, p->header_length); 2580 } 2581 2582 pd = d + z - payload_z; 2583 payload_end_index = payload_index + p->payload_length; 2584 for (i = 0; i < payload_z; i++) { 2585 page = payload_index >> PAGE_SHIFT; 2586 offset = payload_index & ~PAGE_MASK; 2587 next_page_index = (page + 1) << PAGE_SHIFT; 2588 length = 2589 min(next_page_index, payload_end_index) - payload_index; 2590 pd[i].req_count = cpu_to_le16(length); 2591 2592 page_bus = page_private(buffer->pages[page]); 2593 pd[i].data_address = cpu_to_le32(page_bus + offset); 2594 2595 payload_index += length; 2596 } 2597 2598 if (p->interrupt) 2599 irq = DESCRIPTOR_IRQ_ALWAYS; 2600 else 2601 irq = DESCRIPTOR_NO_IRQ; 2602 2603 last = z == 2 ? d : d + z - 1; 2604 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | 2605 DESCRIPTOR_STATUS | 2606 DESCRIPTOR_BRANCH_ALWAYS | 2607 irq); 2608 2609 context_append(&ctx->context, d, z, header_z); 2610 2611 return 0; 2612} 2613 2614static int queue_iso_packet_per_buffer(struct iso_context *ctx, 2615 struct fw_iso_packet *packet, 2616 struct fw_iso_buffer *buffer, 2617 unsigned long payload) 2618{ 2619 struct descriptor *d, *pd; 2620 dma_addr_t d_bus, page_bus; 2621 u32 z, header_z, rest; 2622 int i, j, length; 2623 int page, offset, packet_count, header_size, payload_per_buffer; 2624 2625 /* 2626 * The OHCI controller puts the isochronous header and trailer in the 2627 * buffer, so we need at least 8 bytes. 2628 */ 2629 packet_count = packet->header_length / ctx->base.header_size; 2630 header_size = max(ctx->base.header_size, (size_t)8); 2631 2632 /* Get header size in number of descriptors. */ 2633 header_z = DIV_ROUND_UP(header_size, sizeof(*d)); 2634 page = payload >> PAGE_SHIFT; 2635 offset = payload & ~PAGE_MASK; 2636 payload_per_buffer = packet->payload_length / packet_count; 2637 2638 for (i = 0; i < packet_count; i++) { 2639 /* d points to the header descriptor */ 2640 z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1; 2641 d = context_get_descriptors(&ctx->context, 2642 z + header_z, &d_bus); 2643 if (d == NULL) 2644 return -ENOMEM; 2645 2646 d->control = cpu_to_le16(DESCRIPTOR_STATUS | 2647 DESCRIPTOR_INPUT_MORE); 2648 if (packet->skip && i == 0) 2649 d->control |= cpu_to_le16(DESCRIPTOR_WAIT); 2650 d->req_count = cpu_to_le16(header_size); 2651 d->res_count = d->req_count; 2652 d->transfer_status = 0; 2653 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d))); 2654 2655 rest = payload_per_buffer; 2656 pd = d; 2657 for (j = 1; j < z; j++) { 2658 pd++; 2659 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | 2660 DESCRIPTOR_INPUT_MORE); 2661 2662 if (offset + rest < PAGE_SIZE) 2663 length = rest; 2664 else 2665 length = PAGE_SIZE - offset; 2666 pd->req_count = cpu_to_le16(length); 2667 pd->res_count = pd->req_count; 2668 pd->transfer_status = 0; 2669 2670 page_bus = page_private(buffer->pages[page]); 2671 pd->data_address = cpu_to_le32(page_bus + offset); 2672 2673 offset = (offset + length) & ~PAGE_MASK; 2674 rest -= length; 2675 if (offset == 0) 2676 page++; 2677 } 2678 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | 2679 DESCRIPTOR_INPUT_LAST | 2680 DESCRIPTOR_BRANCH_ALWAYS); 2681 if (packet->interrupt && i == packet_count - 1) 2682 pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); 2683 2684 context_append(&ctx->context, d, z, header_z); 2685 } 2686 2687 return 0; 2688} 2689 2690static int queue_iso_buffer_fill(struct iso_context *ctx, 2691 struct fw_iso_packet *packet, 2692 struct fw_iso_buffer *buffer, 2693 unsigned long payload) 2694{ 2695 struct descriptor *d; 2696 dma_addr_t d_bus, page_bus; 2697 int page, offset, rest, z, i, length; 2698 2699 page = payload >> PAGE_SHIFT; 2700 offset = payload & ~PAGE_MASK; 2701 rest = packet->payload_length; 2702 2703 /* We need one descriptor for each page in the buffer. */ 2704 z = DIV_ROUND_UP(offset + rest, PAGE_SIZE); 2705 2706 if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count)) 2707 return -EFAULT; 2708 2709 for (i = 0; i < z; i++) { 2710 d = context_get_descriptors(&ctx->context, 1, &d_bus); 2711 if (d == NULL) 2712 return -ENOMEM; 2713 2714 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | 2715 DESCRIPTOR_BRANCH_ALWAYS); 2716 if (packet->skip && i == 0) 2717 d->control |= cpu_to_le16(DESCRIPTOR_WAIT); 2718 if (packet->interrupt && i == z - 1) 2719 d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); 2720 2721 if (offset + rest < PAGE_SIZE) 2722 length = rest; 2723 else 2724 length = PAGE_SIZE - offset; 2725 d->req_count = cpu_to_le16(length); 2726 d->res_count = d->req_count; 2727 d->transfer_status = 0; 2728 2729 page_bus = page_private(buffer->pages[page]); 2730 d->data_address = cpu_to_le32(page_bus + offset); 2731 2732 rest -= length; 2733 offset = 0; 2734 page++; 2735 2736 context_append(&ctx->context, d, 1, 0); 2737 } 2738 2739 return 0; 2740} 2741 2742static int ohci_queue_iso(struct fw_iso_context *base, 2743 struct fw_iso_packet *packet, 2744 struct fw_iso_buffer *buffer, 2745 unsigned long payload) 2746{ 2747 struct iso_context *ctx = container_of(base, struct iso_context, base); 2748 unsigned long flags; 2749 int ret = -ENOSYS; 2750 2751 spin_lock_irqsave(&ctx->context.ohci->lock, flags); 2752 switch (base->type) { 2753 case FW_ISO_CONTEXT_TRANSMIT: 2754 ret = queue_iso_transmit(ctx, packet, buffer, payload); 2755 break; 2756 case FW_ISO_CONTEXT_RECEIVE: 2757 ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload); 2758 break; 2759 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 2760 ret = queue_iso_buffer_fill(ctx, packet, buffer, payload); 2761 break; 2762 } 2763 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); 2764 2765 return ret; 2766} 2767 2768static const struct fw_card_driver ohci_driver = { 2769 .enable = ohci_enable, 2770 .read_phy_reg = ohci_read_phy_reg, 2771 .update_phy_reg = ohci_update_phy_reg, 2772 .set_config_rom = ohci_set_config_rom, 2773 .send_request = ohci_send_request, 2774 .send_response = ohci_send_response, 2775 .cancel_packet = ohci_cancel_packet, 2776 .enable_phys_dma = ohci_enable_phys_dma, 2777 .read_csr = ohci_read_csr, 2778 .write_csr = ohci_write_csr, 2779 2780 .allocate_iso_context = ohci_allocate_iso_context, 2781 .free_iso_context = ohci_free_iso_context, 2782 .set_iso_channels = ohci_set_iso_channels, 2783 .queue_iso = ohci_queue_iso, 2784 .start_iso = ohci_start_iso, 2785 .stop_iso = ohci_stop_iso, 2786}; 2787 2788#ifdef CONFIG_PPC_PMAC 2789static void pmac_ohci_on(struct pci_dev *dev) 2790{ 2791 if (machine_is(powermac)) { 2792 struct device_node *ofn = pci_device_to_OF_node(dev); 2793 2794 if (ofn) { 2795 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1); 2796 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1); 2797 } 2798 } 2799} 2800 2801static void pmac_ohci_off(struct pci_dev *dev) 2802{ 2803 if (machine_is(powermac)) { 2804 struct device_node *ofn = pci_device_to_OF_node(dev); 2805 2806 if (ofn) { 2807 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0); 2808 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0); 2809 } 2810 } 2811} 2812#else 2813static inline void pmac_ohci_on(struct pci_dev *dev) {} 2814static inline void pmac_ohci_off(struct pci_dev *dev) {} 2815#endif /* CONFIG_PPC_PMAC */ 2816 2817static int __devinit pci_probe(struct pci_dev *dev, 2818 const struct pci_device_id *ent) 2819{ 2820 struct fw_ohci *ohci; 2821 u32 bus_options, max_receive, link_speed, version; 2822 u64 guid; 2823 int i, err, n_ir, n_it; 2824 size_t size; 2825 2826 ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); 2827 if (ohci == NULL) { 2828 err = -ENOMEM; 2829 goto fail; 2830 } 2831 2832 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); 2833 2834 pmac_ohci_on(dev); 2835 2836 err = pci_enable_device(dev); 2837 if (err) { 2838 fw_error("Failed to enable OHCI hardware\n"); 2839 goto fail_free; 2840 } 2841 2842 pci_set_master(dev); 2843 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0); 2844 pci_set_drvdata(dev, ohci); 2845 2846 spin_lock_init(&ohci->lock); 2847 mutex_init(&ohci->phy_reg_mutex); 2848 2849 tasklet_init(&ohci->bus_reset_tasklet, 2850 bus_reset_tasklet, (unsigned long)ohci); 2851 2852 err = pci_request_region(dev, 0, ohci_driver_name); 2853 if (err) { 2854 fw_error("MMIO resource unavailable\n"); 2855 goto fail_disable; 2856 } 2857 2858 ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE); 2859 if (ohci->registers == NULL) { 2860 fw_error("Failed to remap registers\n"); 2861 err = -ENXIO; 2862 goto fail_iomem; 2863 } 2864 2865 for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++) 2866 if ((ohci_quirks[i].vendor == dev->vendor) && 2867 (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID || 2868 ohci_quirks[i].device == dev->device) && 2869 (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID || 2870 ohci_quirks[i].revision >= dev->revision)) { 2871 ohci->quirks = ohci_quirks[i].flags; 2872 break; 2873 } 2874 if (param_quirks) 2875 ohci->quirks = param_quirks; 2876 2877 ar_context_init(&ohci->ar_request_ctx, ohci, 2878 OHCI1394_AsReqRcvContextControlSet); 2879 2880 ar_context_init(&ohci->ar_response_ctx, ohci, 2881 OHCI1394_AsRspRcvContextControlSet); 2882 2883 context_init(&ohci->at_request_ctx, ohci, 2884 OHCI1394_AsReqTrContextControlSet, handle_at_packet); 2885 2886 context_init(&ohci->at_response_ctx, ohci, 2887 OHCI1394_AsRspTrContextControlSet, handle_at_packet); 2888 2889 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0); 2890 ohci->ir_context_channels = ~0ULL; 2891 ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); 2892 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0); 2893 n_ir = hweight32(ohci->ir_context_mask); 2894 size = sizeof(struct iso_context) * n_ir; 2895 ohci->ir_context_list = kzalloc(size, GFP_KERNEL); 2896 2897 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); 2898 ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); 2899 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); 2900 n_it = hweight32(ohci->it_context_mask); 2901 size = sizeof(struct iso_context) * n_it; 2902 ohci->it_context_list = kzalloc(size, GFP_KERNEL); 2903 2904 if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { 2905 err = -ENOMEM; 2906 goto fail_contexts; 2907 } 2908 2909 /* self-id dma buffer allocation */ 2910 ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device, 2911 SELF_ID_BUF_SIZE, 2912 &ohci->self_id_bus, 2913 GFP_KERNEL); 2914 if (ohci->self_id_cpu == NULL) { 2915 err = -ENOMEM; 2916 goto fail_contexts; 2917 } 2918 2919 bus_options = reg_read(ohci, OHCI1394_BusOptions); 2920 max_receive = (bus_options >> 12) & 0xf; 2921 link_speed = bus_options & 0x7; 2922 guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) | 2923 reg_read(ohci, OHCI1394_GUIDLo); 2924 2925 err = fw_card_add(&ohci->card, max_receive, link_speed, guid); 2926 if (err) 2927 goto fail_self_id; 2928 2929 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 2930 fw_notify("Added fw-ohci device %s, OHCI v%x.%x, " 2931 "%d IR + %d IT contexts, quirks 0x%x\n", 2932 dev_name(&dev->dev), version >> 16, version & 0xff, 2933 n_ir, n_it, ohci->quirks); 2934 2935 return 0; 2936 2937 fail_self_id: 2938 dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE, 2939 ohci->self_id_cpu, ohci->self_id_bus); 2940 fail_contexts: 2941 kfree(ohci->ir_context_list); 2942 kfree(ohci->it_context_list); 2943 context_release(&ohci->at_response_ctx); 2944 context_release(&ohci->at_request_ctx); 2945 ar_context_release(&ohci->ar_response_ctx); 2946 ar_context_release(&ohci->ar_request_ctx); 2947 pci_iounmap(dev, ohci->registers); 2948 fail_iomem: 2949 pci_release_region(dev, 0); 2950 fail_disable: 2951 pci_disable_device(dev); 2952 fail_free: 2953 kfree(&ohci->card); 2954 pmac_ohci_off(dev); 2955 fail: 2956 if (err == -ENOMEM) 2957 fw_error("Out of memory\n"); 2958 2959 return err; 2960} 2961 2962static void pci_remove(struct pci_dev *dev) 2963{ 2964 struct fw_ohci *ohci; 2965 2966 ohci = pci_get_drvdata(dev); 2967 reg_write(ohci, OHCI1394_IntMaskClear, ~0); 2968 flush_writes(ohci); 2969 fw_core_remove_card(&ohci->card); 2970 2971 2972 software_reset(ohci); 2973 free_irq(dev->irq, ohci); 2974 2975 if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom) 2976 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2977 ohci->next_config_rom, ohci->next_config_rom_bus); 2978 if (ohci->config_rom) 2979 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2980 ohci->config_rom, ohci->config_rom_bus); 2981 dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE, 2982 ohci->self_id_cpu, ohci->self_id_bus); 2983 ar_context_release(&ohci->ar_request_ctx); 2984 ar_context_release(&ohci->ar_response_ctx); 2985 context_release(&ohci->at_request_ctx); 2986 context_release(&ohci->at_response_ctx); 2987 kfree(ohci->it_context_list); 2988 kfree(ohci->ir_context_list); 2989 pci_disable_msi(dev); 2990 pci_iounmap(dev, ohci->registers); 2991 pci_release_region(dev, 0); 2992 pci_disable_device(dev); 2993 kfree(&ohci->card); 2994 pmac_ohci_off(dev); 2995 2996 fw_notify("Removed fw-ohci device.\n"); 2997} 2998 2999#ifdef CONFIG_PM 3000static int pci_suspend(struct pci_dev *dev, pm_message_t state) 3001{ 3002 struct fw_ohci *ohci = pci_get_drvdata(dev); 3003 int err; 3004 3005 software_reset(ohci); 3006 free_irq(dev->irq, ohci); 3007 pci_disable_msi(dev); 3008 err = pci_save_state(dev); 3009 if (err) { 3010 fw_error("pci_save_state failed\n"); 3011 return err; 3012 } 3013 err = pci_set_power_state(dev, pci_choose_state(dev, state)); 3014 if (err) 3015 fw_error("pci_set_power_state failed with %d\n", err); 3016 pmac_ohci_off(dev); 3017 3018 return 0; 3019} 3020 3021static int pci_resume(struct pci_dev *dev) 3022{ 3023 struct fw_ohci *ohci = pci_get_drvdata(dev); 3024 int err; 3025 3026 pmac_ohci_on(dev); 3027 pci_set_power_state(dev, PCI_D0); 3028 pci_restore_state(dev); 3029 err = pci_enable_device(dev); 3030 if (err) { 3031 fw_error("pci_enable_device failed\n"); 3032 return err; 3033 } 3034 3035 return ohci_enable(&ohci->card, NULL, 0); 3036} 3037#endif 3038 3039static const struct pci_device_id pci_table[] = { 3040 { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) }, 3041 { } 3042}; 3043 3044MODULE_DEVICE_TABLE(pci, pci_table); 3045 3046static struct pci_driver fw_ohci_pci_driver = { 3047 .name = ohci_driver_name, 3048 .id_table = pci_table, 3049 .probe = pci_probe, 3050 .remove = pci_remove, 3051#ifdef CONFIG_PM 3052 .resume = pci_resume, 3053 .suspend = pci_suspend, 3054#endif 3055}; 3056 3057MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); 3058MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers"); 3059MODULE_LICENSE("GPL"); 3060 3061/* Provide a module alias so root-on-sbp2 initrds don't break. */ 3062#ifndef CONFIG_IEEE1394_OHCI1394_MODULE 3063MODULE_ALIAS("ohci1394"); 3064#endif 3065 3066static int __init fw_ohci_init(void) 3067{ 3068 return pci_register_driver(&fw_ohci_pci_driver); 3069} 3070 3071static void __exit fw_ohci_cleanup(void) 3072{ 3073 pci_unregister_driver(&fw_ohci_pci_driver); 3074} 3075 3076module_init(fw_ohci_init); 3077module_exit(fw_ohci_cleanup); 3078