1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Driver for OHCI 1394 controllers
4 *
5 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
6 */
7
8#include <linux/bitops.h>
9#include <linux/bug.h>
10#include <linux/compiler.h>
11#include <linux/delay.h>
12#include <linux/device.h>
13#include <linux/dma-mapping.h>
14#include <linux/firewire.h>
15#include <linux/firewire-constants.h>
16#include <linux/init.h>
17#include <linux/interrupt.h>
18#include <linux/io.h>
19#include <linux/kernel.h>
20#include <linux/list.h>
21#include <linux/mm.h>
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/mutex.h>
25#include <linux/pci.h>
26#include <linux/pci_ids.h>
27#include <linux/slab.h>
28#include <linux/spinlock.h>
29#include <linux/string.h>
30#include <linux/time.h>
31#include <linux/vmalloc.h>
32#include <linux/workqueue.h>
33
34#include <asm/byteorder.h>
35#include <asm/page.h>
36
37#ifdef CONFIG_PPC_PMAC
38#include <asm/pmac_feature.h>
39#endif
40
41#include "core.h"
42#include "ohci.h"
43
44#define ohci_info(ohci, f, args...)	dev_info(ohci->card.device, f, ##args)
45#define ohci_notice(ohci, f, args...)	dev_notice(ohci->card.device, f, ##args)
46#define ohci_err(ohci, f, args...)	dev_err(ohci->card.device, f, ##args)
47
48#define DESCRIPTOR_OUTPUT_MORE		0
49#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
50#define DESCRIPTOR_INPUT_MORE		(2 << 12)
51#define DESCRIPTOR_INPUT_LAST		(3 << 12)
52#define DESCRIPTOR_STATUS		(1 << 11)
53#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
54#define DESCRIPTOR_PING			(1 << 7)
55#define DESCRIPTOR_YY			(1 << 6)
56#define DESCRIPTOR_NO_IRQ		(0 << 4)
57#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
58#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
59#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
60#define DESCRIPTOR_WAIT			(3 << 0)
61
62#define DESCRIPTOR_CMD			(0xf << 12)
63
64struct descriptor {
65	__le16 req_count;
66	__le16 control;
67	__le32 data_address;
68	__le32 branch_address;
69	__le16 res_count;
70	__le16 transfer_status;
71} __attribute__((aligned(16)));
72
73#define CONTROL_SET(regs)	(regs)
74#define CONTROL_CLEAR(regs)	((regs) + 4)
75#define COMMAND_PTR(regs)	((regs) + 12)
76#define CONTEXT_MATCH(regs)	((regs) + 16)
77
78#define AR_BUFFER_SIZE	(32*1024)
79#define AR_BUFFERS_MIN	DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
80/* we need at least two pages for proper list management */
81#define AR_BUFFERS	(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)
82
83#define MAX_ASYNC_PAYLOAD	4096
84#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
85#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
86
87struct ar_context {
88	struct fw_ohci *ohci;
89	struct page *pages[AR_BUFFERS];
90	void *buffer;
91	struct descriptor *descriptors;
92	dma_addr_t descriptors_bus;
93	void *pointer;
94	unsigned int last_buffer_index;
95	u32 regs;
96	struct tasklet_struct tasklet;
97};
98
99struct context;
100
101typedef int (*descriptor_callback_t)(struct context *ctx,
102				     struct descriptor *d,
103				     struct descriptor *last);
104
105/*
106 * A buffer that contains a block of DMA-able coherent memory used for
107 * storing a portion of a DMA descriptor program.
108 */
109struct descriptor_buffer {
110	struct list_head list;
111	dma_addr_t buffer_bus;
112	size_t buffer_size;
113	size_t used;
114	struct descriptor buffer[];
115};
116
117struct context {
118	struct fw_ohci *ohci;
119	u32 regs;
120	int total_allocation;
121	u32 current_bus;
122	bool running;
123	bool flushing;
124
125	/*
126	 * List of page-sized buffers for storing DMA descriptors.
127	 * Head of list contains buffers in use and tail of list contains
128	 * free buffers.
129	 */
130	struct list_head buffer_list;
131
132	/*
133	 * Pointer to a buffer inside buffer_list that contains the tail
134	 * end of the current DMA program.
135	 */
136	struct descriptor_buffer *buffer_tail;
137
138	/*
139	 * The descriptor containing the branch address of the first
140	 * descriptor that has not yet been filled by the device.
141	 */
142	struct descriptor *last;
143
144	/*
145	 * The last descriptor block in the DMA program. It contains the branch
146	 * address that must be updated upon appending a new descriptor.
147	 */
148	struct descriptor *prev;
149	int prev_z;
150
151	descriptor_callback_t callback;
152
153	struct tasklet_struct tasklet;
154};
155
156#define IT_HEADER_SY(v)          ((v) <<  0)
157#define IT_HEADER_TCODE(v)       ((v) <<  4)
158#define IT_HEADER_CHANNEL(v)     ((v) <<  8)
159#define IT_HEADER_TAG(v)         ((v) << 14)
160#define IT_HEADER_SPEED(v)       ((v) << 16)
161#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
162
163struct iso_context {
164	struct fw_iso_context base;
165	struct context context;
166	void *header;
167	size_t header_length;
168	unsigned long flushing_completions;
169	u32 mc_buffer_bus;
170	u16 mc_completed;
171	u16 last_timestamp;
172	u8 sync;
173	u8 tags;
174};
175
176#define CONFIG_ROM_SIZE 1024
177
178struct fw_ohci {
179	struct fw_card card;
180
181	__iomem char *registers;
182	int node_id;
183	int generation;
184	int request_generation;	/* for timestamping incoming requests */
185	unsigned quirks;
186	unsigned int pri_req_max;
187	u32 bus_time;
188	bool bus_time_running;
189	bool is_root;
190	bool csr_state_setclear_abdicate;
191	int n_ir;
192	int n_it;
193	/*
194	 * Spinlock for accessing fw_ohci data.  Never call out of
195	 * this driver with this lock held.
196	 */
197	spinlock_t lock;
198
199	struct mutex phy_reg_mutex;
200
201	void *misc_buffer;
202	dma_addr_t misc_buffer_bus;
203
204	struct ar_context ar_request_ctx;
205	struct ar_context ar_response_ctx;
206	struct context at_request_ctx;
207	struct context at_response_ctx;
208
209	u32 it_context_support;
210	u32 it_context_mask;     /* unoccupied IT contexts */
211	struct iso_context *it_context_list;
212	u64 ir_context_channels; /* unoccupied channels */
213	u32 ir_context_support;
214	u32 ir_context_mask;     /* unoccupied IR contexts */
215	struct iso_context *ir_context_list;
216	u64 mc_channels; /* channels in use by the multichannel IR context */
217	bool mc_allocated;
218
219	__be32    *config_rom;
220	dma_addr_t config_rom_bus;
221	__be32    *next_config_rom;
222	dma_addr_t next_config_rom_bus;
223	__be32     next_header;
224
225	__le32    *self_id;
226	dma_addr_t self_id_bus;
227	struct work_struct bus_reset_work;
228
229	u32 self_id_buffer[512];
230};
231
232static struct workqueue_struct *selfid_workqueue;
233
234static inline struct fw_ohci *fw_ohci(struct fw_card *card)
235{
236	return container_of(card, struct fw_ohci, card);
237}
238
239#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
240#define IR_CONTEXT_BUFFER_FILL		0x80000000
241#define IR_CONTEXT_ISOCH_HEADER		0x40000000
242#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
243#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
244#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000
245
246#define CONTEXT_RUN	0x8000
247#define CONTEXT_WAKE	0x1000
248#define CONTEXT_DEAD	0x0800
249#define CONTEXT_ACTIVE	0x0400
250
251#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
252#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
253#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8
254
255#define OHCI1394_REGISTER_SIZE		0x800
256#define OHCI1394_PCI_HCI_Control	0x40
257#define SELF_ID_BUF_SIZE		0x800
258#define OHCI_TCODE_PHY_PACKET		0x0e
259#define OHCI_VERSION_1_1		0x010010
260
261static char ohci_driver_name[] = KBUILD_MODNAME;
262
263#define PCI_VENDOR_ID_PINNACLE_SYSTEMS	0x11bd
264#define PCI_DEVICE_ID_AGERE_FW643	0x5901
265#define PCI_DEVICE_ID_CREATIVE_SB1394	0x4001
266#define PCI_DEVICE_ID_JMICRON_JMB38X_FW	0x2380
267#define PCI_DEVICE_ID_TI_TSB12LV22	0x8009
268#define PCI_DEVICE_ID_TI_TSB12LV26	0x8020
269#define PCI_DEVICE_ID_TI_TSB82AA2	0x8025
270#define PCI_DEVICE_ID_VIA_VT630X	0x3044
271#define PCI_REV_ID_VIA_VT6306		0x46
272#define PCI_DEVICE_ID_VIA_VT6315	0x3403
273
274#define QUIRK_CYCLE_TIMER		0x1
275#define QUIRK_RESET_PACKET		0x2
276#define QUIRK_BE_HEADERS		0x4
277#define QUIRK_NO_1394A			0x8
278#define QUIRK_NO_MSI			0x10
279#define QUIRK_TI_SLLZ059		0x20
280#define QUIRK_IR_WAKE			0x40
281
282// On PCI Express Root Complex in any type of AMD Ryzen machine, VIA VT6306/6307/6308 with Asmedia
283// ASM1083/1085 brings an inconvenience that the read accesses to 'Isochronous Cycle Timer' register
284// (at offset 0xf0 in PCI I/O space) often causes unexpected system reboot. The mechanism is not
285// clear, since the read access to the other registers is enough safe; e.g. 'Node ID' register,
286// while it is probable due to detection of any type of PCIe error.
287#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ	0x80000000
288
289#if IS_ENABLED(CONFIG_X86)
290
291static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci)
292{
293	return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ);
294}
295
296#define PCI_DEVICE_ID_ASMEDIA_ASM108X	0x1080
297
298static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev)
299{
300	const struct pci_dev *pcie_to_pci_bridge;
301
302	// Detect any type of AMD Ryzen machine.
303	if (!static_cpu_has(X86_FEATURE_ZEN))
304		return false;
305
306	// Detect VIA VT6306/6307/6308.
307	if (pdev->vendor != PCI_VENDOR_ID_VIA)
308		return false;
309	if (pdev->device != PCI_DEVICE_ID_VIA_VT630X)
310		return false;
311
312	// Detect Asmedia ASM1083/1085.
313	pcie_to_pci_bridge = pdev->bus->self;
314	if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA)
315		return false;
316	if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X)
317		return false;
318
319	return true;
320}
321
322#else
323#define has_reboot_by_cycle_timer_read_quirk(ohci) false
324#define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev)	false
325#endif
326
327/* In case of multiple matches in ohci_quirks[], only the first one is used. */
328static const struct {
329	unsigned short vendor, device, revision, flags;
330} ohci_quirks[] = {
331	{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
332		QUIRK_CYCLE_TIMER},
333
334	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
335		QUIRK_BE_HEADERS},
336
337	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
338		QUIRK_NO_MSI},
339
340	{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
341		QUIRK_RESET_PACKET},
342
343	{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
344		QUIRK_NO_MSI},
345
346	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
347		QUIRK_CYCLE_TIMER},
348
349	{PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
350		QUIRK_NO_MSI},
351
352	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
353		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
354
355	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
356		QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},
357
358	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
359		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},
360
361	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
362		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},
363
364	{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
365		QUIRK_RESET_PACKET},
366
367	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306,
368		QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
369
370	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
371		QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
372
373	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
374		QUIRK_NO_MSI},
375
376	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
377		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
378};
379
380/* This overrides anything that was found in ohci_quirks[]. */
381static int param_quirks;
382module_param_named(quirks, param_quirks, int, 0644);
383MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
384	", nonatomic cycle timer = "	__stringify(QUIRK_CYCLE_TIMER)
385	", reset packet generation = "	__stringify(QUIRK_RESET_PACKET)
386	", AR/selfID endianness = "	__stringify(QUIRK_BE_HEADERS)
387	", no 1394a enhancements = "	__stringify(QUIRK_NO_1394A)
388	", disable MSI = "		__stringify(QUIRK_NO_MSI)
389	", TI SLLZ059 erratum = "	__stringify(QUIRK_TI_SLLZ059)
390	", IR wake unreliable = "	__stringify(QUIRK_IR_WAKE)
391	")");
392
393#define OHCI_PARAM_DEBUG_AT_AR		1
394#define OHCI_PARAM_DEBUG_SELFIDS	2
395#define OHCI_PARAM_DEBUG_IRQS		4
396#define OHCI_PARAM_DEBUG_BUSRESETS	8 /* only effective before chip init */
397
398static int param_debug;
399module_param_named(debug, param_debug, int, 0644);
400MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
401	", AT/AR events = "	__stringify(OHCI_PARAM_DEBUG_AT_AR)
402	", self-IDs = "		__stringify(OHCI_PARAM_DEBUG_SELFIDS)
403	", IRQs = "		__stringify(OHCI_PARAM_DEBUG_IRQS)
404	", busReset events = "	__stringify(OHCI_PARAM_DEBUG_BUSRESETS)
405	", or a combination, or all = -1)");
406
407static bool param_remote_dma;
408module_param_named(remote_dma, param_remote_dma, bool, 0444);
409MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");
410
411static void log_irqs(struct fw_ohci *ohci, u32 evt)
412{
413	if (likely(!(param_debug &
414			(OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
415		return;
416
417	if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
418	    !(evt & OHCI1394_busReset))
419		return;
420
421	ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
422	    evt & OHCI1394_selfIDComplete	? " selfID"		: "",
423	    evt & OHCI1394_RQPkt		? " AR_req"		: "",
424	    evt & OHCI1394_RSPkt		? " AR_resp"		: "",
425	    evt & OHCI1394_reqTxComplete	? " AT_req"		: "",
426	    evt & OHCI1394_respTxComplete	? " AT_resp"		: "",
427	    evt & OHCI1394_isochRx		? " IR"			: "",
428	    evt & OHCI1394_isochTx		? " IT"			: "",
429	    evt & OHCI1394_postedWriteErr	? " postedWriteErr"	: "",
430	    evt & OHCI1394_cycleTooLong		? " cycleTooLong"	: "",
431	    evt & OHCI1394_cycle64Seconds	? " cycle64Seconds"	: "",
432	    evt & OHCI1394_cycleInconsistent	? " cycleInconsistent"	: "",
433	    evt & OHCI1394_regAccessFail	? " regAccessFail"	: "",
434	    evt & OHCI1394_unrecoverableError	? " unrecoverableError"	: "",
435	    evt & OHCI1394_busReset		? " busReset"		: "",
436	    evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
437		    OHCI1394_RSPkt | OHCI1394_reqTxComplete |
438		    OHCI1394_respTxComplete | OHCI1394_isochRx |
439		    OHCI1394_isochTx | OHCI1394_postedWriteErr |
440		    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
441		    OHCI1394_cycleInconsistent |
442		    OHCI1394_regAccessFail | OHCI1394_busReset)
443						? " ?"			: "");
444}
445
446static const char *speed[] = {
447	[0] = "S100", [1] = "S200", [2] = "S400",    [3] = "beta",
448};
449static const char *power[] = {
450	[0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
451	[4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
452};
453static const char port[] = { '.', '-', 'p', 'c', };
454
455static char _p(u32 *s, int shift)
456{
457	return port[*s >> shift & 3];
458}
459
460static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
461{
462	u32 *s;
463
464	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
465		return;
466
467	ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
468		    self_id_count, generation, ohci->node_id);
469
470	for (s = ohci->self_id_buffer; self_id_count--; ++s)
471		if ((*s & 1 << 23) == 0)
472			ohci_notice(ohci,
473			    "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
474			    *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
475			    speed[*s >> 14 & 3], *s >> 16 & 63,
476			    power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
477			    *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
478		else
479			ohci_notice(ohci,
480			    "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
481			    *s, *s >> 24 & 63,
482			    _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
483			    _p(s,  8), _p(s,  6), _p(s,  4), _p(s,  2));
484}
485
486static const char *evts[] = {
487	[0x00] = "evt_no_status",	[0x01] = "-reserved-",
488	[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
489	[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
490	[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
491	[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
492	[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
493	[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
494	[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
495	[0x10] = "-reserved-",		[0x11] = "ack_complete",
496	[0x12] = "ack_pending ",	[0x13] = "-reserved-",
497	[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
498	[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
499	[0x18] = "-reserved-",		[0x19] = "-reserved-",
500	[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
501	[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
502	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
503	[0x20] = "pending/cancelled",
504};
505static const char *tcodes[] = {
506	[0x0] = "QW req",		[0x1] = "BW req",
507	[0x2] = "W resp",		[0x3] = "-reserved-",
508	[0x4] = "QR req",		[0x5] = "BR req",
509	[0x6] = "QR resp",		[0x7] = "BR resp",
510	[0x8] = "cycle start",		[0x9] = "Lk req",
511	[0xa] = "async stream packet",	[0xb] = "Lk resp",
512	[0xc] = "-reserved-",		[0xd] = "-reserved-",
513	[0xe] = "link internal",	[0xf] = "-reserved-",
514};
515
516static void log_ar_at_event(struct fw_ohci *ohci,
517			    char dir, int speed, u32 *header, int evt)
518{
519	int tcode = header[0] >> 4 & 0xf;
520	char specific[12];
521
522	if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
523		return;
524
525	if (unlikely(evt >= ARRAY_SIZE(evts)))
526			evt = 0x1f;
527
528	if (evt == OHCI1394_evt_bus_reset) {
529		ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
530			    dir, (header[2] >> 16) & 0xff);
531		return;
532	}
533
534	switch (tcode) {
535	case 0x0: case 0x6: case 0x8:
536		snprintf(specific, sizeof(specific), " = %08x",
537			 be32_to_cpu((__force __be32)header[3]));
538		break;
539	case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
540		snprintf(specific, sizeof(specific), " %x,%x",
541			 header[3] >> 16, header[3] & 0xffff);
542		break;
543	default:
544		specific[0] = '\0';
545	}
546
547	switch (tcode) {
548	case 0xa:
549		ohci_notice(ohci, "A%c %s, %s\n",
550			    dir, evts[evt], tcodes[tcode]);
551		break;
552	case 0xe:
553		ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
554			    dir, evts[evt], header[1], header[2]);
555		break;
556	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
557		ohci_notice(ohci,
558			    "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n",
559			    dir, speed, header[0] >> 10 & 0x3f,
560			    header[1] >> 16, header[0] >> 16, evts[evt],
561			    tcodes[tcode], header[1] & 0xffff, header[2], specific);
562		break;
563	default:
564		ohci_notice(ohci,
565			    "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
566			    dir, speed, header[0] >> 10 & 0x3f,
567			    header[1] >> 16, header[0] >> 16, evts[evt],
568			    tcodes[tcode], specific);
569	}
570}
571
572static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
573{
574	writel(data, ohci->registers + offset);
575}
576
577static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
578{
579	return readl(ohci->registers + offset);
580}
581
582static inline void flush_writes(const struct fw_ohci *ohci)
583{
584	/* Do a dummy read to flush writes. */
585	reg_read(ohci, OHCI1394_Version);
586}
587
588/*
589 * Beware!  read_phy_reg(), write_phy_reg(), update_phy_reg(), and
590 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
591 * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
592 * directly.  Exceptions are intrinsically serialized contexts like pci_probe.
593 */
594static int read_phy_reg(struct fw_ohci *ohci, int addr)
595{
596	u32 val;
597	int i;
598
599	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
600	for (i = 0; i < 3 + 100; i++) {
601		val = reg_read(ohci, OHCI1394_PhyControl);
602		if (!~val)
603			return -ENODEV; /* Card was ejected. */
604
605		if (val & OHCI1394_PhyControl_ReadDone)
606			return OHCI1394_PhyControl_ReadData(val);
607
608		/*
609		 * Try a few times without waiting.  Sleeping is necessary
610		 * only when the link/PHY interface is busy.
611		 */
612		if (i >= 3)
613			msleep(1);
614	}
615	ohci_err(ohci, "failed to read phy reg %d\n", addr);
616	dump_stack();
617
618	return -EBUSY;
619}
620
621static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
622{
623	int i;
624
625	reg_write(ohci, OHCI1394_PhyControl,
626		  OHCI1394_PhyControl_Write(addr, val));
627	for (i = 0; i < 3 + 100; i++) {
628		val = reg_read(ohci, OHCI1394_PhyControl);
629		if (!~val)
630			return -ENODEV; /* Card was ejected. */
631
632		if (!(val & OHCI1394_PhyControl_WritePending))
633			return 0;
634
635		if (i >= 3)
636			msleep(1);
637	}
638	ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);
639	dump_stack();
640
641	return -EBUSY;
642}
643
644static int update_phy_reg(struct fw_ohci *ohci, int addr,
645			  int clear_bits, int set_bits)
646{
647	int ret = read_phy_reg(ohci, addr);
648	if (ret < 0)
649		return ret;
650
651	/*
652	 * The interrupt status bits are cleared by writing a one bit.
653	 * Avoid clearing them unless explicitly requested in set_bits.
654	 */
655	if (addr == 5)
656		clear_bits |= PHY_INT_STATUS_BITS;
657
658	return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
659}
660
661static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
662{
663	int ret;
664
665	ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
666	if (ret < 0)
667		return ret;
668
669	return read_phy_reg(ohci, addr);
670}
671
672static int ohci_read_phy_reg(struct fw_card *card, int addr)
673{
674	struct fw_ohci *ohci = fw_ohci(card);
675	int ret;
676
677	mutex_lock(&ohci->phy_reg_mutex);
678	ret = read_phy_reg(ohci, addr);
679	mutex_unlock(&ohci->phy_reg_mutex);
680
681	return ret;
682}
683
684static int ohci_update_phy_reg(struct fw_card *card, int addr,
685			       int clear_bits, int set_bits)
686{
687	struct fw_ohci *ohci = fw_ohci(card);
688	int ret;
689
690	mutex_lock(&ohci->phy_reg_mutex);
691	ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
692	mutex_unlock(&ohci->phy_reg_mutex);
693
694	return ret;
695}
696
697static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
698{
699	return page_private(ctx->pages[i]);
700}
701
702static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
703{
704	struct descriptor *d;
705
706	d = &ctx->descriptors[index];
707	d->branch_address  &= cpu_to_le32(~0xf);
708	d->res_count       =  cpu_to_le16(PAGE_SIZE);
709	d->transfer_status =  0;
710
711	wmb(); /* finish init of new descriptors before branch_address update */
712	d = &ctx->descriptors[ctx->last_buffer_index];
713	d->branch_address  |= cpu_to_le32(1);
714
715	ctx->last_buffer_index = index;
716
717	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
718}
719
720static void ar_context_release(struct ar_context *ctx)
721{
722	struct device *dev = ctx->ohci->card.device;
723	unsigned int i;
724
725	if (!ctx->buffer)
726		return;
727
728	vunmap(ctx->buffer);
729
730	for (i = 0; i < AR_BUFFERS; i++) {
731		if (ctx->pages[i])
732			dma_free_pages(dev, PAGE_SIZE, ctx->pages[i],
733				       ar_buffer_bus(ctx, i), DMA_FROM_DEVICE);
734	}
735}
736
737static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
738{
739	struct fw_ohci *ohci = ctx->ohci;
740
741	if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
742		reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
743		flush_writes(ohci);
744
745		ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg);
746	}
747	/* FIXME: restart? */
748}
749
750static inline unsigned int ar_next_buffer_index(unsigned int index)
751{
752	return (index + 1) % AR_BUFFERS;
753}
754
755static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
756{
757	return ar_next_buffer_index(ctx->last_buffer_index);
758}
759
760/*
761 * We search for the buffer that contains the last AR packet DMA data written
762 * by the controller.
763 */
764static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
765						 unsigned int *buffer_offset)
766{
767	unsigned int i, next_i, last = ctx->last_buffer_index;
768	__le16 res_count, next_res_count;
769
770	i = ar_first_buffer_index(ctx);
771	res_count = READ_ONCE(ctx->descriptors[i].res_count);
772
773	/* A buffer that is not yet completely filled must be the last one. */
774	while (i != last && res_count == 0) {
775
776		/* Peek at the next descriptor. */
777		next_i = ar_next_buffer_index(i);
778		rmb(); /* read descriptors in order */
779		next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
780		/*
781		 * If the next descriptor is still empty, we must stop at this
782		 * descriptor.
783		 */
784		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
785			/*
786			 * The exception is when the DMA data for one packet is
787			 * split over three buffers; in this case, the middle
788			 * buffer's descriptor might be never updated by the
789			 * controller and look still empty, and we have to peek
790			 * at the third one.
791			 */
792			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
793				next_i = ar_next_buffer_index(next_i);
794				rmb();
795				next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
796				if (next_res_count != cpu_to_le16(PAGE_SIZE))
797					goto next_buffer_is_active;
798			}
799
800			break;
801		}
802
803next_buffer_is_active:
804		i = next_i;
805		res_count = next_res_count;
806	}
807
808	rmb(); /* read res_count before the DMA data */
809
810	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
811	if (*buffer_offset > PAGE_SIZE) {
812		*buffer_offset = 0;
813		ar_context_abort(ctx, "corrupted descriptor");
814	}
815
816	return i;
817}
818
819static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
820				    unsigned int end_buffer_index,
821				    unsigned int end_buffer_offset)
822{
823	unsigned int i;
824
825	i = ar_first_buffer_index(ctx);
826	while (i != end_buffer_index) {
827		dma_sync_single_for_cpu(ctx->ohci->card.device,
828					ar_buffer_bus(ctx, i),
829					PAGE_SIZE, DMA_FROM_DEVICE);
830		i = ar_next_buffer_index(i);
831	}
832	if (end_buffer_offset > 0)
833		dma_sync_single_for_cpu(ctx->ohci->card.device,
834					ar_buffer_bus(ctx, i),
835					end_buffer_offset, DMA_FROM_DEVICE);
836}
837
838#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
839#define cond_le32_to_cpu(v) \
840	(ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
841#else
842#define cond_le32_to_cpu(v) le32_to_cpu(v)
843#endif
844
845static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
846{
847	struct fw_ohci *ohci = ctx->ohci;
848	struct fw_packet p;
849	u32 status, length, tcode;
850	int evt;
851
852	p.header[0] = cond_le32_to_cpu(buffer[0]);
853	p.header[1] = cond_le32_to_cpu(buffer[1]);
854	p.header[2] = cond_le32_to_cpu(buffer[2]);
855
856	tcode = (p.header[0] >> 4) & 0x0f;
857	switch (tcode) {
858	case TCODE_WRITE_QUADLET_REQUEST:
859	case TCODE_READ_QUADLET_RESPONSE:
860		p.header[3] = (__force __u32) buffer[3];
861		p.header_length = 16;
862		p.payload_length = 0;
863		break;
864
865	case TCODE_READ_BLOCK_REQUEST :
866		p.header[3] = cond_le32_to_cpu(buffer[3]);
867		p.header_length = 16;
868		p.payload_length = 0;
869		break;
870
871	case TCODE_WRITE_BLOCK_REQUEST:
872	case TCODE_READ_BLOCK_RESPONSE:
873	case TCODE_LOCK_REQUEST:
874	case TCODE_LOCK_RESPONSE:
875		p.header[3] = cond_le32_to_cpu(buffer[3]);
876		p.header_length = 16;
877		p.payload_length = p.header[3] >> 16;
878		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
879			ar_context_abort(ctx, "invalid packet length");
880			return NULL;
881		}
882		break;
883
884	case TCODE_WRITE_RESPONSE:
885	case TCODE_READ_QUADLET_REQUEST:
886	case OHCI_TCODE_PHY_PACKET:
887		p.header_length = 12;
888		p.payload_length = 0;
889		break;
890
891	default:
892		ar_context_abort(ctx, "invalid tcode");
893		return NULL;
894	}
895
896	p.payload = (void *) buffer + p.header_length;
897
898	/* FIXME: What to do about evt_* errors? */
899	length = (p.header_length + p.payload_length + 3) / 4;
900	status = cond_le32_to_cpu(buffer[length]);
901	evt    = (status >> 16) & 0x1f;
902
903	p.ack        = evt - 16;
904	p.speed      = (status >> 21) & 0x7;
905	p.timestamp  = status & 0xffff;
906	p.generation = ohci->request_generation;
907
908	log_ar_at_event(ohci, 'R', p.speed, p.header, evt);
909
910	/*
911	 * Several controllers, notably from NEC and VIA, forget to
912	 * write ack_complete status at PHY packet reception.
913	 */
914	if (evt == OHCI1394_evt_no_status &&
915	    (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
916		p.ack = ACK_COMPLETE;
917
918	/*
919	 * The OHCI bus reset handler synthesizes a PHY packet with
920	 * the new generation number when a bus reset happens (see
921	 * section 8.4.2.3).  This helps us determine when a request
922	 * was received and make sure we send the response in the same
923	 * generation.  We only need this for requests; for responses
924	 * we use the unique tlabel for finding the matching
925	 * request.
926	 *
927	 * Alas some chips sometimes emit bus reset packets with a
928	 * wrong generation.  We set the correct generation for these
929	 * at a slightly incorrect time (in bus_reset_work).
930	 */
931	if (evt == OHCI1394_evt_bus_reset) {
932		if (!(ohci->quirks & QUIRK_RESET_PACKET))
933			ohci->request_generation = (p.header[2] >> 16) & 0xff;
934	} else if (ctx == &ohci->ar_request_ctx) {
935		fw_core_handle_request(&ohci->card, &p);
936	} else {
937		fw_core_handle_response(&ohci->card, &p);
938	}
939
940	return buffer + length + 1;
941}
942
943static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
944{
945	void *next;
946
947	while (p < end) {
948		next = handle_ar_packet(ctx, p);
949		if (!next)
950			return p;
951		p = next;
952	}
953
954	return p;
955}
956
957static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
958{
959	unsigned int i;
960
961	i = ar_first_buffer_index(ctx);
962	while (i != end_buffer) {
963		dma_sync_single_for_device(ctx->ohci->card.device,
964					   ar_buffer_bus(ctx, i),
965					   PAGE_SIZE, DMA_FROM_DEVICE);
966		ar_context_link_page(ctx, i);
967		i = ar_next_buffer_index(i);
968	}
969}
970
971static void ar_context_tasklet(unsigned long data)
972{
973	struct ar_context *ctx = (struct ar_context *)data;
974	unsigned int end_buffer_index, end_buffer_offset;
975	void *p, *end;
976
977	p = ctx->pointer;
978	if (!p)
979		return;
980
981	end_buffer_index = ar_search_last_active_buffer(ctx,
982							&end_buffer_offset);
983	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
984	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
985
986	if (end_buffer_index < ar_first_buffer_index(ctx)) {
987		/*
988		 * The filled part of the overall buffer wraps around; handle
989		 * all packets up to the buffer end here.  If the last packet
990		 * wraps around, its tail will be visible after the buffer end
991		 * because the buffer start pages are mapped there again.
992		 */
993		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
994		p = handle_ar_packets(ctx, p, buffer_end);
995		if (p < buffer_end)
996			goto error;
997		/* adjust p to point back into the actual buffer */
998		p -= AR_BUFFERS * PAGE_SIZE;
999	}
1000
1001	p = handle_ar_packets(ctx, p, end);
1002	if (p != end) {
1003		if (p > end)
1004			ar_context_abort(ctx, "inconsistent descriptor");
1005		goto error;
1006	}
1007
1008	ctx->pointer = p;
1009	ar_recycle_buffers(ctx, end_buffer_index);
1010
1011	return;
1012
1013error:
1014	ctx->pointer = NULL;
1015}
1016
1017static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
1018			   unsigned int descriptors_offset, u32 regs)
1019{
1020	struct device *dev = ohci->card.device;
1021	unsigned int i;
1022	dma_addr_t dma_addr;
1023	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
1024	struct descriptor *d;
1025
1026	ctx->regs        = regs;
1027	ctx->ohci        = ohci;
1028	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
1029
1030	for (i = 0; i < AR_BUFFERS; i++) {
1031		ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
1032						DMA_FROM_DEVICE, GFP_KERNEL);
1033		if (!ctx->pages[i])
1034			goto out_of_memory;
1035		set_page_private(ctx->pages[i], dma_addr);
1036		dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE,
1037					   DMA_FROM_DEVICE);
1038	}
1039
1040	for (i = 0; i < AR_BUFFERS; i++)
1041		pages[i]              = ctx->pages[i];
1042	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
1043		pages[AR_BUFFERS + i] = ctx->pages[i];
1044	ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
1045	if (!ctx->buffer)
1046		goto out_of_memory;
1047
1048	ctx->descriptors     = ohci->misc_buffer     + descriptors_offset;
1049	ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;
1050
1051	for (i = 0; i < AR_BUFFERS; i++) {
1052		d = &ctx->descriptors[i];
1053		d->req_count      = cpu_to_le16(PAGE_SIZE);
1054		d->control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
1055						DESCRIPTOR_STATUS |
1056						DESCRIPTOR_BRANCH_ALWAYS);
1057		d->data_address   = cpu_to_le32(ar_buffer_bus(ctx, i));
1058		d->branch_address = cpu_to_le32(ctx->descriptors_bus +
1059			ar_next_buffer_index(i) * sizeof(struct descriptor));
1060	}
1061
1062	return 0;
1063
1064out_of_memory:
1065	ar_context_release(ctx);
1066
1067	return -ENOMEM;
1068}
1069
1070static void ar_context_run(struct ar_context *ctx)
1071{
1072	unsigned int i;
1073
1074	for (i = 0; i < AR_BUFFERS; i++)
1075		ar_context_link_page(ctx, i);
1076
1077	ctx->pointer = ctx->buffer;
1078
1079	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
1080	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
1081}
1082
1083static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
1084{
1085	__le16 branch;
1086
1087	branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);
1088
1089	/* figure out which descriptor the branch address goes in */
1090	if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
1091		return d;
1092	else
1093		return d + z - 1;
1094}
1095
1096static void context_tasklet(unsigned long data)
1097{
1098	struct context *ctx = (struct context *) data;
1099	struct descriptor *d, *last;
1100	u32 address;
1101	int z;
1102	struct descriptor_buffer *desc;
1103
1104	desc = list_entry(ctx->buffer_list.next,
1105			struct descriptor_buffer, list);
1106	last = ctx->last;
1107	while (last->branch_address != 0) {
1108		struct descriptor_buffer *old_desc = desc;
1109		address = le32_to_cpu(last->branch_address);
1110		z = address & 0xf;
1111		address &= ~0xf;
1112		ctx->current_bus = address;
1113
1114		/* If the branch address points to a buffer outside of the
1115		 * current buffer, advance to the next buffer. */
1116		if (address < desc->buffer_bus ||
1117				address >= desc->buffer_bus + desc->used)
1118			desc = list_entry(desc->list.next,
1119					struct descriptor_buffer, list);
1120		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
1121		last = find_branch_descriptor(d, z);
1122
1123		if (!ctx->callback(ctx, d, last))
1124			break;
1125
1126		if (old_desc != desc) {
1127			/* If we've advanced to the next buffer, move the
1128			 * previous buffer to the free list. */
1129			unsigned long flags;
1130			old_desc->used = 0;
1131			spin_lock_irqsave(&ctx->ohci->lock, flags);
1132			list_move_tail(&old_desc->list, &ctx->buffer_list);
1133			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1134		}
1135		ctx->last = last;
1136	}
1137}
1138
1139/*
1140 * Allocate a new buffer and add it to the list of free buffers for this
1141 * context.  Must be called with ohci->lock held.
1142 */
1143static int context_add_buffer(struct context *ctx)
1144{
1145	struct descriptor_buffer *desc;
1146	dma_addr_t bus_addr;
1147	int offset;
1148
1149	/*
1150	 * 16MB of descriptors should be far more than enough for any DMA
1151	 * program.  This will catch run-away userspace or DoS attacks.
1152	 */
1153	if (ctx->total_allocation >= 16*1024*1024)
1154		return -ENOMEM;
1155
1156	desc = dmam_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, &bus_addr, GFP_ATOMIC);
1157	if (!desc)
1158		return -ENOMEM;
1159
1160	offset = (void *)&desc->buffer - (void *)desc;
1161	/*
1162	 * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads
1163	 * for descriptors, even 0x10-byte ones. This can cause page faults when
1164	 * an IOMMU is in use and the oversized read crosses a page boundary.
1165	 * Work around this by always leaving at least 0x10 bytes of padding.
1166	 */
1167	desc->buffer_size = PAGE_SIZE - offset - 0x10;
1168	desc->buffer_bus = bus_addr + offset;
1169	desc->used = 0;
1170
1171	list_add_tail(&desc->list, &ctx->buffer_list);
1172	ctx->total_allocation += PAGE_SIZE;
1173
1174	return 0;
1175}
1176
1177static int context_init(struct context *ctx, struct fw_ohci *ohci,
1178			u32 regs, descriptor_callback_t callback)
1179{
1180	ctx->ohci = ohci;
1181	ctx->regs = regs;
1182	ctx->total_allocation = 0;
1183
1184	INIT_LIST_HEAD(&ctx->buffer_list);
1185	if (context_add_buffer(ctx) < 0)
1186		return -ENOMEM;
1187
1188	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
1189			struct descriptor_buffer, list);
1190
1191	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
1192	ctx->callback = callback;
1193
1194	/*
1195	 * We put a dummy descriptor in the buffer that has a NULL
1196	 * branch address and looks like it's been sent.  That way we
1197	 * have a descriptor to append DMA programs to.
1198	 */
1199	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
1200	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
1201	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
1202	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
1203	ctx->last = ctx->buffer_tail->buffer;
1204	ctx->prev = ctx->buffer_tail->buffer;
1205	ctx->prev_z = 1;
1206
1207	return 0;
1208}
1209
1210static void context_release(struct context *ctx)
1211{
1212	struct fw_card *card = &ctx->ohci->card;
1213	struct descriptor_buffer *desc, *tmp;
1214
1215	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) {
1216		dmam_free_coherent(card->device, PAGE_SIZE, desc,
1217				   desc->buffer_bus - ((void *)&desc->buffer - (void *)desc));
1218	}
1219}
1220
1221/* Must be called with ohci->lock held */
1222static struct descriptor *context_get_descriptors(struct context *ctx,
1223						  int z, dma_addr_t *d_bus)
1224{
1225	struct descriptor *d = NULL;
1226	struct descriptor_buffer *desc = ctx->buffer_tail;
1227
1228	if (z * sizeof(*d) > desc->buffer_size)
1229		return NULL;
1230
1231	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
1232		/* No room for the descriptor in this buffer, so advance to the
1233		 * next one. */
1234
1235		if (desc->list.next == &ctx->buffer_list) {
1236			/* If there is no free buffer next in the list,
1237			 * allocate one. */
1238			if (context_add_buffer(ctx) < 0)
1239				return NULL;
1240		}
1241		desc = list_entry(desc->list.next,
1242				struct descriptor_buffer, list);
1243		ctx->buffer_tail = desc;
1244	}
1245
1246	d = desc->buffer + desc->used / sizeof(*d);
1247	memset(d, 0, z * sizeof(*d));
1248	*d_bus = desc->buffer_bus + desc->used;
1249
1250	return d;
1251}
1252
1253static void context_run(struct context *ctx, u32 extra)
1254{
1255	struct fw_ohci *ohci = ctx->ohci;
1256
1257	reg_write(ohci, COMMAND_PTR(ctx->regs),
1258		  le32_to_cpu(ctx->last->branch_address));
1259	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
1260	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
1261	ctx->running = true;
1262	flush_writes(ohci);
1263}
1264
1265static void context_append(struct context *ctx,
1266			   struct descriptor *d, int z, int extra)
1267{
1268	dma_addr_t d_bus;
1269	struct descriptor_buffer *desc = ctx->buffer_tail;
1270	struct descriptor *d_branch;
1271
1272	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
1273
1274	desc->used += (z + extra) * sizeof(*d);
1275
1276	wmb(); /* finish init of new descriptors before branch_address update */
1277
1278	d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z);
1279	d_branch->branch_address = cpu_to_le32(d_bus | z);
1280
1281	/*
1282	 * VT6306 incorrectly checks only the single descriptor at the
1283	 * CommandPtr when the wake bit is written, so if it's a
1284	 * multi-descriptor block starting with an INPUT_MORE, put a copy of
1285	 * the branch address in the first descriptor.
1286	 *
1287	 * Not doing this for transmit contexts since not sure how it interacts
1288	 * with skip addresses.
1289	 */
1290	if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) &&
1291	    d_branch != ctx->prev &&
1292	    (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) ==
1293	     cpu_to_le16(DESCRIPTOR_INPUT_MORE)) {
1294		ctx->prev->branch_address = cpu_to_le32(d_bus | z);
1295	}
1296
1297	ctx->prev = d;
1298	ctx->prev_z = z;
1299}
1300
1301static void context_stop(struct context *ctx)
1302{
1303	struct fw_ohci *ohci = ctx->ohci;
1304	u32 reg;
1305	int i;
1306
1307	reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
1308	ctx->running = false;
1309
1310	for (i = 0; i < 1000; i++) {
1311		reg = reg_read(ohci, CONTROL_SET(ctx->regs));
1312		if ((reg & CONTEXT_ACTIVE) == 0)
1313			return;
1314
1315		if (i)
1316			udelay(10);
1317	}
1318	ohci_err(ohci, "DMA context still active (0x%08x)\n", reg);
1319}
1320
1321struct driver_data {
1322	u8 inline_data[8];
1323	struct fw_packet *packet;
1324};
1325
1326/*
1327 * This function apppends a packet to the DMA queue for transmission.
1328 * Must always be called with the ochi->lock held to ensure proper
1329 * generation handling and locking around packet queue manipulation.
1330 */
1331static int at_context_queue_packet(struct context *ctx,
1332				   struct fw_packet *packet)
1333{
1334	struct fw_ohci *ohci = ctx->ohci;
1335	dma_addr_t d_bus, payload_bus;
1336	struct driver_data *driver_data;
1337	struct descriptor *d, *last;
1338	__le32 *header;
1339	int z, tcode;
1340
1341	d = context_get_descriptors(ctx, 4, &d_bus);
1342	if (d == NULL) {
1343		packet->ack = RCODE_SEND_ERROR;
1344		return -1;
1345	}
1346
1347	d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
1348	d[0].res_count = cpu_to_le16(packet->timestamp);
1349
1350	/*
1351	 * The DMA format for asynchronous link packets is different
1352	 * from the IEEE1394 layout, so shift the fields around
1353	 * accordingly.
1354	 */
1355
1356	tcode = (packet->header[0] >> 4) & 0x0f;
1357	header = (__le32 *) &d[1];
1358	switch (tcode) {
1359	case TCODE_WRITE_QUADLET_REQUEST:
1360	case TCODE_WRITE_BLOCK_REQUEST:
1361	case TCODE_WRITE_RESPONSE:
1362	case TCODE_READ_QUADLET_REQUEST:
1363	case TCODE_READ_BLOCK_REQUEST:
1364	case TCODE_READ_QUADLET_RESPONSE:
1365	case TCODE_READ_BLOCK_RESPONSE:
1366	case TCODE_LOCK_REQUEST:
1367	case TCODE_LOCK_RESPONSE:
1368		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
1369					(packet->speed << 16));
1370		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
1371					(packet->header[0] & 0xffff0000));
1372		header[2] = cpu_to_le32(packet->header[2]);
1373
1374		if (TCODE_IS_BLOCK_PACKET(tcode))
1375			header[3] = cpu_to_le32(packet->header[3]);
1376		else
1377			header[3] = (__force __le32) packet->header[3];
1378
1379		d[0].req_count = cpu_to_le16(packet->header_length);
1380		break;
1381
1382	case TCODE_LINK_INTERNAL:
1383		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
1384					(packet->speed << 16));
1385		header[1] = cpu_to_le32(packet->header[1]);
1386		header[2] = cpu_to_le32(packet->header[2]);
1387		d[0].req_count = cpu_to_le16(12);
1388
1389		if (is_ping_packet(&packet->header[1]))
1390			d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
1391		break;
1392
1393	case TCODE_STREAM_DATA:
1394		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
1395					(packet->speed << 16));
1396		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
1397		d[0].req_count = cpu_to_le16(8);
1398		break;
1399
1400	default:
1401		/* BUG(); */
1402		packet->ack = RCODE_SEND_ERROR;
1403		return -1;
1404	}
1405
1406	BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
1407	driver_data = (struct driver_data *) &d[3];
1408	driver_data->packet = packet;
1409	packet->driver_data = driver_data;
1410
1411	if (packet->payload_length > 0) {
1412		if (packet->payload_length > sizeof(driver_data->inline_data)) {
1413			payload_bus = dma_map_single(ohci->card.device,
1414						     packet->payload,
1415						     packet->payload_length,
1416						     DMA_TO_DEVICE);
1417			if (dma_mapping_error(ohci->card.device, payload_bus)) {
1418				packet->ack = RCODE_SEND_ERROR;
1419				return -1;
1420			}
1421			packet->payload_bus	= payload_bus;
1422			packet->payload_mapped	= true;
1423		} else {
1424			memcpy(driver_data->inline_data, packet->payload,
1425			       packet->payload_length);
1426			payload_bus = d_bus + 3 * sizeof(*d);
1427		}
1428
1429		d[2].req_count    = cpu_to_le16(packet->payload_length);
1430		d[2].data_address = cpu_to_le32(payload_bus);
1431		last = &d[2];
1432		z = 3;
1433	} else {
1434		last = &d[0];
1435		z = 2;
1436	}
1437
1438	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
1439				     DESCRIPTOR_IRQ_ALWAYS |
1440				     DESCRIPTOR_BRANCH_ALWAYS);
1441
1442	/* FIXME: Document how the locking works. */
1443	if (ohci->generation != packet->generation) {
1444		if (packet->payload_mapped)
1445			dma_unmap_single(ohci->card.device, payload_bus,
1446					 packet->payload_length, DMA_TO_DEVICE);
1447		packet->ack = RCODE_GENERATION;
1448		return -1;
1449	}
1450
1451	context_append(ctx, d, z, 4 - z);
1452
1453	if (ctx->running)
1454		reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
1455	else
1456		context_run(ctx, 0);
1457
1458	return 0;
1459}
1460
1461static void at_context_flush(struct context *ctx)
1462{
1463	tasklet_disable(&ctx->tasklet);
1464
1465	ctx->flushing = true;
1466	context_tasklet((unsigned long)ctx);
1467	ctx->flushing = false;
1468
1469	tasklet_enable(&ctx->tasklet);
1470}
1471
1472static int handle_at_packet(struct context *context,
1473			    struct descriptor *d,
1474			    struct descriptor *last)
1475{
1476	struct driver_data *driver_data;
1477	struct fw_packet *packet;
1478	struct fw_ohci *ohci = context->ohci;
1479	int evt;
1480
1481	if (last->transfer_status == 0 && !context->flushing)
1482		/* This descriptor isn't done yet, stop iteration. */
1483		return 0;
1484
1485	driver_data = (struct driver_data *) &d[3];
1486	packet = driver_data->packet;
1487	if (packet == NULL)
1488		/* This packet was cancelled, just continue. */
1489		return 1;
1490
1491	if (packet->payload_mapped)
1492		dma_unmap_single(ohci->card.device, packet->payload_bus,
1493				 packet->payload_length, DMA_TO_DEVICE);
1494
1495	evt = le16_to_cpu(last->transfer_status) & 0x1f;
1496	packet->timestamp = le16_to_cpu(last->res_count);
1497
1498	log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);
1499
1500	switch (evt) {
1501	case OHCI1394_evt_timeout:
1502		/* Async response transmit timed out. */
1503		packet->ack = RCODE_CANCELLED;
1504		break;
1505
1506	case OHCI1394_evt_flushed:
1507		/*
1508		 * The packet was flushed should give same error as
1509		 * when we try to use a stale generation count.
1510		 */
1511		packet->ack = RCODE_GENERATION;
1512		break;
1513
1514	case OHCI1394_evt_missing_ack:
1515		if (context->flushing)
1516			packet->ack = RCODE_GENERATION;
1517		else {
1518			/*
1519			 * Using a valid (current) generation count, but the
1520			 * node is not on the bus or not sending acks.
1521			 */
1522			packet->ack = RCODE_NO_ACK;
1523		}
1524		break;
1525
1526	case ACK_COMPLETE + 0x10:
1527	case ACK_PENDING + 0x10:
1528	case ACK_BUSY_X + 0x10:
1529	case ACK_BUSY_A + 0x10:
1530	case ACK_BUSY_B + 0x10:
1531	case ACK_DATA_ERROR + 0x10:
1532	case ACK_TYPE_ERROR + 0x10:
1533		packet->ack = evt - 0x10;
1534		break;
1535
1536	case OHCI1394_evt_no_status:
1537		if (context->flushing) {
1538			packet->ack = RCODE_GENERATION;
1539			break;
1540		}
1541		fallthrough;
1542
1543	default:
1544		packet->ack = RCODE_SEND_ERROR;
1545		break;
1546	}
1547
1548	packet->callback(packet, &ohci->card, packet->ack);
1549
1550	return 1;
1551}
1552
1553#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
1554#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
1555#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
1556#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
1557#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
1558
1559static void handle_local_rom(struct fw_ohci *ohci,
1560			     struct fw_packet *packet, u32 csr)
1561{
1562	struct fw_packet response;
1563	int tcode, length, i;
1564
1565	tcode = HEADER_GET_TCODE(packet->header[0]);
1566	if (TCODE_IS_BLOCK_PACKET(tcode))
1567		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1568	else
1569		length = 4;
1570
1571	i = csr - CSR_CONFIG_ROM;
1572	if (i + length > CONFIG_ROM_SIZE) {
1573		fw_fill_response(&response, packet->header,
1574				 RCODE_ADDRESS_ERROR, NULL, 0);
1575	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
1576		fw_fill_response(&response, packet->header,
1577				 RCODE_TYPE_ERROR, NULL, 0);
1578	} else {
1579		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
1580				 (void *) ohci->config_rom + i, length);
1581	}
1582
1583	fw_core_handle_response(&ohci->card, &response);
1584}
1585
1586static void handle_local_lock(struct fw_ohci *ohci,
1587			      struct fw_packet *packet, u32 csr)
1588{
1589	struct fw_packet response;
1590	int tcode, length, ext_tcode, sel, try;
1591	__be32 *payload, lock_old;
1592	u32 lock_arg, lock_data;
1593
1594	tcode = HEADER_GET_TCODE(packet->header[0]);
1595	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1596	payload = packet->payload;
1597	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
1598
1599	if (tcode == TCODE_LOCK_REQUEST &&
1600	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
1601		lock_arg = be32_to_cpu(payload[0]);
1602		lock_data = be32_to_cpu(payload[1]);
1603	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
1604		lock_arg = 0;
1605		lock_data = 0;
1606	} else {
1607		fw_fill_response(&response, packet->header,
1608				 RCODE_TYPE_ERROR, NULL, 0);
1609		goto out;
1610	}
1611
1612	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
1613	reg_write(ohci, OHCI1394_CSRData, lock_data);
1614	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
1615	reg_write(ohci, OHCI1394_CSRControl, sel);
1616
1617	for (try = 0; try < 20; try++)
1618		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
1619			lock_old = cpu_to_be32(reg_read(ohci,
1620							OHCI1394_CSRData));
1621			fw_fill_response(&response, packet->header,
1622					 RCODE_COMPLETE,
1623					 &lock_old, sizeof(lock_old));
1624			goto out;
1625		}
1626
1627	ohci_err(ohci, "swap not done (CSR lock timeout)\n");
1628	fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
1629
1630 out:
1631	fw_core_handle_response(&ohci->card, &response);
1632}
1633
1634static void handle_local_request(struct context *ctx, struct fw_packet *packet)
1635{
1636	u64 offset, csr;
1637
1638	if (ctx == &ctx->ohci->at_request_ctx) {
1639		packet->ack = ACK_PENDING;
1640		packet->callback(packet, &ctx->ohci->card, packet->ack);
1641	}
1642
1643	offset =
1644		((unsigned long long)
1645		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
1646		packet->header[2];
1647	csr = offset - CSR_REGISTER_BASE;
1648
1649	/* Handle config rom reads. */
1650	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
1651		handle_local_rom(ctx->ohci, packet, csr);
1652	else switch (csr) {
1653	case CSR_BUS_MANAGER_ID:
1654	case CSR_BANDWIDTH_AVAILABLE:
1655	case CSR_CHANNELS_AVAILABLE_HI:
1656	case CSR_CHANNELS_AVAILABLE_LO:
1657		handle_local_lock(ctx->ohci, packet, csr);
1658		break;
1659	default:
1660		if (ctx == &ctx->ohci->at_request_ctx)
1661			fw_core_handle_request(&ctx->ohci->card, packet);
1662		else
1663			fw_core_handle_response(&ctx->ohci->card, packet);
1664		break;
1665	}
1666
1667	if (ctx == &ctx->ohci->at_response_ctx) {
1668		packet->ack = ACK_COMPLETE;
1669		packet->callback(packet, &ctx->ohci->card, packet->ack);
1670	}
1671}
1672
1673static u32 get_cycle_time(struct fw_ohci *ohci);
1674
1675static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
1676{
1677	unsigned long flags;
1678	int ret;
1679
1680	spin_lock_irqsave(&ctx->ohci->lock, flags);
1681
1682	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
1683	    ctx->ohci->generation == packet->generation) {
1684		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1685
1686		// Timestamping on behalf of the hardware.
1687		packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
1688
1689		handle_local_request(ctx, packet);
1690		return;
1691	}
1692
1693	ret = at_context_queue_packet(ctx, packet);
1694	spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1695
1696	if (ret < 0) {
1697		// Timestamping on behalf of the hardware.
1698		packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
1699
1700		packet->callback(packet, &ctx->ohci->card, packet->ack);
1701	}
1702}
1703
1704static void detect_dead_context(struct fw_ohci *ohci,
1705				const char *name, unsigned int regs)
1706{
1707	u32 ctl;
1708
1709	ctl = reg_read(ohci, CONTROL_SET(regs));
1710	if (ctl & CONTEXT_DEAD)
1711		ohci_err(ohci, "DMA context %s has stopped, error code: %s\n",
1712			name, evts[ctl & 0x1f]);
1713}
1714
1715static void handle_dead_contexts(struct fw_ohci *ohci)
1716{
1717	unsigned int i;
1718	char name[8];
1719
1720	detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
1721	detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
1722	detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
1723	detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
1724	for (i = 0; i < 32; ++i) {
1725		if (!(ohci->it_context_support & (1 << i)))
1726			continue;
1727		sprintf(name, "IT%u", i);
1728		detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
1729	}
1730	for (i = 0; i < 32; ++i) {
1731		if (!(ohci->ir_context_support & (1 << i)))
1732			continue;
1733		sprintf(name, "IR%u", i);
1734		detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
1735	}
1736	/* TODO: maybe try to flush and restart the dead contexts */
1737}
1738
1739static u32 cycle_timer_ticks(u32 cycle_timer)
1740{
1741	u32 ticks;
1742
1743	ticks = cycle_timer & 0xfff;
1744	ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
1745	ticks += (3072 * 8000) * (cycle_timer >> 25);
1746
1747	return ticks;
1748}
1749
1750/*
1751 * Some controllers exhibit one or more of the following bugs when updating the
1752 * iso cycle timer register:
1753 *  - When the lowest six bits are wrapping around to zero, a read that happens
1754 *    at the same time will return garbage in the lowest ten bits.
1755 *  - When the cycleOffset field wraps around to zero, the cycleCount field is
1756 *    not incremented for about 60 ns.
1757 *  - Occasionally, the entire register reads zero.
1758 *
1759 * To catch these, we read the register three times and ensure that the
1760 * difference between each two consecutive reads is approximately the same, i.e.
1761 * less than twice the other.  Furthermore, any negative difference indicates an
1762 * error.  (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
1763 * execute, so we have enough precision to compute the ratio of the differences.)
1764 */
1765static u32 get_cycle_time(struct fw_ohci *ohci)
1766{
1767	u32 c0, c1, c2;
1768	u32 t0, t1, t2;
1769	s32 diff01, diff12;
1770	int i;
1771
1772	if (has_reboot_by_cycle_timer_read_quirk(ohci))
1773		return 0;
1774
1775	c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1776
1777	if (ohci->quirks & QUIRK_CYCLE_TIMER) {
1778		i = 0;
1779		c1 = c2;
1780		c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1781		do {
1782			c0 = c1;
1783			c1 = c2;
1784			c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1785			t0 = cycle_timer_ticks(c0);
1786			t1 = cycle_timer_ticks(c1);
1787			t2 = cycle_timer_ticks(c2);
1788			diff01 = t1 - t0;
1789			diff12 = t2 - t1;
1790		} while ((diff01 <= 0 || diff12 <= 0 ||
1791			  diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
1792			 && i++ < 20);
1793	}
1794
1795	return c2;
1796}
1797
1798/*
1799 * This function has to be called at least every 64 seconds.  The bus_time
1800 * field stores not only the upper 25 bits of the BUS_TIME register but also
1801 * the most significant bit of the cycle timer in bit 6 so that we can detect
1802 * changes in this bit.
1803 */
1804static u32 update_bus_time(struct fw_ohci *ohci)
1805{
1806	u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
1807
1808	if (unlikely(!ohci->bus_time_running)) {
1809		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
1810		ohci->bus_time = (lower_32_bits(ktime_get_seconds()) & ~0x7f) |
1811		                 (cycle_time_seconds & 0x40);
1812		ohci->bus_time_running = true;
1813	}
1814
1815	if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
1816		ohci->bus_time += 0x40;
1817
1818	return ohci->bus_time | cycle_time_seconds;
1819}
1820
1821static int get_status_for_port(struct fw_ohci *ohci, int port_index)
1822{
1823	int reg;
1824
1825	mutex_lock(&ohci->phy_reg_mutex);
1826	reg = write_phy_reg(ohci, 7, port_index);
1827	if (reg >= 0)
1828		reg = read_phy_reg(ohci, 8);
1829	mutex_unlock(&ohci->phy_reg_mutex);
1830	if (reg < 0)
1831		return reg;
1832
1833	switch (reg & 0x0f) {
1834	case 0x06:
1835		return 2;	/* is child node (connected to parent node) */
1836	case 0x0e:
1837		return 3;	/* is parent node (connected to child node) */
1838	}
1839	return 1;		/* not connected */
1840}
1841
1842static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
1843	int self_id_count)
1844{
1845	int i;
1846	u32 entry;
1847
1848	for (i = 0; i < self_id_count; i++) {
1849		entry = ohci->self_id_buffer[i];
1850		if ((self_id & 0xff000000) == (entry & 0xff000000))
1851			return -1;
1852		if ((self_id & 0xff000000) < (entry & 0xff000000))
1853			return i;
1854	}
1855	return i;
1856}
1857
1858static int initiated_reset(struct fw_ohci *ohci)
1859{
1860	int reg;
1861	int ret = 0;
1862
1863	mutex_lock(&ohci->phy_reg_mutex);
1864	reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
1865	if (reg >= 0) {
1866		reg = read_phy_reg(ohci, 8);
1867		reg |= 0x40;
1868		reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */
1869		if (reg >= 0) {
1870			reg = read_phy_reg(ohci, 12); /* read register 12 */
1871			if (reg >= 0) {
1872				if ((reg & 0x08) == 0x08) {
1873					/* bit 3 indicates "initiated reset" */
1874					ret = 0x2;
1875				}
1876			}
1877		}
1878	}
1879	mutex_unlock(&ohci->phy_reg_mutex);
1880	return ret;
1881}
1882
1883/*
1884 * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
1885 * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
1886 * Construct the selfID from phy register contents.
1887 */
1888static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
1889{
1890	int reg, i, pos, status;
1891	/* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
1892	u32 self_id = 0x8040c800;
1893
1894	reg = reg_read(ohci, OHCI1394_NodeID);
1895	if (!(reg & OHCI1394_NodeID_idValid)) {
1896		ohci_notice(ohci,
1897			    "node ID not valid, new bus reset in progress\n");
1898		return -EBUSY;
1899	}
1900	self_id |= ((reg & 0x3f) << 24); /* phy ID */
1901
1902	reg = ohci_read_phy_reg(&ohci->card, 4);
1903	if (reg < 0)
1904		return reg;
1905	self_id |= ((reg & 0x07) << 8); /* power class */
1906
1907	reg = ohci_read_phy_reg(&ohci->card, 1);
1908	if (reg < 0)
1909		return reg;
1910	self_id |= ((reg & 0x3f) << 16); /* gap count */
1911
1912	for (i = 0; i < 3; i++) {
1913		status = get_status_for_port(ohci, i);
1914		if (status < 0)
1915			return status;
1916		self_id |= ((status & 0x3) << (6 - (i * 2)));
1917	}
1918
1919	self_id |= initiated_reset(ohci);
1920
1921	pos = get_self_id_pos(ohci, self_id, self_id_count);
1922	if (pos >= 0) {
1923		memmove(&(ohci->self_id_buffer[pos+1]),
1924			&(ohci->self_id_buffer[pos]),
1925			(self_id_count - pos) * sizeof(*ohci->self_id_buffer));
1926		ohci->self_id_buffer[pos] = self_id;
1927		self_id_count++;
1928	}
1929	return self_id_count;
1930}
1931
1932static void bus_reset_work(struct work_struct *work)
1933{
1934	struct fw_ohci *ohci =
1935		container_of(work, struct fw_ohci, bus_reset_work);
1936	int self_id_count, generation, new_generation, i, j;
1937	u32 reg;
1938	void *free_rom = NULL;
1939	dma_addr_t free_rom_bus = 0;
1940	bool is_new_root;
1941
1942	reg = reg_read(ohci, OHCI1394_NodeID);
1943	if (!(reg & OHCI1394_NodeID_idValid)) {
1944		ohci_notice(ohci,
1945			    "node ID not valid, new bus reset in progress\n");
1946		return;
1947	}
1948	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
1949		ohci_notice(ohci, "malconfigured bus\n");
1950		return;
1951	}
1952	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
1953			       OHCI1394_NodeID_nodeNumber);
1954
1955	is_new_root = (reg & OHCI1394_NodeID_root) != 0;
1956	if (!(ohci->is_root && is_new_root))
1957		reg_write(ohci, OHCI1394_LinkControlSet,
1958			  OHCI1394_LinkControl_cycleMaster);
1959	ohci->is_root = is_new_root;
1960
1961	reg = reg_read(ohci, OHCI1394_SelfIDCount);
1962	if (reg & OHCI1394_SelfIDCount_selfIDError) {
1963		ohci_notice(ohci, "self ID receive error\n");
1964		return;
1965	}
1966	/*
1967	 * The count in the SelfIDCount register is the number of
1968	 * bytes in the self ID receive buffer.  Since we also receive
1969	 * the inverted quadlets and a header quadlet, we shift one
1970	 * bit extra to get the actual number of self IDs.
1971	 */
1972	self_id_count = (reg >> 3) & 0xff;
1973
1974	if (self_id_count > 252) {
1975		ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
1976		return;
1977	}
1978
1979	generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff;
1980	rmb();
1981
1982	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
1983		u32 id  = cond_le32_to_cpu(ohci->self_id[i]);
1984		u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]);
1985
1986		if (id != ~id2) {
1987			/*
1988			 * If the invalid data looks like a cycle start packet,
1989			 * it's likely to be the result of the cycle master
1990			 * having a wrong gap count.  In this case, the self IDs
1991			 * so far are valid and should be processed so that the
1992			 * bus manager can then correct the gap count.
1993			 */
1994			if (id == 0xffff008f) {
1995				ohci_notice(ohci, "ignoring spurious self IDs\n");
1996				self_id_count = j;
1997				break;
1998			}
1999
2000			ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
2001				    j, self_id_count, id, id2);
2002			return;
2003		}
2004		ohci->self_id_buffer[j] = id;
2005	}
2006
2007	if (ohci->quirks & QUIRK_TI_SLLZ059) {
2008		self_id_count = find_and_insert_self_id(ohci, self_id_count);
2009		if (self_id_count < 0) {
2010			ohci_notice(ohci,
2011				    "could not construct local self ID\n");
2012			return;
2013		}
2014	}
2015
2016	if (self_id_count == 0) {
2017		ohci_notice(ohci, "no self IDs\n");
2018		return;
2019	}
2020	rmb();
2021
2022	/*
2023	 * Check the consistency of the self IDs we just read.  The
2024	 * problem we face is that a new bus reset can start while we
2025	 * read out the self IDs from the DMA buffer. If this happens,
2026	 * the DMA buffer will be overwritten with new self IDs and we
2027	 * will read out inconsistent data.  The OHCI specification
2028	 * (section 11.2) recommends a technique similar to
2029	 * linux/seqlock.h, where we remember the generation of the
2030	 * self IDs in the buffer before reading them out and compare
2031	 * it to the current generation after reading them out.  If
2032	 * the two generations match we know we have a consistent set
2033	 * of self IDs.
2034	 */
2035
2036	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
2037	if (new_generation != generation) {
2038		ohci_notice(ohci, "new bus reset, discarding self ids\n");
2039		return;
2040	}
2041
2042	/* FIXME: Document how the locking works. */
2043	spin_lock_irq(&ohci->lock);
2044
2045	ohci->generation = -1; /* prevent AT packet queueing */
2046	context_stop(&ohci->at_request_ctx);
2047	context_stop(&ohci->at_response_ctx);
2048
2049	spin_unlock_irq(&ohci->lock);
2050
2051	/*
2052	 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
2053	 * packets in the AT queues and software needs to drain them.
2054	 * Some OHCI 1.1 controllers (JMicron) apparently require this too.
2055	 */
2056	at_context_flush(&ohci->at_request_ctx);
2057	at_context_flush(&ohci->at_response_ctx);
2058
2059	spin_lock_irq(&ohci->lock);
2060
2061	ohci->generation = generation;
2062	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2063	if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
2064		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2065
2066	if (ohci->quirks & QUIRK_RESET_PACKET)
2067		ohci->request_generation = generation;
2068
2069	/*
2070	 * This next bit is unrelated to the AT context stuff but we
2071	 * have to do it under the spinlock also.  If a new config rom
2072	 * was set up before this reset, the old one is now no longer
2073	 * in use and we can free it. Update the config rom pointers
2074	 * to point to the current config rom and clear the
2075	 * next_config_rom pointer so a new update can take place.
2076	 */
2077
2078	if (ohci->next_config_rom != NULL) {
2079		if (ohci->next_config_rom != ohci->config_rom) {
2080			free_rom      = ohci->config_rom;
2081			free_rom_bus  = ohci->config_rom_bus;
2082		}
2083		ohci->config_rom      = ohci->next_config_rom;
2084		ohci->config_rom_bus  = ohci->next_config_rom_bus;
2085		ohci->next_config_rom = NULL;
2086
2087		/*
2088		 * Restore config_rom image and manually update
2089		 * config_rom registers.  Writing the header quadlet
2090		 * will indicate that the config rom is ready, so we
2091		 * do that last.
2092		 */
2093		reg_write(ohci, OHCI1394_BusOptions,
2094			  be32_to_cpu(ohci->config_rom[2]));
2095		ohci->config_rom[0] = ohci->next_header;
2096		reg_write(ohci, OHCI1394_ConfigROMhdr,
2097			  be32_to_cpu(ohci->next_header));
2098	}
2099
2100	if (param_remote_dma) {
2101		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
2102		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
2103	}
2104
2105	spin_unlock_irq(&ohci->lock);
2106
2107	if (free_rom)
2108		dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);
2109
2110	log_selfids(ohci, generation, self_id_count);
2111
2112	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
2113				 self_id_count, ohci->self_id_buffer,
2114				 ohci->csr_state_setclear_abdicate);
2115	ohci->csr_state_setclear_abdicate = false;
2116}
2117
2118static irqreturn_t irq_handler(int irq, void *data)
2119{
2120	struct fw_ohci *ohci = data;
2121	u32 event, iso_event;
2122	int i;
2123
2124	event = reg_read(ohci, OHCI1394_IntEventClear);
2125
2126	if (!event || !~event)
2127		return IRQ_NONE;
2128
2129	/*
2130	 * busReset and postedWriteErr events must not be cleared yet
2131	 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
2132	 */
2133	reg_write(ohci, OHCI1394_IntEventClear,
2134		  event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
2135	log_irqs(ohci, event);
2136	if (event & OHCI1394_busReset)
2137		reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2138
2139	if (event & OHCI1394_selfIDComplete)
2140		queue_work(selfid_workqueue, &ohci->bus_reset_work);
2141
2142	if (event & OHCI1394_RQPkt)
2143		tasklet_schedule(&ohci->ar_request_ctx.tasklet);
2144
2145	if (event & OHCI1394_RSPkt)
2146		tasklet_schedule(&ohci->ar_response_ctx.tasklet);
2147
2148	if (event & OHCI1394_reqTxComplete)
2149		tasklet_schedule(&ohci->at_request_ctx.tasklet);
2150
2151	if (event & OHCI1394_respTxComplete)
2152		tasklet_schedule(&ohci->at_response_ctx.tasklet);
2153
2154	if (event & OHCI1394_isochRx) {
2155		iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
2156		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
2157
2158		while (iso_event) {
2159			i = ffs(iso_event) - 1;
2160			tasklet_schedule(
2161				&ohci->ir_context_list[i].context.tasklet);
2162			iso_event &= ~(1 << i);
2163		}
2164	}
2165
2166	if (event & OHCI1394_isochTx) {
2167		iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
2168		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
2169
2170		while (iso_event) {
2171			i = ffs(iso_event) - 1;
2172			tasklet_schedule(
2173				&ohci->it_context_list[i].context.tasklet);
2174			iso_event &= ~(1 << i);
2175		}
2176	}
2177
2178	if (unlikely(event & OHCI1394_regAccessFail))
2179		ohci_err(ohci, "register access failure\n");
2180
2181	if (unlikely(event & OHCI1394_postedWriteErr)) {
2182		reg_read(ohci, OHCI1394_PostedWriteAddressHi);
2183		reg_read(ohci, OHCI1394_PostedWriteAddressLo);
2184		reg_write(ohci, OHCI1394_IntEventClear,
2185			  OHCI1394_postedWriteErr);
2186		if (printk_ratelimit())
2187			ohci_err(ohci, "PCI posted write error\n");
2188	}
2189
2190	if (unlikely(event & OHCI1394_cycleTooLong)) {
2191		if (printk_ratelimit())
2192			ohci_notice(ohci, "isochronous cycle too long\n");
2193		reg_write(ohci, OHCI1394_LinkControlSet,
2194			  OHCI1394_LinkControl_cycleMaster);
2195	}
2196
2197	if (unlikely(event & OHCI1394_cycleInconsistent)) {
2198		/*
2199		 * We need to clear this event bit in order to make
2200		 * cycleMatch isochronous I/O work.  In theory we should
2201		 * stop active cycleMatch iso contexts now and restart
2202		 * them at least two cycles later.  (FIXME?)
2203		 */
2204		if (printk_ratelimit())
2205			ohci_notice(ohci, "isochronous cycle inconsistent\n");
2206	}
2207
2208	if (unlikely(event & OHCI1394_unrecoverableError))
2209		handle_dead_contexts(ohci);
2210
2211	if (event & OHCI1394_cycle64Seconds) {
2212		spin_lock(&ohci->lock);
2213		update_bus_time(ohci);
2214		spin_unlock(&ohci->lock);
2215	} else
2216		flush_writes(ohci);
2217
2218	return IRQ_HANDLED;
2219}
2220
2221static int software_reset(struct fw_ohci *ohci)
2222{
2223	u32 val;
2224	int i;
2225
2226	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
2227	for (i = 0; i < 500; i++) {
2228		val = reg_read(ohci, OHCI1394_HCControlSet);
2229		if (!~val)
2230			return -ENODEV; /* Card was ejected. */
2231
2232		if (!(val & OHCI1394_HCControl_softReset))
2233			return 0;
2234
2235		msleep(1);
2236	}
2237
2238	return -EBUSY;
2239}
2240
2241static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
2242{
2243	size_t size = length * 4;
2244
2245	memcpy(dest, src, size);
2246	if (size < CONFIG_ROM_SIZE)
2247		memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
2248}
2249
2250static int configure_1394a_enhancements(struct fw_ohci *ohci)
2251{
2252	bool enable_1394a;
2253	int ret, clear, set, offset;
2254
2255	/* Check if the driver should configure link and PHY. */
2256	if (!(reg_read(ohci, OHCI1394_HCControlSet) &
2257	      OHCI1394_HCControl_programPhyEnable))
2258		return 0;
2259
2260	/* Paranoia: check whether the PHY supports 1394a, too. */
2261	enable_1394a = false;
2262	ret = read_phy_reg(ohci, 2);
2263	if (ret < 0)
2264		return ret;
2265	if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
2266		ret = read_paged_phy_reg(ohci, 1, 8);
2267		if (ret < 0)
2268			return ret;
2269		if (ret >= 1)
2270			enable_1394a = true;
2271	}
2272
2273	if (ohci->quirks & QUIRK_NO_1394A)
2274		enable_1394a = false;
2275
2276	/* Configure PHY and link consistently. */
2277	if (enable_1394a) {
2278		clear = 0;
2279		set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
2280	} else {
2281		clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
2282		set = 0;
2283	}
2284	ret = update_phy_reg(ohci, 5, clear, set);
2285	if (ret < 0)
2286		return ret;
2287
2288	if (enable_1394a)
2289		offset = OHCI1394_HCControlSet;
2290	else
2291		offset = OHCI1394_HCControlClear;
2292	reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
2293
2294	/* Clean up: configuration has been taken care of. */
2295	reg_write(ohci, OHCI1394_HCControlClear,
2296		  OHCI1394_HCControl_programPhyEnable);
2297
2298	return 0;
2299}
2300
2301static int probe_tsb41ba3d(struct fw_ohci *ohci)
2302{
2303	/* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */
2304	static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
2305	int reg, i;
2306
2307	reg = read_phy_reg(ohci, 2);
2308	if (reg < 0)
2309		return reg;
2310	if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
2311		return 0;
2312
2313	for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) {
2314		reg = read_paged_phy_reg(ohci, 1, i + 10);
2315		if (reg < 0)
2316			return reg;
2317		if (reg != id[i])
2318			return 0;
2319	}
2320	return 1;
2321}
2322
2323static int ohci_enable(struct fw_card *card,
2324		       const __be32 *config_rom, size_t length)
2325{
2326	struct fw_ohci *ohci = fw_ohci(card);
2327	u32 lps, version, irqs;
2328	int i, ret;
2329
2330	ret = software_reset(ohci);
2331	if (ret < 0) {
2332		ohci_err(ohci, "failed to reset ohci card\n");
2333		return ret;
2334	}
2335
2336	/*
2337	 * Now enable LPS, which we need in order to start accessing
2338	 * most of the registers.  In fact, on some cards (ALI M5251),
2339	 * accessing registers in the SClk domain without LPS enabled
2340	 * will lock up the machine.  Wait 50msec to make sure we have
2341	 * full link enabled.  However, with some cards (well, at least
2342	 * a JMicron PCIe card), we have to try again sometimes.
2343	 *
2344	 * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
2345	 * cannot actually use the phy at that time.  These need tens of
2346	 * millisecods pause between LPS write and first phy access too.
2347	 */
2348
2349	reg_write(ohci, OHCI1394_HCControlSet,
2350		  OHCI1394_HCControl_LPS |
2351		  OHCI1394_HCControl_postedWriteEnable);
2352	flush_writes(ohci);
2353
2354	for (lps = 0, i = 0; !lps && i < 3; i++) {
2355		msleep(50);
2356		lps = reg_read(ohci, OHCI1394_HCControlSet) &
2357		      OHCI1394_HCControl_LPS;
2358	}
2359
2360	if (!lps) {
2361		ohci_err(ohci, "failed to set Link Power Status\n");
2362		return -EIO;
2363	}
2364
2365	if (ohci->quirks & QUIRK_TI_SLLZ059) {
2366		ret = probe_tsb41ba3d(ohci);
2367		if (ret < 0)
2368			return ret;
2369		if (ret)
2370			ohci_notice(ohci, "local TSB41BA3D phy\n");
2371		else
2372			ohci->quirks &= ~QUIRK_TI_SLLZ059;
2373	}
2374
2375	reg_write(ohci, OHCI1394_HCControlClear,
2376		  OHCI1394_HCControl_noByteSwapData);
2377
2378	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
2379	reg_write(ohci, OHCI1394_LinkControlSet,
2380		  OHCI1394_LinkControl_cycleTimerEnable |
2381		  OHCI1394_LinkControl_cycleMaster);
2382
2383	reg_write(ohci, OHCI1394_ATRetries,
2384		  OHCI1394_MAX_AT_REQ_RETRIES |
2385		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
2386		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
2387		  (200 << 16));
2388
2389	ohci->bus_time_running = false;
2390
2391	for (i = 0; i < 32; i++)
2392		if (ohci->ir_context_support & (1 << i))
2393			reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i),
2394				  IR_CONTEXT_MULTI_CHANNEL_MODE);
2395
2396	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2397	if (version >= OHCI_VERSION_1_1) {
2398		reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
2399			  0xfffffffe);
2400		card->broadcast_channel_auto_allocated = true;
2401	}
2402
2403	/* Get implemented bits of the priority arbitration request counter. */
2404	reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
2405	ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
2406	reg_write(ohci, OHCI1394_FairnessControl, 0);
2407	card->priority_budget_implemented = ohci->pri_req_max != 0;
2408
2409	reg_write(ohci, OHCI1394_PhyUpperBound, FW_MAX_PHYSICAL_RANGE >> 16);
2410	reg_write(ohci, OHCI1394_IntEventClear, ~0);
2411	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
2412
2413	ret = configure_1394a_enhancements(ohci);
2414	if (ret < 0)
2415		return ret;
2416
2417	/* Activate link_on bit and contender bit in our self ID packets.*/
2418	ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
2419	if (ret < 0)
2420		return ret;
2421
2422	/*
2423	 * When the link is not yet enabled, the atomic config rom
2424	 * update mechanism described below in ohci_set_config_rom()
2425	 * is not active.  We have to update ConfigRomHeader and
2426	 * BusOptions manually, and the write to ConfigROMmap takes
2427	 * effect immediately.  We tie this to the enabling of the
2428	 * link, so we have a valid config rom before enabling - the
2429	 * OHCI requires that ConfigROMhdr and BusOptions have valid
2430	 * values before enabling.
2431	 *
2432	 * However, when the ConfigROMmap is written, some controllers
2433	 * always read back quadlets 0 and 2 from the config rom to
2434	 * the ConfigRomHeader and BusOptions registers on bus reset.
2435	 * They shouldn't do that in this initial case where the link
2436	 * isn't enabled.  This means we have to use the same
2437	 * workaround here, setting the bus header to 0 and then write
2438	 * the right values in the bus reset tasklet.
2439	 */
2440
2441	if (config_rom) {
2442		ohci->next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2443							    &ohci->next_config_rom_bus, GFP_KERNEL);
2444		if (ohci->next_config_rom == NULL)
2445			return -ENOMEM;
2446
2447		copy_config_rom(ohci->next_config_rom, config_rom, length);
2448	} else {
2449		/*
2450		 * In the suspend case, config_rom is NULL, which
2451		 * means that we just reuse the old config rom.
2452		 */
2453		ohci->next_config_rom = ohci->config_rom;
2454		ohci->next_config_rom_bus = ohci->config_rom_bus;
2455	}
2456
2457	ohci->next_header = ohci->next_config_rom[0];
2458	ohci->next_config_rom[0] = 0;
2459	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
2460	reg_write(ohci, OHCI1394_BusOptions,
2461		  be32_to_cpu(ohci->next_config_rom[2]));
2462	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2463
2464	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
2465
2466	irqs =	OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
2467		OHCI1394_RQPkt | OHCI1394_RSPkt |
2468		OHCI1394_isochTx | OHCI1394_isochRx |
2469		OHCI1394_postedWriteErr |
2470		OHCI1394_selfIDComplete |
2471		OHCI1394_regAccessFail |
2472		OHCI1394_cycleInconsistent |
2473		OHCI1394_unrecoverableError |
2474		OHCI1394_cycleTooLong |
2475		OHCI1394_masterIntEnable;
2476	if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
2477		irqs |= OHCI1394_busReset;
2478	reg_write(ohci, OHCI1394_IntMaskSet, irqs);
2479
2480	reg_write(ohci, OHCI1394_HCControlSet,
2481		  OHCI1394_HCControl_linkEnable |
2482		  OHCI1394_HCControl_BIBimageValid);
2483
2484	reg_write(ohci, OHCI1394_LinkControlSet,
2485		  OHCI1394_LinkControl_rcvSelfID |
2486		  OHCI1394_LinkControl_rcvPhyPkt);
2487
2488	ar_context_run(&ohci->ar_request_ctx);
2489	ar_context_run(&ohci->ar_response_ctx);
2490
2491	flush_writes(ohci);
2492
2493	/* We are ready to go, reset bus to finish initialization. */
2494	fw_schedule_bus_reset(&ohci->card, false, true);
2495
2496	return 0;
2497}
2498
2499static int ohci_set_config_rom(struct fw_card *card,
2500			       const __be32 *config_rom, size_t length)
2501{
2502	struct fw_ohci *ohci;
2503	__be32 *next_config_rom;
2504	dma_addr_t next_config_rom_bus;
2505
2506	ohci = fw_ohci(card);
2507
2508	/*
2509	 * When the OHCI controller is enabled, the config rom update
2510	 * mechanism is a bit tricky, but easy enough to use.  See
2511	 * section 5.5.6 in the OHCI specification.
2512	 *
2513	 * The OHCI controller caches the new config rom address in a
2514	 * shadow register (ConfigROMmapNext) and needs a bus reset
2515	 * for the changes to take place.  When the bus reset is
2516	 * detected, the controller loads the new values for the
2517	 * ConfigRomHeader and BusOptions registers from the specified
2518	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
2519	 * shadow register. All automatically and atomically.
2520	 *
2521	 * Now, there's a twist to this story.  The automatic load of
2522	 * ConfigRomHeader and BusOptions doesn't honor the
2523	 * noByteSwapData bit, so with a be32 config rom, the
2524	 * controller will load be32 values in to these registers
2525	 * during the atomic update, even on litte endian
2526	 * architectures.  The workaround we use is to put a 0 in the
2527	 * header quadlet; 0 is endian agnostic and means that the
2528	 * config rom isn't ready yet.  In the bus reset tasklet we
2529	 * then set up the real values for the two registers.
2530	 *
2531	 * We use ohci->lock to avoid racing with the code that sets
2532	 * ohci->next_config_rom to NULL (see bus_reset_work).
2533	 */
2534
2535	next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2536					      &next_config_rom_bus, GFP_KERNEL);
2537	if (next_config_rom == NULL)
2538		return -ENOMEM;
2539
2540	spin_lock_irq(&ohci->lock);
2541
2542	/*
2543	 * If there is not an already pending config_rom update,
2544	 * push our new allocation into the ohci->next_config_rom
2545	 * and then mark the local variable as null so that we
2546	 * won't deallocate the new buffer.
2547	 *
2548	 * OTOH, if there is a pending config_rom update, just
2549	 * use that buffer with the new config_rom data, and
2550	 * let this routine free the unused DMA allocation.
2551	 */
2552
2553	if (ohci->next_config_rom == NULL) {
2554		ohci->next_config_rom = next_config_rom;
2555		ohci->next_config_rom_bus = next_config_rom_bus;
2556		next_config_rom = NULL;
2557	}
2558
2559	copy_config_rom(ohci->next_config_rom, config_rom, length);
2560
2561	ohci->next_header = config_rom[0];
2562	ohci->next_config_rom[0] = 0;
2563
2564	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2565
2566	spin_unlock_irq(&ohci->lock);
2567
2568	/* If we didn't use the DMA allocation, delete it. */
2569	if (next_config_rom != NULL) {
2570		dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom,
2571				   next_config_rom_bus);
2572	}
2573
2574	/*
2575	 * Now initiate a bus reset to have the changes take
2576	 * effect. We clean up the old config rom memory and DMA
2577	 * mappings in the bus reset tasklet, since the OHCI
2578	 * controller could need to access it before the bus reset
2579	 * takes effect.
2580	 */
2581
2582	fw_schedule_bus_reset(&ohci->card, true, true);
2583
2584	return 0;
2585}
2586
2587static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
2588{
2589	struct fw_ohci *ohci = fw_ohci(card);
2590
2591	at_context_transmit(&ohci->at_request_ctx, packet);
2592}
2593
2594static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
2595{
2596	struct fw_ohci *ohci = fw_ohci(card);
2597
2598	at_context_transmit(&ohci->at_response_ctx, packet);
2599}
2600
2601static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
2602{
2603	struct fw_ohci *ohci = fw_ohci(card);
2604	struct context *ctx = &ohci->at_request_ctx;
2605	struct driver_data *driver_data = packet->driver_data;
2606	int ret = -ENOENT;
2607
2608	tasklet_disable_in_atomic(&ctx->tasklet);
2609
2610	if (packet->ack != 0)
2611		goto out;
2612
2613	if (packet->payload_mapped)
2614		dma_unmap_single(ohci->card.device, packet->payload_bus,
2615				 packet->payload_length, DMA_TO_DEVICE);
2616
2617	log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
2618	driver_data->packet = NULL;
2619	packet->ack = RCODE_CANCELLED;
2620
2621	// Timestamping on behalf of the hardware.
2622	packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
2623
2624	packet->callback(packet, &ohci->card, packet->ack);
2625	ret = 0;
2626 out:
2627	tasklet_enable(&ctx->tasklet);
2628
2629	return ret;
2630}
2631
2632static int ohci_enable_phys_dma(struct fw_card *card,
2633				int node_id, int generation)
2634{
2635	struct fw_ohci *ohci = fw_ohci(card);
2636	unsigned long flags;
2637	int n, ret = 0;
2638
2639	if (param_remote_dma)
2640		return 0;
2641
2642	/*
2643	 * FIXME:  Make sure this bitmask is cleared when we clear the busReset
2644	 * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
2645	 */
2646
2647	spin_lock_irqsave(&ohci->lock, flags);
2648
2649	if (ohci->generation != generation) {
2650		ret = -ESTALE;
2651		goto out;
2652	}
2653
2654	/*
2655	 * Note, if the node ID contains a non-local bus ID, physical DMA is
2656	 * enabled for _all_ nodes on remote buses.
2657	 */
2658
2659	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
2660	if (n < 32)
2661		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
2662	else
2663		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
2664
2665	flush_writes(ohci);
2666 out:
2667	spin_unlock_irqrestore(&ohci->lock, flags);
2668
2669	return ret;
2670}
2671
2672static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
2673{
2674	struct fw_ohci *ohci = fw_ohci(card);
2675	unsigned long flags;
2676	u32 value;
2677
2678	switch (csr_offset) {
2679	case CSR_STATE_CLEAR:
2680	case CSR_STATE_SET:
2681		if (ohci->is_root &&
2682		    (reg_read(ohci, OHCI1394_LinkControlSet) &
2683		     OHCI1394_LinkControl_cycleMaster))
2684			value = CSR_STATE_BIT_CMSTR;
2685		else
2686			value = 0;
2687		if (ohci->csr_state_setclear_abdicate)
2688			value |= CSR_STATE_BIT_ABDICATE;
2689
2690		return value;
2691
2692	case CSR_NODE_IDS:
2693		return reg_read(ohci, OHCI1394_NodeID) << 16;
2694
2695	case CSR_CYCLE_TIME:
2696		return get_cycle_time(ohci);
2697
2698	case CSR_BUS_TIME:
2699		/*
2700		 * We might be called just after the cycle timer has wrapped
2701		 * around but just before the cycle64Seconds handler, so we
2702		 * better check here, too, if the bus time needs to be updated.
2703		 */
2704		spin_lock_irqsave(&ohci->lock, flags);
2705		value = update_bus_time(ohci);
2706		spin_unlock_irqrestore(&ohci->lock, flags);
2707		return value;
2708
2709	case CSR_BUSY_TIMEOUT:
2710		value = reg_read(ohci, OHCI1394_ATRetries);
2711		return (value >> 4) & 0x0ffff00f;
2712
2713	case CSR_PRIORITY_BUDGET:
2714		return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
2715			(ohci->pri_req_max << 8);
2716
2717	default:
2718		WARN_ON(1);
2719		return 0;
2720	}
2721}
2722
2723static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
2724{
2725	struct fw_ohci *ohci = fw_ohci(card);
2726	unsigned long flags;
2727
2728	switch (csr_offset) {
2729	case CSR_STATE_CLEAR:
2730		if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2731			reg_write(ohci, OHCI1394_LinkControlClear,
2732				  OHCI1394_LinkControl_cycleMaster);
2733			flush_writes(ohci);
2734		}
2735		if (value & CSR_STATE_BIT_ABDICATE)
2736			ohci->csr_state_setclear_abdicate = false;
2737		break;
2738
2739	case CSR_STATE_SET:
2740		if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2741			reg_write(ohci, OHCI1394_LinkControlSet,
2742				  OHCI1394_LinkControl_cycleMaster);
2743			flush_writes(ohci);
2744		}
2745		if (value & CSR_STATE_BIT_ABDICATE)
2746			ohci->csr_state_setclear_abdicate = true;
2747		break;
2748
2749	case CSR_NODE_IDS:
2750		reg_write(ohci, OHCI1394_NodeID, value >> 16);
2751		flush_writes(ohci);
2752		break;
2753
2754	case CSR_CYCLE_TIME:
2755		reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
2756		reg_write(ohci, OHCI1394_IntEventSet,
2757			  OHCI1394_cycleInconsistent);
2758		flush_writes(ohci);
2759		break;
2760
2761	case CSR_BUS_TIME:
2762		spin_lock_irqsave(&ohci->lock, flags);
2763		ohci->bus_time = (update_bus_time(ohci) & 0x40) |
2764		                 (value & ~0x7f);
2765		spin_unlock_irqrestore(&ohci->lock, flags);
2766		break;
2767
2768	case CSR_BUSY_TIMEOUT:
2769		value = (value & 0xf) | ((value & 0xf) << 4) |
2770			((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
2771		reg_write(ohci, OHCI1394_ATRetries, value);
2772		flush_writes(ohci);
2773		break;
2774
2775	case CSR_PRIORITY_BUDGET:
2776		reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
2777		flush_writes(ohci);
2778		break;
2779
2780	default:
2781		WARN_ON(1);
2782		break;
2783	}
2784}
2785
2786static void flush_iso_completions(struct iso_context *ctx)
2787{
2788	ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
2789			      ctx->header_length, ctx->header,
2790			      ctx->base.callback_data);
2791	ctx->header_length = 0;
2792}
2793
2794static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
2795{
2796	u32 *ctx_hdr;
2797
2798	if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
2799		if (ctx->base.drop_overflow_headers)
2800			return;
2801		flush_iso_completions(ctx);
2802	}
2803
2804	ctx_hdr = ctx->header + ctx->header_length;
2805	ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
2806
2807	/*
2808	 * The two iso header quadlets are byteswapped to little
2809	 * endian by the controller, but we want to present them
2810	 * as big endian for consistency with the bus endianness.
2811	 */
2812	if (ctx->base.header_size > 0)
2813		ctx_hdr[0] = swab32(dma_hdr[1]); /* iso packet header */
2814	if (ctx->base.header_size > 4)
2815		ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */
2816	if (ctx->base.header_size > 8)
2817		memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8);
2818	ctx->header_length += ctx->base.header_size;
2819}
2820
2821static int handle_ir_packet_per_buffer(struct context *context,
2822				       struct descriptor *d,
2823				       struct descriptor *last)
2824{
2825	struct iso_context *ctx =
2826		container_of(context, struct iso_context, context);
2827	struct descriptor *pd;
2828	u32 buffer_dma;
2829
2830	for (pd = d; pd <= last; pd++)
2831		if (pd->transfer_status)
2832			break;
2833	if (pd > last)
2834		/* Descriptor(s) not done yet, stop iteration */
2835		return 0;
2836
2837	while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) {
2838		d++;
2839		buffer_dma = le32_to_cpu(d->data_address);
2840		dma_sync_single_range_for_cpu(context->ohci->card.device,
2841					      buffer_dma & PAGE_MASK,
2842					      buffer_dma & ~PAGE_MASK,
2843					      le16_to_cpu(d->req_count),
2844					      DMA_FROM_DEVICE);
2845	}
2846
2847	copy_iso_headers(ctx, (u32 *) (last + 1));
2848
2849	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
2850		flush_iso_completions(ctx);
2851
2852	return 1;
2853}
2854
2855/* d == last because each descriptor block is only a single descriptor. */
2856static int handle_ir_buffer_fill(struct context *context,
2857				 struct descriptor *d,
2858				 struct descriptor *last)
2859{
2860	struct iso_context *ctx =
2861		container_of(context, struct iso_context, context);
2862	unsigned int req_count, res_count, completed;
2863	u32 buffer_dma;
2864
2865	req_count = le16_to_cpu(last->req_count);
2866	res_count = le16_to_cpu(READ_ONCE(last->res_count));
2867	completed = req_count - res_count;
2868	buffer_dma = le32_to_cpu(last->data_address);
2869
2870	if (completed > 0) {
2871		ctx->mc_buffer_bus = buffer_dma;
2872		ctx->mc_completed = completed;
2873	}
2874
2875	if (res_count != 0)
2876		/* Descriptor(s) not done yet, stop iteration */
2877		return 0;
2878
2879	dma_sync_single_range_for_cpu(context->ohci->card.device,
2880				      buffer_dma & PAGE_MASK,
2881				      buffer_dma & ~PAGE_MASK,
2882				      completed, DMA_FROM_DEVICE);
2883
2884	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
2885		ctx->base.callback.mc(&ctx->base,
2886				      buffer_dma + completed,
2887				      ctx->base.callback_data);
2888		ctx->mc_completed = 0;
2889	}
2890
2891	return 1;
2892}
2893
2894static void flush_ir_buffer_fill(struct iso_context *ctx)
2895{
2896	dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
2897				      ctx->mc_buffer_bus & PAGE_MASK,
2898				      ctx->mc_buffer_bus & ~PAGE_MASK,
2899				      ctx->mc_completed, DMA_FROM_DEVICE);
2900
2901	ctx->base.callback.mc(&ctx->base,
2902			      ctx->mc_buffer_bus + ctx->mc_completed,
2903			      ctx->base.callback_data);
2904	ctx->mc_completed = 0;
2905}
2906
2907static inline void sync_it_packet_for_cpu(struct context *context,
2908					  struct descriptor *pd)
2909{
2910	__le16 control;
2911	u32 buffer_dma;
2912
2913	/* only packets beginning with OUTPUT_MORE* have data buffers */
2914	if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
2915		return;
2916
2917	/* skip over the OUTPUT_MORE_IMMEDIATE descriptor */
2918	pd += 2;
2919
2920	/*
2921	 * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's
2922	 * data buffer is in the context program's coherent page and must not
2923	 * be synced.
2924	 */
2925	if ((le32_to_cpu(pd->data_address) & PAGE_MASK) ==
2926	    (context->current_bus          & PAGE_MASK)) {
2927		if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
2928			return;
2929		pd++;
2930	}
2931
2932	do {
2933		buffer_dma = le32_to_cpu(pd->data_address);
2934		dma_sync_single_range_for_cpu(context->ohci->card.device,
2935					      buffer_dma & PAGE_MASK,
2936					      buffer_dma & ~PAGE_MASK,
2937					      le16_to_cpu(pd->req_count),
2938					      DMA_TO_DEVICE);
2939		control = pd->control;
2940		pd++;
2941	} while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)));
2942}
2943
2944static int handle_it_packet(struct context *context,
2945			    struct descriptor *d,
2946			    struct descriptor *last)
2947{
2948	struct iso_context *ctx =
2949		container_of(context, struct iso_context, context);
2950	struct descriptor *pd;
2951	__be32 *ctx_hdr;
2952
2953	for (pd = d; pd <= last; pd++)
2954		if (pd->transfer_status)
2955			break;
2956	if (pd > last)
2957		/* Descriptor(s) not done yet, stop iteration */
2958		return 0;
2959
2960	sync_it_packet_for_cpu(context, d);
2961
2962	if (ctx->header_length + 4 > PAGE_SIZE) {
2963		if (ctx->base.drop_overflow_headers)
2964			return 1;
2965		flush_iso_completions(ctx);
2966	}
2967
2968	ctx_hdr = ctx->header + ctx->header_length;
2969	ctx->last_timestamp = le16_to_cpu(last->res_count);
2970	/* Present this value as big-endian to match the receive code */
2971	*ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) |
2972			       le16_to_cpu(pd->res_count));
2973	ctx->header_length += 4;
2974
2975	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
2976		flush_iso_completions(ctx);
2977
2978	return 1;
2979}
2980
2981static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
2982{
2983	u32 hi = channels >> 32, lo = channels;
2984
2985	reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
2986	reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
2987	reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
2988	reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
2989	ohci->mc_channels = channels;
2990}
2991
2992static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
2993				int type, int channel, size_t header_size)
2994{
2995	struct fw_ohci *ohci = fw_ohci(card);
2996	struct iso_context *ctx;
2997	descriptor_callback_t callback;
2998	u64 *channels;
2999	u32 *mask, regs;
3000	int index, ret = -EBUSY;
3001
3002	spin_lock_irq(&ohci->lock);
3003
3004	switch (type) {
3005	case FW_ISO_CONTEXT_TRANSMIT:
3006		mask     = &ohci->it_context_mask;
3007		callback = handle_it_packet;
3008		index    = ffs(*mask) - 1;
3009		if (index >= 0) {
3010			*mask &= ~(1 << index);
3011			regs = OHCI1394_IsoXmitContextBase(index);
3012			ctx  = &ohci->it_context_list[index];
3013		}
3014		break;
3015
3016	case FW_ISO_CONTEXT_RECEIVE:
3017		channels = &ohci->ir_context_channels;
3018		mask     = &ohci->ir_context_mask;
3019		callback = handle_ir_packet_per_buffer;
3020		index    = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
3021		if (index >= 0) {
3022			*channels &= ~(1ULL << channel);
3023			*mask     &= ~(1 << index);
3024			regs = OHCI1394_IsoRcvContextBase(index);
3025			ctx  = &ohci->ir_context_list[index];
3026		}
3027		break;
3028
3029	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3030		mask     = &ohci->ir_context_mask;
3031		callback = handle_ir_buffer_fill;
3032		index    = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
3033		if (index >= 0) {
3034			ohci->mc_allocated = true;
3035			*mask &= ~(1 << index);
3036			regs = OHCI1394_IsoRcvContextBase(index);
3037			ctx  = &ohci->ir_context_list[index];
3038		}
3039		break;
3040
3041	default:
3042		index = -1;
3043		ret = -ENOSYS;
3044	}
3045
3046	spin_unlock_irq(&ohci->lock);
3047
3048	if (index < 0)
3049		return ERR_PTR(ret);
3050
3051	memset(ctx, 0, sizeof(*ctx));
3052	ctx->header_length = 0;
3053	ctx->header = (void *) __get_free_page(GFP_KERNEL);
3054	if (ctx->header == NULL) {
3055		ret = -ENOMEM;
3056		goto out;
3057	}
3058	ret = context_init(&ctx->context, ohci, regs, callback);
3059	if (ret < 0)
3060		goto out_with_header;
3061
3062	if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
3063		set_multichannel_mask(ohci, 0);
3064		ctx->mc_completed = 0;
3065	}
3066
3067	return &ctx->base;
3068
3069 out_with_header:
3070	free_page((unsigned long)ctx->header);
3071 out:
3072	spin_lock_irq(&ohci->lock);
3073
3074	switch (type) {
3075	case FW_ISO_CONTEXT_RECEIVE:
3076		*channels |= 1ULL << channel;
3077		break;
3078
3079	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3080		ohci->mc_allocated = false;
3081		break;
3082	}
3083	*mask |= 1 << index;
3084
3085	spin_unlock_irq(&ohci->lock);
3086
3087	return ERR_PTR(ret);
3088}
3089
3090static int ohci_start_iso(struct fw_iso_context *base,
3091			  s32 cycle, u32 sync, u32 tags)
3092{
3093	struct iso_context *ctx = container_of(base, struct iso_context, base);
3094	struct fw_ohci *ohci = ctx->context.ohci;
3095	u32 control = IR_CONTEXT_ISOCH_HEADER, match;
3096	int index;
3097
3098	/* the controller cannot start without any queued packets */
3099	if (ctx->context.last->branch_address == 0)
3100		return -ENODATA;
3101
3102	switch (ctx->base.type) {
3103	case FW_ISO_CONTEXT_TRANSMIT:
3104		index = ctx - ohci->it_context_list;
3105		match = 0;
3106		if (cycle >= 0)
3107			match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
3108				(cycle & 0x7fff) << 16;
3109
3110		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
3111		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
3112		context_run(&ctx->context, match);
3113		break;
3114
3115	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3116		control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
3117		fallthrough;
3118	case FW_ISO_CONTEXT_RECEIVE:
3119		index = ctx - ohci->ir_context_list;
3120		match = (tags << 28) | (sync << 8) | ctx->base.channel;
3121		if (cycle >= 0) {
3122			match |= (cycle & 0x07fff) << 12;
3123			control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
3124		}
3125
3126		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
3127		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
3128		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
3129		context_run(&ctx->context, control);
3130
3131		ctx->sync = sync;
3132		ctx->tags = tags;
3133
3134		break;
3135	}
3136
3137	return 0;
3138}
3139
3140static int ohci_stop_iso(struct fw_iso_context *base)
3141{
3142	struct fw_ohci *ohci = fw_ohci(base->card);
3143	struct iso_context *ctx = container_of(base, struct iso_context, base);
3144	int index;
3145
3146	switch (ctx->base.type) {
3147	case FW_ISO_CONTEXT_TRANSMIT:
3148		index = ctx - ohci->it_context_list;
3149		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
3150		break;
3151
3152	case FW_ISO_CONTEXT_RECEIVE:
3153	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3154		index = ctx - ohci->ir_context_list;
3155		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
3156		break;
3157	}
3158	flush_writes(ohci);
3159	context_stop(&ctx->context);
3160	tasklet_kill(&ctx->context.tasklet);
3161
3162	return 0;
3163}
3164
3165static void ohci_free_iso_context(struct fw_iso_context *base)
3166{
3167	struct fw_ohci *ohci = fw_ohci(base->card);
3168	struct iso_context *ctx = container_of(base, struct iso_context, base);
3169	unsigned long flags;
3170	int index;
3171
3172	ohci_stop_iso(base);
3173	context_release(&ctx->context);
3174	free_page((unsigned long)ctx->header);
3175
3176	spin_lock_irqsave(&ohci->lock, flags);
3177
3178	switch (base->type) {
3179	case FW_ISO_CONTEXT_TRANSMIT:
3180		index = ctx - ohci->it_context_list;
3181		ohci->it_context_mask |= 1 << index;
3182		break;
3183
3184	case FW_ISO_CONTEXT_RECEIVE:
3185		index = ctx - ohci->ir_context_list;
3186		ohci->ir_context_mask |= 1 << index;
3187		ohci->ir_context_channels |= 1ULL << base->channel;
3188		break;
3189
3190	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3191		index = ctx - ohci->ir_context_list;
3192		ohci->ir_context_mask |= 1 << index;
3193		ohci->ir_context_channels |= ohci->mc_channels;
3194		ohci->mc_channels = 0;
3195		ohci->mc_allocated = false;
3196		break;
3197	}
3198
3199	spin_unlock_irqrestore(&ohci->lock, flags);
3200}
3201
3202static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
3203{
3204	struct fw_ohci *ohci = fw_ohci(base->card);
3205	unsigned long flags;
3206	int ret;
3207
3208	switch (base->type) {
3209	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3210
3211		spin_lock_irqsave(&ohci->lock, flags);
3212
3213		/* Don't allow multichannel to grab other contexts' channels. */
3214		if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
3215			*channels = ohci->ir_context_channels;
3216			ret = -EBUSY;
3217		} else {
3218			set_multichannel_mask(ohci, *channels);
3219			ret = 0;
3220		}
3221
3222		spin_unlock_irqrestore(&ohci->lock, flags);
3223
3224		break;
3225	default:
3226		ret = -EINVAL;
3227	}
3228
3229	return ret;
3230}
3231
3232#ifdef CONFIG_PM
3233static void ohci_resume_iso_dma(struct fw_ohci *ohci)
3234{
3235	int i;
3236	struct iso_context *ctx;
3237
3238	for (i = 0 ; i < ohci->n_ir ; i++) {
3239		ctx = &ohci->ir_context_list[i];
3240		if (ctx->context.running)
3241			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
3242	}
3243
3244	for (i = 0 ; i < ohci->n_it ; i++) {
3245		ctx = &ohci->it_context_list[i];
3246		if (ctx->context.running)
3247			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
3248	}
3249}
3250#endif
3251
3252static int queue_iso_transmit(struct iso_context *ctx,
3253			      struct fw_iso_packet *packet,
3254			      struct fw_iso_buffer *buffer,
3255			      unsigned long payload)
3256{
3257	struct descriptor *d, *last, *pd;
3258	struct fw_iso_packet *p;
3259	__le32 *header;
3260	dma_addr_t d_bus, page_bus;
3261	u32 z, header_z, payload_z, irq;
3262	u32 payload_index, payload_end_index, next_page_index;
3263	int page, end_page, i, length, offset;
3264
3265	p = packet;
3266	payload_index = payload;
3267
3268	if (p->skip)
3269		z = 1;
3270	else
3271		z = 2;
3272	if (p->header_length > 0)
3273		z++;
3274
3275	/* Determine the first page the payload isn't contained in. */
3276	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
3277	if (p->payload_length > 0)
3278		payload_z = end_page - (payload_index >> PAGE_SHIFT);
3279	else
3280		payload_z = 0;
3281
3282	z += payload_z;
3283
3284	/* Get header size in number of descriptors. */
3285	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
3286
3287	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
3288	if (d == NULL)
3289		return -ENOMEM;
3290
3291	if (!p->skip) {
3292		d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
3293		d[0].req_count = cpu_to_le16(8);
3294		/*
3295		 * Link the skip address to this descriptor itself.  This causes
3296		 * a context to skip a cycle whenever lost cycles or FIFO
3297		 * overruns occur, without dropping the data.  The application
3298		 * should then decide whether this is an error condition or not.
3299		 * FIXME:  Make the context's cycle-lost behaviour configurable?
3300		 */
3301		d[0].branch_address = cpu_to_le32(d_bus | z);
3302
3303		header = (__le32 *) &d[1];
3304		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
3305					IT_HEADER_TAG(p->tag) |
3306					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
3307					IT_HEADER_CHANNEL(ctx->base.channel) |
3308					IT_HEADER_SPEED(ctx->base.speed));
3309		header[1] =
3310			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
3311							  p->payload_length));
3312	}
3313
3314	if (p->header_length > 0) {
3315		d[2].req_count    = cpu_to_le16(p->header_length);
3316		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
3317		memcpy(&d[z], p->header, p->header_length);
3318	}
3319
3320	pd = d + z - payload_z;
3321	payload_end_index = payload_index + p->payload_length;
3322	for (i = 0; i < payload_z; i++) {
3323		page               = payload_index >> PAGE_SHIFT;
3324		offset             = payload_index & ~PAGE_MASK;
3325		next_page_index    = (page + 1) << PAGE_SHIFT;
3326		length             =
3327			min(next_page_index, payload_end_index) - payload_index;
3328		pd[i].req_count    = cpu_to_le16(length);
3329
3330		page_bus = page_private(buffer->pages[page]);
3331		pd[i].data_address = cpu_to_le32(page_bus + offset);
3332
3333		dma_sync_single_range_for_device(ctx->context.ohci->card.device,
3334						 page_bus, offset, length,
3335						 DMA_TO_DEVICE);
3336
3337		payload_index += length;
3338	}
3339
3340	if (p->interrupt)
3341		irq = DESCRIPTOR_IRQ_ALWAYS;
3342	else
3343		irq = DESCRIPTOR_NO_IRQ;
3344
3345	last = z == 2 ? d : d + z - 1;
3346	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
3347				     DESCRIPTOR_STATUS |
3348				     DESCRIPTOR_BRANCH_ALWAYS |
3349				     irq);
3350
3351	context_append(&ctx->context, d, z, header_z);
3352
3353	return 0;
3354}
3355
3356static int queue_iso_packet_per_buffer(struct iso_context *ctx,
3357				       struct fw_iso_packet *packet,
3358				       struct fw_iso_buffer *buffer,
3359				       unsigned long payload)
3360{
3361	struct device *device = ctx->context.ohci->card.device;
3362	struct descriptor *d, *pd;
3363	dma_addr_t d_bus, page_bus;
3364	u32 z, header_z, rest;
3365	int i, j, length;
3366	int page, offset, packet_count, header_size, payload_per_buffer;
3367
3368	/*
3369	 * The OHCI controller puts the isochronous header and trailer in the
3370	 * buffer, so we need at least 8 bytes.
3371	 */
3372	packet_count = packet->header_length / ctx->base.header_size;
3373	header_size  = max(ctx->base.header_size, (size_t)8);
3374
3375	/* Get header size in number of descriptors. */
3376	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
3377	page     = payload >> PAGE_SHIFT;
3378	offset   = payload & ~PAGE_MASK;
3379	payload_per_buffer = packet->payload_length / packet_count;
3380
3381	for (i = 0; i < packet_count; i++) {
3382		/* d points to the header descriptor */
3383		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
3384		d = context_get_descriptors(&ctx->context,
3385				z + header_z, &d_bus);
3386		if (d == NULL)
3387			return -ENOMEM;
3388
3389		d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
3390					      DESCRIPTOR_INPUT_MORE);
3391		if (packet->skip && i == 0)
3392			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
3393		d->req_count    = cpu_to_le16(header_size);
3394		d->res_count    = d->req_count;
3395		d->transfer_status = 0;
3396		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
3397
3398		rest = payload_per_buffer;
3399		pd = d;
3400		for (j = 1; j < z; j++) {
3401			pd++;
3402			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
3403						  DESCRIPTOR_INPUT_MORE);
3404
3405			if (offset + rest < PAGE_SIZE)
3406				length = rest;
3407			else
3408				length = PAGE_SIZE - offset;
3409			pd->req_count = cpu_to_le16(length);
3410			pd->res_count = pd->req_count;
3411			pd->transfer_status = 0;
3412
3413			page_bus = page_private(buffer->pages[page]);
3414			pd->data_address = cpu_to_le32(page_bus + offset);
3415
3416			dma_sync_single_range_for_device(device, page_bus,
3417							 offset, length,
3418							 DMA_FROM_DEVICE);
3419
3420			offset = (offset + length) & ~PAGE_MASK;
3421			rest -= length;
3422			if (offset == 0)
3423				page++;
3424		}
3425		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
3426					  DESCRIPTOR_INPUT_LAST |
3427					  DESCRIPTOR_BRANCH_ALWAYS);
3428		if (packet->interrupt && i == packet_count - 1)
3429			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
3430
3431		context_append(&ctx->context, d, z, header_z);
3432	}
3433
3434	return 0;
3435}
3436
3437static int queue_iso_buffer_fill(struct iso_context *ctx,
3438				 struct fw_iso_packet *packet,
3439				 struct fw_iso_buffer *buffer,
3440				 unsigned long payload)
3441{
3442	struct descriptor *d;
3443	dma_addr_t d_bus, page_bus;
3444	int page, offset, rest, z, i, length;
3445
3446	page   = payload >> PAGE_SHIFT;
3447	offset = payload & ~PAGE_MASK;
3448	rest   = packet->payload_length;
3449
3450	/* We need one descriptor for each page in the buffer. */
3451	z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);
3452
3453	if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
3454		return -EFAULT;
3455
3456	for (i = 0; i < z; i++) {
3457		d = context_get_descriptors(&ctx->context, 1, &d_bus);
3458		if (d == NULL)
3459			return -ENOMEM;
3460
3461		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
3462					 DESCRIPTOR_BRANCH_ALWAYS);
3463		if (packet->skip && i == 0)
3464			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
3465		if (packet->interrupt && i == z - 1)
3466			d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
3467
3468		if (offset + rest < PAGE_SIZE)
3469			length = rest;
3470		else
3471			length = PAGE_SIZE - offset;
3472		d->req_count = cpu_to_le16(length);
3473		d->res_count = d->req_count;
3474		d->transfer_status = 0;
3475
3476		page_bus = page_private(buffer->pages[page]);
3477		d->data_address = cpu_to_le32(page_bus + offset);
3478
3479		dma_sync_single_range_for_device(ctx->context.ohci->card.device,
3480						 page_bus, offset, length,
3481						 DMA_FROM_DEVICE);
3482
3483		rest -= length;
3484		offset = 0;
3485		page++;
3486
3487		context_append(&ctx->context, d, 1, 0);
3488	}
3489
3490	return 0;
3491}
3492
3493static int ohci_queue_iso(struct fw_iso_context *base,
3494			  struct fw_iso_packet *packet,
3495			  struct fw_iso_buffer *buffer,
3496			  unsigned long payload)
3497{
3498	struct iso_context *ctx = container_of(base, struct iso_context, base);
3499	unsigned long flags;
3500	int ret = -ENOSYS;
3501
3502	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
3503	switch (base->type) {
3504	case FW_ISO_CONTEXT_TRANSMIT:
3505		ret = queue_iso_transmit(ctx, packet, buffer, payload);
3506		break;
3507	case FW_ISO_CONTEXT_RECEIVE:
3508		ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
3509		break;
3510	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3511		ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
3512		break;
3513	}
3514	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
3515
3516	return ret;
3517}
3518
3519static void ohci_flush_queue_iso(struct fw_iso_context *base)
3520{
3521	struct context *ctx =
3522			&container_of(base, struct iso_context, base)->context;
3523
3524	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
3525}
3526
3527static int ohci_flush_iso_completions(struct fw_iso_context *base)
3528{
3529	struct iso_context *ctx = container_of(base, struct iso_context, base);
3530	int ret = 0;
3531
3532	tasklet_disable_in_atomic(&ctx->context.tasklet);
3533
3534	if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
3535		context_tasklet((unsigned long)&ctx->context);
3536
3537		switch (base->type) {
3538		case FW_ISO_CONTEXT_TRANSMIT:
3539		case FW_ISO_CONTEXT_RECEIVE:
3540			if (ctx->header_length != 0)
3541				flush_iso_completions(ctx);
3542			break;
3543		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3544			if (ctx->mc_completed != 0)
3545				flush_ir_buffer_fill(ctx);
3546			break;
3547		default:
3548			ret = -ENOSYS;
3549		}
3550
3551		clear_bit_unlock(0, &ctx->flushing_completions);
3552		smp_mb__after_atomic();
3553	}
3554
3555	tasklet_enable(&ctx->context.tasklet);
3556
3557	return ret;
3558}
3559
3560static const struct fw_card_driver ohci_driver = {
3561	.enable			= ohci_enable,
3562	.read_phy_reg		= ohci_read_phy_reg,
3563	.update_phy_reg		= ohci_update_phy_reg,
3564	.set_config_rom		= ohci_set_config_rom,
3565	.send_request		= ohci_send_request,
3566	.send_response		= ohci_send_response,
3567	.cancel_packet		= ohci_cancel_packet,
3568	.enable_phys_dma	= ohci_enable_phys_dma,
3569	.read_csr		= ohci_read_csr,
3570	.write_csr		= ohci_write_csr,
3571
3572	.allocate_iso_context	= ohci_allocate_iso_context,
3573	.free_iso_context	= ohci_free_iso_context,
3574	.set_iso_channels	= ohci_set_iso_channels,
3575	.queue_iso		= ohci_queue_iso,
3576	.flush_queue_iso	= ohci_flush_queue_iso,
3577	.flush_iso_completions	= ohci_flush_iso_completions,
3578	.start_iso		= ohci_start_iso,
3579	.stop_iso		= ohci_stop_iso,
3580};
3581
3582#ifdef CONFIG_PPC_PMAC
3583static void pmac_ohci_on(struct pci_dev *dev)
3584{
3585	if (machine_is(powermac)) {
3586		struct device_node *ofn = pci_device_to_OF_node(dev);
3587
3588		if (ofn) {
3589			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
3590			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3591		}
3592	}
3593}
3594
3595static void pmac_ohci_off(struct pci_dev *dev)
3596{
3597	if (machine_is(powermac)) {
3598		struct device_node *ofn = pci_device_to_OF_node(dev);
3599
3600		if (ofn) {
3601			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3602			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
3603		}
3604	}
3605}
3606#else
3607static inline void pmac_ohci_on(struct pci_dev *dev) {}
3608static inline void pmac_ohci_off(struct pci_dev *dev) {}
3609#endif /* CONFIG_PPC_PMAC */
3610
3611static void release_ohci(struct device *dev, void *data)
3612{
3613	struct pci_dev *pdev = to_pci_dev(dev);
3614	struct fw_ohci *ohci = pci_get_drvdata(pdev);
3615
3616	pmac_ohci_off(pdev);
3617
3618	ar_context_release(&ohci->ar_response_ctx);
3619	ar_context_release(&ohci->ar_request_ctx);
3620
3621	dev_notice(dev, "removed fw-ohci device\n");
3622}
3623
3624static int pci_probe(struct pci_dev *dev,
3625			       const struct pci_device_id *ent)
3626{
3627	struct fw_ohci *ohci;
3628	u32 bus_options, max_receive, link_speed, version;
3629	u64 guid;
3630	int i, err;
3631	size_t size;
3632
3633	if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
3634		dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
3635		return -ENOSYS;
3636	}
3637
3638	ohci = devres_alloc(release_ohci, sizeof(*ohci), GFP_KERNEL);
3639	if (ohci == NULL)
3640		return -ENOMEM;
3641	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
3642	pci_set_drvdata(dev, ohci);
3643	pmac_ohci_on(dev);
3644	devres_add(&dev->dev, ohci);
3645
3646	err = pcim_enable_device(dev);
3647	if (err) {
3648		dev_err(&dev->dev, "failed to enable OHCI hardware\n");
3649		return err;
3650	}
3651
3652	pci_set_master(dev);
3653	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3654
3655	spin_lock_init(&ohci->lock);
3656	mutex_init(&ohci->phy_reg_mutex);
3657
3658	INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
3659
3660	if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
3661	    pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
3662		ohci_err(ohci, "invalid MMIO resource\n");
3663		return -ENXIO;
3664	}
3665
3666	err = pcim_iomap_regions(dev, 1 << 0, ohci_driver_name);
3667	if (err) {
3668		ohci_err(ohci, "request and map MMIO resource unavailable\n");
3669		return -ENXIO;
3670	}
3671	ohci->registers = pcim_iomap_table(dev)[0];
3672
3673	for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
3674		if ((ohci_quirks[i].vendor == dev->vendor) &&
3675		    (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
3676		     ohci_quirks[i].device == dev->device) &&
3677		    (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
3678		     ohci_quirks[i].revision >= dev->revision)) {
3679			ohci->quirks = ohci_quirks[i].flags;
3680			break;
3681		}
3682	if (param_quirks)
3683		ohci->quirks = param_quirks;
3684
3685	if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev))
3686		ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ;
3687
3688	/*
3689	 * Because dma_alloc_coherent() allocates at least one page,
3690	 * we save space by using a common buffer for the AR request/
3691	 * response descriptors and the self IDs buffer.
3692	 */
3693	BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
3694	BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
3695	ohci->misc_buffer = dmam_alloc_coherent(&dev->dev, PAGE_SIZE, &ohci->misc_buffer_bus,
3696						GFP_KERNEL);
3697	if (!ohci->misc_buffer)
3698		return -ENOMEM;
3699
3700	err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
3701			      OHCI1394_AsReqRcvContextControlSet);
3702	if (err < 0)
3703		return err;
3704
3705	err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
3706			      OHCI1394_AsRspRcvContextControlSet);
3707	if (err < 0)
3708		return err;
3709
3710	err = context_init(&ohci->at_request_ctx, ohci,
3711			   OHCI1394_AsReqTrContextControlSet, handle_at_packet);
3712	if (err < 0)
3713		return err;
3714
3715	err = context_init(&ohci->at_response_ctx, ohci,
3716			   OHCI1394_AsRspTrContextControlSet, handle_at_packet);
3717	if (err < 0)
3718		return err;
3719
3720	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
3721	ohci->ir_context_channels = ~0ULL;
3722	ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
3723	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
3724	ohci->ir_context_mask = ohci->ir_context_support;
3725	ohci->n_ir = hweight32(ohci->ir_context_mask);
3726	size = sizeof(struct iso_context) * ohci->n_ir;
3727	ohci->ir_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
3728	if (!ohci->ir_context_list)
3729		return -ENOMEM;
3730
3731	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
3732	ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
3733	/* JMicron JMB38x often shows 0 at first read, just ignore it */
3734	if (!ohci->it_context_support) {
3735		ohci_notice(ohci, "overriding IsoXmitIntMask\n");
3736		ohci->it_context_support = 0xf;
3737	}
3738	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
3739	ohci->it_context_mask = ohci->it_context_support;
3740	ohci->n_it = hweight32(ohci->it_context_mask);
3741	size = sizeof(struct iso_context) * ohci->n_it;
3742	ohci->it_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
3743	if (!ohci->it_context_list)
3744		return -ENOMEM;
3745
3746	ohci->self_id     = ohci->misc_buffer     + PAGE_SIZE/2;
3747	ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
3748
3749	bus_options = reg_read(ohci, OHCI1394_BusOptions);
3750	max_receive = (bus_options >> 12) & 0xf;
3751	link_speed = bus_options & 0x7;
3752	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
3753		reg_read(ohci, OHCI1394_GUIDLo);
3754
3755	if (!(ohci->quirks & QUIRK_NO_MSI))
3756		pci_enable_msi(dev);
3757	err = devm_request_irq(&dev->dev, dev->irq, irq_handler,
3758			       pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name, ohci);
3759	if (err < 0) {
3760		ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq);
3761		goto fail_msi;
3762	}
3763
3764	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
3765	if (err)
3766		goto fail_msi;
3767
3768	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
3769	ohci_notice(ohci,
3770		    "added OHCI v%x.%x device as card %d, "
3771		    "%d IR + %d IT contexts, quirks 0x%x%s\n",
3772		    version >> 16, version & 0xff, ohci->card.index,
3773		    ohci->n_ir, ohci->n_it, ohci->quirks,
3774		    reg_read(ohci, OHCI1394_PhyUpperBound) ?
3775			", physUB" : "");
3776
3777	return 0;
3778
3779 fail_msi:
3780	devm_free_irq(&dev->dev, dev->irq, ohci);
3781	pci_disable_msi(dev);
3782
3783	return err;
3784}
3785
3786static void pci_remove(struct pci_dev *dev)
3787{
3788	struct fw_ohci *ohci = pci_get_drvdata(dev);
3789
3790	/*
3791	 * If the removal is happening from the suspend state, LPS won't be
3792	 * enabled and host registers (eg., IntMaskClear) won't be accessible.
3793	 */
3794	if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) {
3795		reg_write(ohci, OHCI1394_IntMaskClear, ~0);
3796		flush_writes(ohci);
3797	}
3798	cancel_work_sync(&ohci->bus_reset_work);
3799	fw_core_remove_card(&ohci->card);
3800
3801	/*
3802	 * FIXME: Fail all pending packets here, now that the upper
3803	 * layers can't queue any more.
3804	 */
3805
3806	software_reset(ohci);
3807
3808	devm_free_irq(&dev->dev, dev->irq, ohci);
3809	pci_disable_msi(dev);
3810
3811	dev_notice(&dev->dev, "removing fw-ohci device\n");
3812}
3813
3814#ifdef CONFIG_PM
3815static int pci_suspend(struct pci_dev *dev, pm_message_t state)
3816{
3817	struct fw_ohci *ohci = pci_get_drvdata(dev);
3818	int err;
3819
3820	software_reset(ohci);
3821	err = pci_save_state(dev);
3822	if (err) {
3823		ohci_err(ohci, "pci_save_state failed\n");
3824		return err;
3825	}
3826	err = pci_set_power_state(dev, pci_choose_state(dev, state));
3827	if (err)
3828		ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
3829	pmac_ohci_off(dev);
3830
3831	return 0;
3832}
3833
3834static int pci_resume(struct pci_dev *dev)
3835{
3836	struct fw_ohci *ohci = pci_get_drvdata(dev);
3837	int err;
3838
3839	pmac_ohci_on(dev);
3840	pci_set_power_state(dev, PCI_D0);
3841	pci_restore_state(dev);
3842	err = pci_enable_device(dev);
3843	if (err) {
3844		ohci_err(ohci, "pci_enable_device failed\n");
3845		return err;
3846	}
3847
3848	/* Some systems don't setup GUID register on resume from ram  */
3849	if (!reg_read(ohci, OHCI1394_GUIDLo) &&
3850					!reg_read(ohci, OHCI1394_GUIDHi)) {
3851		reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
3852		reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
3853	}
3854
3855	err = ohci_enable(&ohci->card, NULL, 0);
3856	if (err)
3857		return err;
3858
3859	ohci_resume_iso_dma(ohci);
3860
3861	return 0;
3862}
3863#endif
3864
3865static const struct pci_device_id pci_table[] = {
3866	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
3867	{ }
3868};
3869
3870MODULE_DEVICE_TABLE(pci, pci_table);
3871
3872static struct pci_driver fw_ohci_pci_driver = {
3873	.name		= ohci_driver_name,
3874	.id_table	= pci_table,
3875	.probe		= pci_probe,
3876	.remove		= pci_remove,
3877#ifdef CONFIG_PM
3878	.resume		= pci_resume,
3879	.suspend	= pci_suspend,
3880#endif
3881};
3882
3883static int __init fw_ohci_init(void)
3884{
3885	selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0);
3886	if (!selfid_workqueue)
3887		return -ENOMEM;
3888
3889	return pci_register_driver(&fw_ohci_pci_driver);
3890}
3891
3892static void __exit fw_ohci_cleanup(void)
3893{
3894	pci_unregister_driver(&fw_ohci_pci_driver);
3895	destroy_workqueue(selfid_workqueue);
3896}
3897
3898module_init(fw_ohci_init);
3899module_exit(fw_ohci_cleanup);
3900
3901MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
3902MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
3903MODULE_LICENSE("GPL");
3904
3905/* Provide a module alias so root-on-sbp2 initrds don't break. */
3906MODULE_ALIAS("ohci1394");
3907