/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
 *    Copyright 2016 Microsemi Corporation
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */
#ifndef HPSA_H
#define HPSA_H

#include <scsi/scsicam.h>

#define IO_OK		0
#define IO_ERROR	1

struct ctlr_info;

struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};
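
/*
 * Each supported board fills in one of the SA5*_access tables defined near
 * the bottom of this file; the core driver then talks to the hardware only
 * through these hooks, e.g. h->access.submit_command(h, c), so the
 * register-level differences between board generations stay in this header.
 */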

/* for SAS hosts and SAS expanders */
struct hpsa_sas_node {
	struct device *parent_dev;
	struct list_head port_list_head;
};

struct hpsa_sas_port {
	struct list_head port_list_entry;
	u64 sas_address;
	struct sas_port *port;
	int next_phy_index;
	struct list_head phy_list_head;
	struct hpsa_sas_node *parent_node;
	struct sas_rphy *rphy;
};

struct hpsa_sas_phy {
	struct list_head phy_list_entry;
	struct sas_phy *phy;
	struct hpsa_sas_port *parent_port;
	bool added_to_port;
};

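/* Queue depth applied to devices presented by an external array (see hpsa.c). */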
#define EXTERNAL_QD 128
struct hpsa_scsi_dev_t {
	unsigned int devtype;
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
	u8 physical_device : 1;
	u8 expose_device;
	u8 removed : 1;			/* device is marked for death */
	u8 was_removed : 1;		/* device actually removed */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];    /* from inquiry pg. 0x83 */
	u64 sas_address;
	u64 eli;			/* from report diags. */
	unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
	unsigned char model[16];        /* bytes 16-31 of inquiry data */
	unsigned char rev;		/* byte 2 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	unsigned char volume_offline;	/* discovered via TUR or VPD */
	u16 queue_depth;		/* max queue_depth for this device */
	atomic_t commands_outstanding;	/* track commands sent to device */
	atomic_t ioaccel_cmds_out;	/* Only used for physical devices;
					 * counts commands sent to the physical
					 * device via the "ioaccel" path.
					 */
	bool in_reset;
	u32 ioaccel_handle;
	u8 active_path_index;
	u8 path_map;
	u8 bay;
	u8 box[8];
	u16 phys_connector[8];
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_be_enabled;
	int hba_ioaccel_enabled;
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

	/*
	 * Pointers from logical drive map indices to the phys drives that
	 * make those logical drives.  Note, multiple logical drives may
	 * share physical drives.  You can have for instance 5 physical
	 * drives with 3 logical drives each using those same 5 physical
	 * disks. We need these pointers for counting i/o's out to physical
	 * devices in order to honor physical device queue depth limits.
	 */
	struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
	int nphysical_disks;
	int supports_aborts;
	struct hpsa_sas_port *sas_port;
	int external;   /* 1 = from external array, 0 = not, < 0 = unknown */
};

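/*
 * Per reply-queue state: 'head' is the DMA ring the controller posts
 * completion tags into, 'current_entry' is the driver's read index, and
 * 'wraparound' records which pass through the ring we are on (see
 * SA5_performant_completed() below).
 */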
struct reply_queue_buffer {
	u64 *head;
	size_t size;
	u8 wraparound;
	u32 current_entry;
	dma_addr_t busaddr;
};

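/*
 * Controller parameters as exchanged with firmware via BMIC commands;
 * packed so the structure matches the firmware's byte layout exactly,
 * with no compiler-inserted padding.
 */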
#pragma pack(1)
struct bmic_controller_parameters {
	u8   led_flags;
	u8   enable_command_list_verification;
	u8   backed_out_write_drives;
	u16  stripes_for_parity;
	u8   parity_distribution_mode_flags;
	u16  max_driver_requests;
	u16  elevator_trend_count;
	u8   disable_elevator;
	u8   force_scan_complete;
	u8   scsi_transfer_mode;
	u8   force_narrow;
	u8   rebuild_priority;
	u8   expand_priority;
	u8   host_sdb_asic_fix;
	u8   pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8   bridge_revision;
	u8   snapshot_priority;
	u32  os_specific;
	u8   post_prompt_timeout;
	u8   automatic_drive_slamming;
	u8   reserved1;
	u8   nvram_flags;
	u8   cache_nvram_flags;
	u8   drive_config_flags;
	u16  reserved2;
	u8   temp_warning_level;
	u8   temp_shutdown_level;
	u8   temp_condition_reset;
	u8   max_coalesce_commands;
	u32  max_coalesce_delay;
	u8   orca_password[4];
	u8   access_id[16];
	u8   reserved[356];
};
#pragma pack()

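/* Per-controller state; one of these is allocated for each Smart Array board the driver claims. */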
struct ctlr_info {
	unsigned int *reply_map;
	int	ctlr;
	char	devname[8];
	char    *product_name;
	struct pci_dev *pdev;
	u32	board_id;
	u64	sas_address;
	void __iomem *vaddr;
	unsigned long paddr;
	int	nr_cmds; /* Number of commands allowed on this controller */
#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
	struct CfgTable __iomem *cfgtable;
	int	interrupts_enabled;
	int	max_commands;
	int	last_collision_tag; /* tags are global */
	atomic_t commands_outstanding;
#	define PERF_MODE_INT	0
#	define DOORBELL_INT	1
#	define SIMPLE_MODE_INT	2
#	define MEMQ_MODE_INT	3
	unsigned int msix_vectors;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;

	/* queue and queue Info */
	unsigned int Qdepth;
	unsigned int maxSG;
	spinlock_t lock;
	int maxsgentries;
	u8 max_cmd_sg_entries;
	int chainsize;
	struct SGDescriptor **cmd_sg_list;
	struct ioaccel2_sg_element **ioaccel2_cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList	*cmd_pool;
	dma_addr_t		cmd_pool_dhandle;
	struct io_accel1_cmd	*ioaccel_cmd_pool;
	dma_addr_t		ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd	*ioaccel2_cmd_pool;
	dma_addr_t		ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo	*errinfo_pool;
	dma_addr_t		errinfo_pool_dhandle;
	unsigned long		*cmd_pool_bits;
	int			scan_finished;
	u8			scan_waiting : 1;
	spinlock_t		scan_lock;
	wait_queue_head_t	scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[];  */
	int ndevices; /* number of used elements in .dev[] array. */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	u32 trans_support;
	u32 trans_offset;
	struct TransTable_struct __iomem *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (10)
	atomic_t passthru_cmds_avail;

	/*
	 * Performant mode completion buffers
	 */
	size_t reply_queue_size;
	struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 __iomem *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u32 driver_support;
	u32 fw_support;
	int ioaccel_support;
	int ioaccel_maxsg;
	u64 last_intr_timestamp;
	u32 last_heartbeat;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 __percpu *lockup_detected;
	struct delayed_work monitor_ctlr_work;
	struct delayed_work rescan_ctlr_work;
	struct delayed_work event_monitor_work;
	int remove_in_progress;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	char intrname[MAX_REPLY_QUEUES][16];	/* "hpsa0-msix00" names */
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED  (1 << 0)
#define HPSATMF_PHYS_LUN_RESET  (1 << 1)
#define HPSATMF_PHYS_NEX_RESET  (1 << 2)
#define HPSATMF_PHYS_TASK_ABORT (1 << 3)
#define HPSATMF_PHYS_TSET_ABORT (1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA  (1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
#define HPSATMF_PHYS_QRY_TASK   (1 << 7)
#define HPSATMF_PHYS_QRY_TSET   (1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC  (1 << 9)
#define HPSATMF_IOACCEL_ENABLED (1 << 15)
#define HPSATMF_MASK_SUPPORTED  (1 << 16)
#define HPSATMF_LOG_LUN_RESET   (1 << 17)
#define HPSATMF_LOG_NEX_RESET   (1 << 18)
#define HPSATMF_LOG_TASK_ABORT  (1 << 19)
#define HPSATMF_LOG_TSET_ABORT  (1 << 20)
#define HPSATMF_LOG_CLEAR_ACA   (1 << 21)
#define HPSATMF_LOG_CLEAR_TSET  (1 << 22)
#define HPSATMF_LOG_QRY_TASK    (1 << 23)
#define HPSATMF_LOG_QRY_TSET    (1 << 24)
#define HPSATMF_LOG_QRY_ASYNC   (1 << 25)
	u32 events;
#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)

#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
	spinlock_t offline_device_lock;
	struct list_head offline_device_list;
	int	acciopath_status;
	int	drv_req_rescan;
	int	raid_offload_debug;
	int     discovery_polling;
	int     legacy_board;
	struct  ReportLUNdata *lastlogicals;
	int	needs_abort_tags_swizzled;
	struct workqueue_struct *resubmit_wq;
	struct workqueue_struct *rescan_ctlr_wq;
	struct workqueue_struct *monitor_ctlr_wq;
	atomic_t abort_cmds_available;
	wait_queue_head_t event_sync_wait_queue;
	struct mutex reset_mutex;
	u8 reset_in_progress;
	struct hpsa_sas_node *sas_host;
	spinlock_t reset_lock;
};

struct offline_device_entry {
	unsigned char scsi3addr[8];
	struct list_head offline_list;
};

#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_PHYS_TARGET_RESET 0x99 /* not defined by cciss spec */
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)

/* Maximum time in seconds the driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)

/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)

/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polling the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
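/*
 * With the values above: polling every 100 ms for up to 120 s gives
 * HPSA_BOARD_READY_ITERATIONS = (120 * 1000) / 100 = 1200 polls, and
 * HPSA_BOARD_NOT_READY_ITERATIONS = (100 * 1000) / 100 = 1000 polls.
 */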
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)

/*  Defining the different access_methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
#define SA5_DOORBELL	0x20
#define SA5_REQUEST_PORT_OFFSET	0x40
#define SA5_REQUEST_PORT64_LO_OFFSET 0xC0
#define SA5_REQUEST_PORT64_HI_OFFSET 0xC4
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS		0x30
#define SA5_SCRATCHPAD_OFFSET	0xB0

#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8

#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT		0x02

/* Performant mode flags */
#define SA5_PERF_INTR_PENDING   0x04
#define SA5_PERF_INTR_OFF       0x05
#define SA5_OUTDB_STATUS_PERF_BIT       0x01
#define SA5_OUTDB_CLEAR_PERF_BIT        0x01
#define SA5_OUTDB_CLEAR         0xA0
#define SA5_OUTDB_STATUS        0x9C


#define HPSA_INTR_ON	1
#define HPSA_INTR_OFF	0

/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32	0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4

#define HPSA_PHYSICAL_DEVICE_BUS	0
#define HPSA_RAID_VOLUME_BUS		1
#define HPSA_EXTERNAL_RAID_VOLUME_BUS	2
#define HPSA_HBA_BUS			0
#define HPSA_LEGACY_HBA_BUS		3

/*
 * Send the command to the hardware
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
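	/* read back to flush the posted write out to the controller */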
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}

static void SA5_submit_command_no_read(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

/*
 *  This card is the opposite of the other cards.
 *   0 turns interrupts on...
 *   0x08 turns them off...
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

/*
 *  Variant of the above; 0x04 turns interrupts off...
 */
static void SA5B_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5B_INTR_OFF,
		       h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* turn on interrupts */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else {
		h->interrupts_enabled = 0;
		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

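/*
 * Pull the next completed command tag off reply queue q.  The low bit of
 * each posted entry acts as a cycle flag: an entry is treated as new only
 * while that bit matches rq->wraparound, which the driver toggles each
 * time current_entry wraps back to the start of the ring.
 */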
static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
	struct reply_queue_buffer *rq = &h->reply_queue[q];
	unsigned long register_value = FIFO_EMPTY;

	/* msi auto clears the interrupt pending bit. */
	if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) {
		/* flush the controller write of the reply queue by reading
		 * outbound doorbell status register.
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
		/* Do a read in order to flush the write to the controller
		 * (as per spec.)
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
	}

	if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
		register_value = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		register_value = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return register_value;
}

/*
 *   returns value read from hardware.
 *     returns FIFO_EMPTY if there is nothing to read
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);

	if (register_value != FIFO_EMPTY)
		atomic_dec(&h->commands_outstanding);

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

	return register_value;
}
/*
 *	Returns true if an interrupt is pending.
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value  =
		readl(h->vaddr + SA5_INTR_STATUS);
	return register_value & SA5_INTR_PENDING;
}

static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	if (!register_value)
		return false;

	/* Read outbound doorbell to flush */
	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}

#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT    0x100

static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
		true : false;
}

/*
 *      Returns true if an interrupt is pending.
 */
static bool SA5B_intr_pending(struct ctlr_info *h)
{
	return readl(h->vaddr + SA5_INTR_STATUS) & SA5B_INTR_PENDING;
}

#define IOACCEL_MODE1_REPLY_QUEUE_INDEX  0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX     0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX     0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED       0xFFFFFFFFFFFFFFFFULL

static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
	u64 register_value;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	BUG_ON(q >= h->nreply_queues);

	register_value = rq->head[rq->current_entry];
	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
		if (++rq->current_entry == rq->size)
			rq->current_entry = 0;
		/*
		 * @todo
		 *
		 * Don't really need to write the new index after each command,
		 * but with current driver design this is easiest.
		 */
		wmb();
		writel((q << 24) | rq->current_entry, h->vaddr +
				IOACCEL_MODE1_CONSUMER_INDEX);
		atomic_dec(&h->commands_outstanding);
	}
	return (unsigned long) register_value;
}

static struct access_method SA5_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_intr_mask,
	.intr_pending =		SA5_intr_pending,
	.command_completed =	SA5_completed,
};

/* Duplicate entry of the above to mark unsupported boards */
static struct access_method SA5A_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_intr_mask,
	.intr_pending =		SA5_intr_pending,
	.command_completed =	SA5_completed,
};

static struct access_method SA5B_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5B_intr_mask,
	.intr_pending =		SA5B_intr_pending,
	.command_completed =	SA5_completed,
};

static struct access_method SA5_ioaccel_mode1_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_ioaccel_mode1_intr_pending,
	.command_completed =	SA5_ioaccel_mode1_completed,
};

static struct access_method SA5_ioaccel_mode2_access = {
	.submit_command =	SA5_submit_command_ioaccel2,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_performant_intr_pending,
	.command_completed =	SA5_performant_completed,
};

static struct access_method SA5_performant_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_performant_intr_pending,
	.command_completed =	SA5_performant_completed,
};

static struct access_method SA5_performant_access_no_read = {
	.submit_command =	SA5_submit_command_no_read,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_performant_intr_pending,
	.command_completed =	SA5_performant_completed,
};

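/*
 * hpsa.c's PCI board table pairs each supported board id with a product
 * name and one of the access-method tables above.
 */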
struct board_type {
	u32	board_id;
	char	*product_name;
	struct access_method *access;
};

#endif /* HPSA_H */