/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/

/*
	Part of Open IDE bus manager

	ATA command protocol
*/


#include "ide_internal.h"

#include "ide_sim.h"
#include "ide_cmds.h"


/** verify that device is ready for further PIO transmission */

static bool
check_rw_status(ide_device_info *device, bool drqStatus)
{
	ide_bus_info *bus = device->bus;
	int status;

	status = bus->controller->get_altstatus(bus->channel_cookie);

	if ((status & ide_status_bsy) != 0) {
		device->subsys_status = SCSI_SEQUENCE_FAIL;
		return false;
	}

	if (drqStatus != ((status & ide_status_drq) != 0)) {
		device->subsys_status = SCSI_SEQUENCE_FAIL;
		return false;
	}

	return true;
}


/**	DPC called at
 *	 - the beginning of each PIO read/write block
 *	 - the end of a PIO write transmission
 */

void
ata_dpc_PIO(ide_qrequest *qrequest)
{
	ide_device_info *device = qrequest->device;
	uint32 timeout = qrequest->request->timeout > 0 ?
		qrequest->request->timeout : IDE_STD_TIMEOUT;

	SHOW_FLOW0(3, "");

	if (check_rw_error(device, qrequest)
		|| !check_rw_status(device, qrequest->is_write ? device->left_blocks > 0 : true))
	{
		// failure reported by device
		SHOW_FLOW0(3, "command finished unsuccessfully");

		finish_checksense(qrequest);
		return;
	}

	if (qrequest->is_write) {
		if (device->left_blocks == 0) {
			// this was the end-of-transmission IRQ
			SHOW_FLOW0(3, "write access finished");
			if (!wait_for_drqdown(device))
				SHOW_ERROR0(3, "device wants to transmit data though command is finished");
			goto finish;
		}

		// wait until device requests data
		SHOW_FLOW0(3, "Waiting for device ready to transmit");
		if (!wait_for_drq(device)) {
			SHOW_FLOW0(3, "device not ready for data transmission - abort");
			goto finish;
		}

		// start async waiting for the next block/end of command;
		// ideally we would start waiting once the block is transmitted, but
		// with bad luck the IRQ fires exactly between the transmission and
		// the start of waiting, so we'd better start waiting too early; as
		// we are in the service thread, a DPC initiated by an IRQ cannot
		// overtake us, so there is no need to block IRQs during the send
		start_waiting_nolock(device->bus, timeout, ide_state_async_waiting);

		// a too short data buffer shouldn't happen here -
		// anyway, we are prepared for it
		SHOW_FLOW0(3, "Writing one block");
		if (write_PIO_block(qrequest, 512) == B_ERROR)
			goto finish_cancel_timeout;

		--device->left_blocks;
	} else {
		if (device->left_blocks > 1) {
			// start async waiting for the next command (see above)
			start_waiting_nolock(device->bus, timeout, ide_state_async_waiting);
		}

		// see write
		SHOW_FLOW0(3, "Reading one block");
		if (read_PIO_block(qrequest, 512) == B_ERROR)
			goto finish_cancel_timeout;

		--device->left_blocks;

		if (device->left_blocks == 0) {
			// at the end of the transmission, wait for the data request to go low
			SHOW_FLOW0(3, "Waiting for device to finish transmission");

			if (!wait_for_drqdown(device))
				SHOW_FLOW0(3, "Device continues data transmission - abort command");

			// we don't cancel the timeout, as no timeout is started during
			// the last block
			goto finish;
		}
	}

	return;

finish_cancel_timeout:
	cancel_irq_timeout(device->bus);

finish:
	finish_checksense(qrequest);
}


/** DPC called when IRQ was fired at end of DMA transmission */

void
ata_dpc_DMA(ide_qrequest *qrequest)
{
	ide_device_info *device = qrequest->device;
	bool dma_success, dev_err;

	dma_success = finish_dma(device);
	dev_err = check_rw_error(device, qrequest);

	if (dma_success && !dev_err) {
		// reset error count if DMA worked
		device->DMA_failures = 0;
		device->CQ_failures = 0;
		qrequest->request->data_resid = 0;
		finish_checksense(qrequest);
	} else {
		SHOW_ERROR0(2, "Error in DMA transmission");

		set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_COM_FAILURE);

		if (++device->DMA_failures >= MAX_DMA_FAILURES) {
			SHOW_ERROR0(2, "Disabled DMA because of too many errors");
			device->DMA_enabled = false;
		}

		// reset queue in case queuing is active
		finish_reset_queue(qrequest);
	}
}


// list of LBA48 opcodes
static uint8 cmd_48[2][2] = {
	{ IDE_CMD_READ_SECTORS_EXT, IDE_CMD_WRITE_SECTORS_EXT },
	{ IDE_CMD_READ_DMA_EXT, IDE_CMD_WRITE_DMA_EXT }
};


// list of normal LBA opcodes
static uint8 cmd_28[2][2] = {
	{ IDE_CMD_READ_SECTORS, IDE_CMD_WRITE_SECTORS },
	{ IDE_CMD_READ_DMA, IDE_CMD_WRITE_DMA }
};
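
// both tables are indexed as cmd_xx[uses_dma][is_write]; e.g.
// cmd_28[0][1] yields IDE_CMD_WRITE_SECTORS (PIO write) and
// cmd_48[1][0] yields IDE_CMD_READ_DMA_EXT (LBA48 DMA read)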


/** create IDE read/write command */

static bool
create_rw_taskfile(ide_device_info *device, ide_qrequest *qrequest,
	uint64 pos, size_t length, bool write)
{
	SHOW_FLOW0(3, "");

	// XXX disable any writes
/*	if (write)
		goto err;*/

	if (device->use_LBA) {
		if (device->use_48bits && (pos + length > 0xfffffff || length > 0x100)) {
			// use LBA48 only if necessary
			SHOW_FLOW0(3, "using LBA48");

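			// 28-bit commands can address sectors 0..0xfffffff and transfer
			// at most 0x100 sectors at once, hence the check above; e.g. a
			// (hypothetical) pos of 0xfffff00 with length 0x200 exceeds both
			// limits. LBA48 in turn is capped at 0xffff sectors here: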
			if (length > 0xffff)
				goto err;

			if (qrequest->queuable) {
				// queued LBA48
				device->tf_param_mask = ide_mask_features_48
					| ide_mask_sector_count
					| ide_mask_LBA_low_48
					| ide_mask_LBA_mid_48
					| ide_mask_LBA_high_48;

				device->tf.queued48.sector_count_0_7 = length & 0xff;
				device->tf.queued48.sector_count_8_15 = (length >> 8) & 0xff;
				device->tf.queued48.tag = qrequest->tag;
				device->tf.queued48.lba_0_7 = pos & 0xff;
				device->tf.queued48.lba_8_15 = (pos >> 8) & 0xff;
				device->tf.queued48.lba_16_23 = (pos >> 16) & 0xff;
				device->tf.queued48.lba_24_31 = (pos >> 24) & 0xff;
				device->tf.queued48.lba_32_39 = (pos >> 32) & 0xff;
				device->tf.queued48.lba_40_47 = (pos >> 40) & 0xff;
				device->tf.queued48.command = write ? IDE_CMD_WRITE_DMA_QUEUED_EXT
					: IDE_CMD_READ_DMA_QUEUED_EXT;
				return true;
			} else {
				// non-queued LBA48
				device->tf_param_mask = ide_mask_sector_count_48
					| ide_mask_LBA_low_48
					| ide_mask_LBA_mid_48
					| ide_mask_LBA_high_48;

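				// the 48-bit LBA and the 16-bit sector count are split into
				// single bytes; e.g. a (hypothetical) pos of 0x0123456789
				// yields lba_0_7 = 0x89, lba_8_15 = 0x67, lba_16_23 = 0x45,
				// lba_24_31 = 0x23, lba_32_39 = 0x01 and lba_40_47 = 0x00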
				device->tf.lba48.sector_count_0_7 = length & 0xff;
				device->tf.lba48.sector_count_8_15 = (length >> 8) & 0xff;
				device->tf.lba48.lba_0_7 = pos & 0xff;
				device->tf.lba48.lba_8_15 = (pos >> 8) & 0xff;
				device->tf.lba48.lba_16_23 = (pos >> 16) & 0xff;
				device->tf.lba48.lba_24_31 = (pos >> 24) & 0xff;
				device->tf.lba48.lba_32_39 = (pos >> 32) & 0xff;
				device->tf.lba48.lba_40_47 = (pos >> 40) & 0xff;
				device->tf.lba48.command = cmd_48[qrequest->uses_dma][write];
				return true;
			}
		} else {
			// normal LBA
			SHOW_FLOW0(3, "using LBA");

			if (length > 0x100)
				goto err;

			if (qrequest->queuable) {
				// queued LBA
				SHOW_FLOW(3, "creating DMA queued command, tag=%d", qrequest->tag);
				device->tf_param_mask = ide_mask_features
					| ide_mask_sector_count
					| ide_mask_LBA_low
					| ide_mask_LBA_mid
					| ide_mask_LBA_high
					| ide_mask_device_head;

				device->tf.queued.sector_count = length & 0xff;
				device->tf.queued.tag = qrequest->tag;
				device->tf.queued.lba_0_7 = pos & 0xff;
				device->tf.queued.lba_8_15 = (pos >> 8) & 0xff;
				device->tf.queued.lba_16_23 = (pos >> 16) & 0xff;
				device->tf.queued.lba_24_27 = (pos >> 24) & 0xf;
				device->tf.queued.command = write ? IDE_CMD_WRITE_DMA_QUEUED
					: IDE_CMD_READ_DMA_QUEUED;
				return true;
			} else {
				// non-queued LBA
				SHOW_FLOW0(3, "creating normal DMA/PIO command");
				device->tf_param_mask = ide_mask_sector_count
					| ide_mask_LBA_low
					| ide_mask_LBA_mid
					| ide_mask_LBA_high
					| ide_mask_device_head;

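				// LBA bits 24-27 live in the low nibble of the device/head
				// register, which is why ide_mask_device_head is included
				// in the parameter mask above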
				device->tf.lba.sector_count = length & 0xff;
				device->tf.lba.lba_0_7 = pos & 0xff;
				device->tf.lba.lba_8_15 = (pos >> 8) & 0xff;
				device->tf.lba.lba_16_23 = (pos >> 16) & 0xff;
				device->tf.lba.lba_24_27 = (pos >> 24) & 0xf;
				device->tf.lba.command = cmd_28[qrequest->uses_dma][write];
				return true;
			}
		}
	} else {
		// CHS mode
		// (probably, no one would notice if we dropped support for it)
		uint32 track_size, cylinder_offset, cylinder;
		ide_device_infoblock *infoblock = &device->infoblock;

		if (length > 0x100)
			goto err;

		device->tf.chs.mode = ide_mode_chs;

		device->tf_param_mask = ide_mask_sector_count
			| ide_mask_sector_number
			| ide_mask_cylinder_low
			| ide_mask_cylinder_high
			| ide_mask_device_head;

		device->tf.chs.sector_count = length & 0xff;

		track_size = infoblock->current_heads * infoblock->current_sectors;

		if (track_size == 0) {
			set_sense(device,
				SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_MEDIUM_FORMAT_CORRUPTED);
			return false;
		}

		cylinder = pos / track_size;

		device->tf.chs.cylinder_0_7 = cylinder & 0xff;
		device->tf.chs.cylinder_8_15 = (cylinder >> 8) & 0xff;

		cylinder_offset = pos - cylinder * track_size;

		device->tf.chs.sector_number = (cylinder_offset % infoblock->current_sectors + 1) & 0xff;
		device->tf.chs.head = cylinder_offset / infoblock->current_sectors;
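
		// worked example of the split above (hypothetical geometry with
		// 16 heads and 63 sectors per track, i.e. track_size = 1008):
		//   pos = 123456: cylinder = 122, cylinder_offset = 480,
		//   sector_number = 480 % 63 + 1 = 40 (sectors are 1-based),
		//   head = 480 / 63 = 7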

		device->tf.chs.command = cmd_28[qrequest->uses_dma][write];
		return true;
	}

	return true;

err:
	set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
	return false;
}


/**	execute read/write command
 *	pos - first block
 *	length - number of blocks
 */

void
ata_send_rw(ide_device_info *device, ide_qrequest *qrequest,
	uint64 pos, size_t length, bool write)
{
	ide_bus_info *bus = device->bus;
	uint32 timeout;

	// make a copy first, as the settings may get changed by the user
	// during execution
	qrequest->is_write = write;
	qrequest->uses_dma = device->DMA_enabled;

	if (qrequest->uses_dma) {
		if (!prepare_dma(device, qrequest)) {
			// fall back to PIO on error

			// if command queueing is used and another command is already
			// running, we cannot fall back to PIO immediately -> declare
			// the command as not queuable and resubmit it, so the SCSI bus
			// manager will block other requests on retry
			// (XXX this is not fine if the caller wants to recycle the CCB)
			if (device->num_running_reqs > 1) {
				qrequest->request->flags &= ~SCSI_ORDERED_QTAG;
				finish_retry(qrequest);
				return;
			}

			qrequest->uses_dma = false;
		}
	}

	if (!qrequest->uses_dma) {
		prep_PIO_transfer(device, qrequest);
		device->left_blocks = length;
	}

	// compose the command
	if (!create_rw_taskfile(device, qrequest, pos, length, write))
		goto err_setup;

	// if no timeout is specified, use the standard one
	timeout = qrequest->request->timeout > 0 ?
		qrequest->request->timeout : IDE_STD_TIMEOUT;

	// in DMA mode, we continue with "accessing";
	// on PIO read, we continue with "async waiting";
	// on PIO write, we continue with "accessing"
	if (!send_command(device, qrequest, !device->is_atapi, timeout,
			(!qrequest->uses_dma && !qrequest->is_write) ?
				ide_state_async_waiting : ide_state_accessing))
		goto err_send;

	if (qrequest->uses_dma) {
		// if queuing is used, we have to ask the device first whether it
		// wants to postpone the command
		// XXX: by using the bus release IRQ we wouldn't have to busy-wait
		// for a response, but I heard that IBM drives have problems with
		// that IRQ; to be evaluated
		if (qrequest->queuable) {
			if (!wait_for_drdy(device))
				goto err_send;

			if (check_rw_error(device, qrequest))
				goto err_send;

			if (device_released_bus(device)) {
				// the device enqueued the command, so we have to wait;
				// in access_finished, we'll ask the device whether it wants
				// to continue some other command
				bus->active_qrequest = NULL;

				access_finished(bus, device);
				// we may have rejected commands meanwhile, so tell
				// the SIM that it can resend them now
				scsi->cont_send_bus(bus->scsi_cookie);
				return;
			}

			//SHOW_ERROR0(2, "device executes command instantly");
		}

		start_dma_wait_no_lock(device, qrequest);
	} else {
		// on PIO read, we start with waiting; on PIO write, we can
		// transmit data immediately; we let the service thread do
		// the writing, so the caller can issue the next command
		// immediately (this optimization really pays off on SMP
		// systems only)
		SHOW_FLOW0(3, "Ready for PIO");
		if (qrequest->is_write) {
			SHOW_FLOW0(3, "Scheduling write DPC");
			scsi->schedule_dpc(bus->scsi_cookie, bus->irq_dpc, ide_dpc, bus);
		}
	}

	return;

err_setup:
	// error during setup
	if (qrequest->uses_dma)
		abort_dma(device, qrequest);

	finish_checksense(qrequest);
	return;

err_send:
	// error during/after send;
	// in this case, the device discards queued requests automatically
	if (qrequest->uses_dma)
		abort_dma(device, qrequest);

	finish_reset_queue(qrequest);
}


/** check for errors reported by a read/write command
 *	return: true, if an error occurred
 */

bool
check_rw_error(ide_device_info *device, ide_qrequest *qrequest)
{
	ide_bus_info *bus = device->bus;
	uint8 status;

	status = bus->controller->get_altstatus(bus->channel_cookie);

	if ((status & ide_status_err) != 0) {
		uint8 error;

		if (bus->controller->read_command_block_regs(bus->channel_cookie,
				&device->tf, ide_mask_error) != B_OK) {
			device->subsys_status = SCSI_HBA_ERR;
			return true;
		}

		error = device->tf.read.error;

		if ((error & ide_error_icrc) != 0) {
			set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_COM_CRC);
			return true;
		}

		if (qrequest->is_write) {
			if ((error & ide_error_wp) != 0) {
				set_sense(device, SCSIS_KEY_DATA_PROTECT, SCSIS_ASC_WRITE_PROTECTED);
				return true;
			}
		} else {
			if ((error & ide_error_unc) != 0) {
				set_sense(device, SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_UNREC_READ_ERR);
				return true;
			}
		}

		if ((error & ide_error_mc) != 0) {
			set_sense(device, SCSIS_KEY_UNIT_ATTENTION, SCSIS_ASC_MEDIUM_CHANGED);
			return true;
		}

		if ((error & ide_error_idnf) != 0) {
			// ID not found - invalid CHS mapping (was: seek error?)
			set_sense(device, SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_RANDOM_POS_ERROR);
			return true;
		}

		if ((error & ide_error_mcr) != 0) {
			// XXX proper sense key?
			// for TUR this case is not defined!?
			set_sense(device, SCSIS_KEY_UNIT_ATTENTION, SCSIS_ASC_REMOVAL_REQUESTED);
			return true;
		}

		if ((error & ide_error_nm) != 0) {
			set_sense(device, SCSIS_KEY_NOT_READY, SCSIS_ASC_NO_MEDIUM);
			return true;
		}

		if ((error & ide_error_abrt) != 0) {
			set_sense(device, SCSIS_KEY_ABORTED_COMMAND, SCSIS_ASC_NO_SENSE);
			return true;
		}

		set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
		return true;
	}

	return false;
}


/** check result of ATA command
 *	drdy_required - true if drdy must be set by device
 *	error_mask - bits to be checked in error register
 *	is_write - true, if command was a write command
 */

bool
check_output(ide_device_info *device, bool drdy_required,
	int error_mask, bool is_write)
{
	ide_bus_info *bus = device->bus;
	uint8 status;

	// check IRQ timeout
	if (bus->sync_wait_timeout) {
		bus->sync_wait_timeout = false;

		device->subsys_status = SCSI_CMD_TIMEOUT;
		return false;
	}

	status = bus->controller->get_altstatus(bus->channel_cookie);

	// if device is busy, other flags are indeterminate
	if ((status & ide_status_bsy) != 0) {
		device->subsys_status = SCSI_SEQUENCE_FAIL;
		return false;
	}

	if (drdy_required && ((status & ide_status_drdy) == 0)) {
		device->subsys_status = SCSI_SEQUENCE_FAIL;
		return false;
	}

	if ((status & ide_status_err) != 0) {
		uint8 error;

		if (bus->controller->read_command_block_regs(bus->channel_cookie,
				&device->tf, ide_mask_error) != B_OK) {
			device->subsys_status = SCSI_HBA_ERR;
			return false;
		}

		error = device->tf.read.error & error_mask;

		if ((error & ide_error_icrc) != 0) {
			set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_COM_CRC);
			return false;
		}

		if (is_write) {
			if ((error & ide_error_wp) != 0) {
				set_sense(device, SCSIS_KEY_DATA_PROTECT, SCSIS_ASC_WRITE_PROTECTED);
				return false;
			}
		} else {
			if ((error & ide_error_unc) != 0) {
				set_sense(device, SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_UNREC_READ_ERR);
				return false;
			}
		}

		if ((error & ide_error_mc) != 0) {
			// XXX proper sense key?
			set_sense(device, SCSIS_KEY_UNIT_ATTENTION, SCSIS_ASC_MEDIUM_CHANGED);
			return false;
		}

		if ((error & ide_error_idnf) != 0) {
			// XXX strange error code, don't really know what it means
			set_sense(device, SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_RANDOM_POS_ERROR);
			return false;
		}

		if ((error & ide_error_mcr) != 0) {
			// XXX proper sense key?
			set_sense(device, SCSIS_KEY_UNIT_ATTENTION, SCSIS_ASC_REMOVAL_REQUESTED);
			return false;
		}

		if ((error & ide_error_nm) != 0) {
			set_sense(device, SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_NO_MEDIUM);
			return false;
		}

		if ((error & ide_error_abrt) != 0) {
			set_sense(device, SCSIS_KEY_ABORTED_COMMAND, SCSIS_ASC_NO_SENSE);
			return false;
		}

		// either there was no error bit set or it was masked out
		set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
		return false;
	}

	return true;
}


/** execute SET FEATURES command
 *	set the subcommand in the task file before calling this
 */

static bool
device_set_feature(ide_device_info *device, int feature)
{
	device->tf_param_mask = ide_mask_features;

	device->tf.write.features = feature;
	device->tf.write.command = IDE_CMD_SET_FEATURES;

	if (!send_command(device, NULL, true, 1, ide_state_sync_waiting))
		return false;

	wait_for_sync(device->bus);

	return check_output(device, true, ide_error_abrt, false);
}


static bool
configure_rmsn(ide_device_info *device)
{
	ide_bus_info *bus = device->bus;
	int i;

	if (!device->infoblock.RMSN_supported
		|| device->infoblock._127_RMSN_support != 1)
		return true;

	if (!device_set_feature(device, IDE_CMD_SET_FEATURES_ENABLE_MSN))
		return false;

	bus->controller->read_command_block_regs(bus->channel_cookie, &device->tf,
		ide_mask_LBA_mid | ide_mask_LBA_high);

	for (i = 0; i < 5; ++i) {
		// don't use TUR here, as it doesn't check
		// ide_error_mcr | ide_error_mc | ide_error_wp
		// (note that we skip the wp check as well)
		device->combined_sense = 0;

		device->tf_param_mask = 0;
		device->tf.write.command = IDE_CMD_GET_MEDIA_STATUS;

		if (!send_command(device, NULL, true, 15, ide_state_sync_waiting))
			continue;

		if (check_output(device, true,
				ide_error_nm | ide_error_abrt | ide_error_mcr | ide_error_mc,
				true)
			|| decode_sense_asc_ascq(device->combined_sense) == SCSIS_ASC_NO_MEDIUM)
			return true;
	}

	return false;
}


static bool
configure_command_queueing(ide_device_info *device)
{
	device->CQ_enabled = device->CQ_supported = false;

	if (!device->bus->can_CQ
		|| !device->infoblock.DMA_QUEUED_supported)
		return initialize_qreq_array(device, 1);

	if (device->infoblock.RELEASE_irq_supported
		&& !device_set_feature(device, IDE_CMD_SET_FEATURES_DISABLE_REL_INT))
		dprintf("Cannot disable release irq\n");

	if (device->infoblock.SERVICE_irq_supported
		&& !device_set_feature(device, IDE_CMD_SET_FEATURES_DISABLE_SERV_INT))
		dprintf("Cannot disable service irq\n");

	device->CQ_enabled = device->CQ_supported = true;

	SHOW_INFO0(2, "Enabled command queueing");

	// official IBM docs talk about 31 queue entries, though
	// their disks report 32; let's hope their docs are wrong
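	// (the ATA IDENTIFY queue_depth field holds the maximum queue depth
	// minus one, hence the "+ 1" below; e.g. a reported value of 31
	// yields 32 qrequest entries)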
	return initialize_qreq_array(device, device->infoblock.queue_depth + 1);
}


bool
prep_ata(ide_device_info *device)
{
	ide_device_infoblock *infoblock = &device->infoblock;
	uint32 chs_capacity;

	SHOW_FLOW0(3, "");

	device->is_atapi = false;
	device->exec_io = ata_exec_io;
	device->last_lun = 0;

	// warning: ata == 0 means "this is ata"...
	if (infoblock->_0.ata.ATA != 0) {
		// CF has either the magic header or the CFA bit set;
		// we merge that to "CFA bit set" for easier testing later on
		if (*(uint16 *)infoblock == 0x848a)
			infoblock->CFA_supported = true;
		else
			return false;
	}

	SHOW_FLOW0(3, "1");

	if (!infoblock->_54_58_valid) {
		// normally, current_xxx contains the active CHS mapping,
		// but if the BIOS didn't call INITIALIZE DEVICE PARAMETERS,
		// the default mapping is used
		infoblock->current_sectors = infoblock->sectors;
		infoblock->current_cylinders = infoblock->cylinders;
		infoblock->current_heads = infoblock->heads;
	}

	// just in case capacity_xxx isn't initialized, calculate it manually
	// (this information seems to be redundant anyway; hopefully)
	chs_capacity = infoblock->current_sectors * infoblock->current_cylinders *
		infoblock->current_heads;

	infoblock->capacity_low = chs_capacity & 0xff;
	infoblock->capacity_high = chs_capacity >> 8;

	// checking the LBA_supported flag should be sufficient, but it seems
	// to be a good idea to check LBA_total_sectors as well
	device->use_LBA = infoblock->LBA_supported && infoblock->LBA_total_sectors != 0;

	if (device->use_LBA) {
		device->total_sectors = infoblock->LBA_total_sectors;
		device->tf.lba.mode = ide_mode_lba;
	} else {
		device->total_sectors = chs_capacity;
		device->tf.chs.mode = ide_mode_chs;
	}

	device->use_48bits = infoblock->_48_bit_addresses_supported;

	if (device->use_48bits)
		device->total_sectors = infoblock->LBA48_total_sectors;

	SHOW_FLOW0(3, "2");

	if (!configure_dma(device)
		|| !configure_command_queueing(device)
		|| !configure_rmsn(device))
		return false;

	SHOW_FLOW0(3, "3");

	return true;
}


void
enable_CQ(ide_device_info *device, bool enable)
{
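	// TODO: not implemented - the request is silently ignored, so command
	// queueing cannot be toggled after prep_ata() has configured it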
}