ata-all.c: revision 214016 vs. revision 214988
1/*-
 2 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/dev/ata/ata-all.c 214016 2010-10-18 11:30:13Z mav $");
28__FBSDID("$FreeBSD: head/sys/dev/ata/ata-all.c 214988 2010-11-08 15:36:15Z mav $");
29
30#include "opt_ata.h"
31#include <sys/param.h>
32#include <sys/systm.h>
33#include <sys/ata.h>
34#include <sys/kernel.h>
35#include <sys/module.h>
36#include <sys/endian.h>
37#include <sys/ctype.h>
38#include <sys/conf.h>
39#include <sys/bus.h>
40#include <sys/bio.h>
41#include <sys/malloc.h>
42#include <sys/sysctl.h>
43#include <sys/sema.h>
44#include <sys/taskqueue.h>
45#include <vm/uma.h>
46#include <machine/stdarg.h>
47#include <machine/resource.h>
48#include <machine/bus.h>
49#include <sys/rman.h>
50#include <dev/ata/ata-all.h>
51#include <dev/pci/pcivar.h>
52#include <ata_if.h>
53
54#ifdef ATA_CAM
55#include <cam/cam.h>
56#include <cam/cam_ccb.h>
57#include <cam/cam_sim.h>
58#include <cam/cam_xpt_sim.h>
59#include <cam/cam_debug.h>
60#endif
61
62#ifndef ATA_CAM
63/* device structure */
64static d_ioctl_t ata_ioctl;
65static struct cdevsw ata_cdevsw = {
66 .d_version = D_VERSION,
67 .d_flags = D_NEEDGIANT, /* we need this as newbus isn't mpsafe */
68 .d_ioctl = ata_ioctl,
69 .d_name = "ata",
70};
71#endif
72
73/* prototypes */
74#ifndef ATA_CAM
75static void ata_boot_attach(void);
76static device_t ata_add_child(device_t, struct ata_device *, int);
77#else
78static void ataaction(struct cam_sim *sim, union ccb *ccb);
79static void atapoll(struct cam_sim *sim);
80#endif
81static void ata_conn_event(void *, int);
82static void bswap(int8_t *, int);
83static void btrim(int8_t *, int);
84static void bpack(int8_t *, int8_t *, int);
85static void ata_interrupt_locked(void *data);
86static void ata_periodic_poll(void *data);
87
88/* global vars */
89MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
90int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL;
91struct intr_config_hook *ata_delayed_attach = NULL;
92devclass_t ata_devclass;
93uma_zone_t ata_request_zone;
94uma_zone_t ata_composite_zone;
95int ata_wc = 1;
96int ata_setmax = 0;
97int ata_dma_check_80pin = 1;
98
99/* local vars */
100static int ata_dma = 1;
101static int atapi_dma = 1;
102
103/* sysctl vars */
104SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
105TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
106SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0,
107 "ATA disk DMA mode control");
108TUNABLE_INT("hw.ata.ata_dma_check_80pin", &ata_dma_check_80pin);
109SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin,
110 CTLFLAG_RW, &ata_dma_check_80pin, 1,
111 "Check for 80pin cable before setting ATA DMA mode");
112TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma);
113SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RDTUN, &atapi_dma, 0,
114 "ATAPI device DMA mode control");
115TUNABLE_INT("hw.ata.wc", &ata_wc);
116SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RDTUN, &ata_wc, 0,
117 "ATA disk write caching");
118TUNABLE_INT("hw.ata.setmax", &ata_setmax);
119SYSCTL_INT(_hw_ata, OID_AUTO, setmax, CTLFLAG_RDTUN, &ata_setmax, 0,
120 "ATA disk set max native address");
121
122/*
123 * newbus device interface related functions
124 */
125int
126ata_probe(device_t dev)
127{
128 return 0;
129}
130
131int
132ata_attach(device_t dev)
133{
134 struct ata_channel *ch = device_get_softc(dev);
135 int error, rid;
136#ifdef ATA_CAM
137 struct cam_devq *devq;
138 const char *res;
139 char buf[64];
140 int i, mode;
141#endif
142
143 /* check that we have a virgin channel to attach */
144 if (ch->r_irq)
145 return EEXIST;
146
147 /* initialize the softc basics */
148 ch->dev = dev;
149 ch->state = ATA_IDLE;
150 bzero(&ch->state_mtx, sizeof(struct mtx));
151 mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
152 bzero(&ch->queue_mtx, sizeof(struct mtx));
153 mtx_init(&ch->queue_mtx, "ATA queue lock", NULL, MTX_DEF);
154 TAILQ_INIT(&ch->ata_queue);
155 TASK_INIT(&ch->conntask, 0, ata_conn_event, dev);
156#ifdef ATA_CAM
157 for (i = 0; i < 16; i++) {
158 ch->user[i].mode = 0;
159 snprintf(buf, sizeof(buf), "dev%d.mode", i);
160 if (resource_string_value(device_get_name(dev),
161 device_get_unit(dev), buf, &res) == 0)
162 mode = ata_str2mode(res);
163 else if (resource_string_value(device_get_name(dev),
164 device_get_unit(dev), "mode", &res) == 0)
165 mode = ata_str2mode(res);
166 else
167 mode = -1;
168 if (mode >= 0)
169 ch->user[i].mode = mode;
170 if (ch->flags & ATA_SATA)
171 ch->user[i].bytecount = 8192;
172 else
173 ch->user[i].bytecount = MAXPHYS;
174 ch->curr[i] = ch->user[i];
175 }
176#endif
177 callout_init(&ch->poll_callout, 1);
178
179 /* reset the controller HW, the channel and device(s) */
180 while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
181 pause("ataatch", 1);
182#ifndef ATA_CAM
183 ATA_RESET(dev);
184#endif
185 ATA_LOCKING(dev, ATA_LF_UNLOCK);
186
 187 /* allocate DMA resources if DMA HW present */
188 if (ch->dma.alloc)
189 ch->dma.alloc(dev);
190
191 /* setup interrupt delivery */
192 rid = ATA_IRQ_RID;
193 ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
194 RF_SHAREABLE | RF_ACTIVE);
195 if (!ch->r_irq) {
196 device_printf(dev, "unable to allocate interrupt\n");
197 return ENXIO;
198 }
199 if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
200 ata_interrupt, ch, &ch->ih))) {
201 bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
202 device_printf(dev, "unable to setup interrupt\n");
203 return error;
204 }
205 if (ch->flags & ATA_PERIODIC_POLL)
206 callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
207
208#ifndef ATA_CAM
209 /* probe and attach devices on this channel unless we are in early boot */
210 if (!ata_delayed_attach)
211 ata_identify(dev);
212 return (0);
213#else
214 mtx_lock(&ch->state_mtx);
215 /* Create the device queue for our SIM. */
216 devq = cam_simq_alloc(1);
217 if (devq == NULL) {
218 device_printf(dev, "Unable to allocate simq\n");
219 error = ENOMEM;
220 goto err1;
221 }
222 /* Construct SIM entry */
223 ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
224 device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
225 if (ch->sim == NULL) {
226 device_printf(dev, "unable to allocate sim\n");
227 cam_simq_free(devq);
228 error = ENOMEM;
229 goto err1;
230 }
231 if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
232 device_printf(dev, "unable to register xpt bus\n");
233 error = ENXIO;
234 goto err2;
235 }
236 if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
237 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
238 device_printf(dev, "unable to create path\n");
239 error = ENXIO;
240 goto err3;
241 }
242 mtx_unlock(&ch->state_mtx);
243 return (0);
244
245err3:
246 xpt_bus_deregister(cam_sim_path(ch->sim));
247err2:
248 cam_sim_free(ch->sim, /*free_devq*/TRUE);
249 ch->sim = NULL;
250err1:
251 bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
252 mtx_unlock(&ch->state_mtx);
253 if (ch->flags & ATA_PERIODIC_POLL)
254 callout_drain(&ch->poll_callout);
255 return (error);
256#endif
257}
258
259int
260ata_detach(device_t dev)
261{
262 struct ata_channel *ch = device_get_softc(dev);
263#ifndef ATA_CAM
264 device_t *children;
265 int nchildren, i;
266#endif
267
268 /* check that we have a valid channel to detach */
269 if (!ch->r_irq)
270 return ENXIO;
271
 272 /* grab the channel lock so no new requests get launched */
273 mtx_lock(&ch->state_mtx);
274 ch->state |= ATA_STALL_QUEUE;
275 mtx_unlock(&ch->state_mtx);
276 if (ch->flags & ATA_PERIODIC_POLL)
277 callout_drain(&ch->poll_callout);
278
279#ifndef ATA_CAM
280 /* detach & delete all children */
281 if (!device_get_children(dev, &children, &nchildren)) {
282 for (i = 0; i < nchildren; i++)
283 if (children[i])
284 device_delete_child(dev, children[i]);
285 free(children, M_TEMP);
286 }
287#endif
288 taskqueue_drain(taskqueue_thread, &ch->conntask);
289
290#ifdef ATA_CAM
291 mtx_lock(&ch->state_mtx);
292 xpt_async(AC_LOST_DEVICE, ch->path, NULL);
293 xpt_free_path(ch->path);
294 xpt_bus_deregister(cam_sim_path(ch->sim));
295 cam_sim_free(ch->sim, /*free_devq*/TRUE);
296 ch->sim = NULL;
297 mtx_unlock(&ch->state_mtx);
298#endif
299
300 /* release resources */
301 bus_teardown_intr(dev, ch->r_irq, ch->ih);
302 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
303 ch->r_irq = NULL;
304
 305 /* free DMA resources if DMA HW present */
306 if (ch->dma.free)
307 ch->dma.free(dev);
308
309 mtx_destroy(&ch->state_mtx);
310 mtx_destroy(&ch->queue_mtx);
311 return 0;
312}
313
314static void
315ata_conn_event(void *context, int dummy)
316{
317 device_t dev = (device_t)context;
318#ifdef ATA_CAM
319 struct ata_channel *ch = device_get_softc(dev);
320 union ccb *ccb;
321
322 mtx_lock(&ch->state_mtx);
323 if (ch->sim == NULL) {
324 mtx_unlock(&ch->state_mtx);
325 return;
326 }
327 ata_reinit(dev);
328 if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
329 return;
330 if (xpt_create_path(&ccb->ccb_h.path, NULL,
331 cam_sim_path(ch->sim),
332 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
333 xpt_free_ccb(ccb);
334 return;
335 }
336 xpt_rescan(ccb);
337 mtx_unlock(&ch->state_mtx);
338#else
339 ata_reinit(dev);
340#endif
341}
342
343int
344ata_reinit(device_t dev)
345{
346 struct ata_channel *ch = device_get_softc(dev);
347 struct ata_request *request;
348#ifndef ATA_CAM
349 device_t *children;
350 int nchildren, i;
351
352 /* check that we have a valid channel to reinit */
353 if (!ch || !ch->r_irq)
354 return ENXIO;
355
356 if (bootverbose)
357 device_printf(dev, "reiniting channel ..\n");
358
359 /* poll for locking the channel */
360 while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
361 pause("atarini", 1);
362
 363 /* catch any request currently in ch->running */
364 mtx_lock(&ch->state_mtx);
365 if (ch->state & ATA_STALL_QUEUE) {
 366 /* Recursive reinits and reinits during detach prohibited. */
367 mtx_unlock(&ch->state_mtx);
368 return (ENXIO);
369 }
370 if ((request = ch->running))
371 callout_stop(&request->callout);
372 ch->running = NULL;
373
 374 /* unconditionally grab the channel lock */
375 ch->state |= ATA_STALL_QUEUE;
376 mtx_unlock(&ch->state_mtx);
377
378 /* reset the controller HW, the channel and device(s) */
379 ATA_RESET(dev);
380
 381 /* reinit the children and delete any that fail */
382 if (!device_get_children(dev, &children, &nchildren)) {
383 mtx_lock(&Giant); /* newbus suckage it needs Giant */
384 for (i = 0; i < nchildren; i++) {
385 /* did any children go missing ? */
386 if (children[i] && device_is_attached(children[i]) &&
387 ATA_REINIT(children[i])) {
388 /*
389 * if we had a running request and its device matches
390 * this child we need to inform the request that the
391 * device is gone.
392 */
393 if (request && request->dev == children[i]) {
394 request->result = ENXIO;
395 device_printf(request->dev, "FAILURE - device detached\n");
396
 397 /* if not a timeout, finish the request here */
398 if (!(request->flags & ATA_R_TIMEOUT))
399 ata_finish(request);
400 request = NULL;
401 }
402 device_delete_child(dev, children[i]);
403 }
404 }
405 free(children, M_TEMP);
406 mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */
407 }
408
 409 /* if we still have a good request, put it on the queue again */
410 if (request && !(request->flags & ATA_R_TIMEOUT)) {
411 device_printf(request->dev,
412 "WARNING - %s requeued due to channel reset",
413 ata_cmd2str(request));
414 if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
415 printf(" LBA=%ju", request->u.ata.lba);
416 printf("\n");
417 request->flags |= ATA_R_REQUEUE;
418 ata_queue_request(request);
419 }
420
 421 /* we're done, release the channel for new work */
422 mtx_lock(&ch->state_mtx);
423 ch->state = ATA_IDLE;
424 mtx_unlock(&ch->state_mtx);
425 ATA_LOCKING(dev, ATA_LF_UNLOCK);
426
427 /* Add new children. */
428/* ata_identify(dev); */
429
430 if (bootverbose)
431 device_printf(dev, "reinit done ..\n");
432
433 /* kick off requests on the queue */
434 ata_start(dev);
435#else
436 xpt_freeze_simq(ch->sim, 1);
437 if ((request = ch->running)) {
438 ch->running = NULL;
439 if (ch->state == ATA_ACTIVE)
440 ch->state = ATA_IDLE;
441 callout_stop(&request->callout);
442 if (ch->dma.unload)
443 ch->dma.unload(request);
444 request->result = ERESTART;
445 ata_cam_end_transaction(dev, request);
446 }
447 /* reset the controller HW, the channel and device(s) */
448 ATA_RESET(dev);
449 /* Tell the XPT about the event */
450 xpt_async(AC_BUS_RESET, ch->path, NULL);
451 xpt_release_simq(ch->sim, TRUE);
452#endif
453 return(0);
454}
455
456int
457ata_suspend(device_t dev)
458{
459 struct ata_channel *ch;
460
461 /* check for valid device */
462 if (!dev || !(ch = device_get_softc(dev)))
463 return ENXIO;
464
465 if (ch->flags & ATA_PERIODIC_POLL)
466 callout_drain(&ch->poll_callout);
467#ifdef ATA_CAM
468 mtx_lock(&ch->state_mtx);
469 xpt_freeze_simq(ch->sim, 1);
470 while (ch->state != ATA_IDLE)
471 msleep(ch, &ch->state_mtx, PRIBIO, "atasusp", hz/100);
472 mtx_unlock(&ch->state_mtx);
473#else
474 /* wait for the channel to be IDLE or detached before suspending */
475 while (ch->r_irq) {
476 mtx_lock(&ch->state_mtx);
477 if (ch->state == ATA_IDLE) {
478 ch->state = ATA_ACTIVE;
479 mtx_unlock(&ch->state_mtx);
480 break;
481 }
482 mtx_unlock(&ch->state_mtx);
483 tsleep(ch, PRIBIO, "atasusp", hz/10);
484 }
485 ATA_LOCKING(dev, ATA_LF_UNLOCK);
486#endif
487 return(0);
488}
489
490int
491ata_resume(device_t dev)
492{
493 struct ata_channel *ch;
494 int error;
495
496 /* check for valid device */
497 if (!dev || !(ch = device_get_softc(dev)))
498 return ENXIO;
499
500#ifdef ATA_CAM
501 mtx_lock(&ch->state_mtx);
502 error = ata_reinit(dev);
503 xpt_release_simq(ch->sim, TRUE);
504 mtx_unlock(&ch->state_mtx);
505#else
 506 /* reinit the devices, we don't know what mode/state they are in */
507 error = ata_reinit(dev);
508 /* kick off requests on the queue */
509 ata_start(dev);
510#endif
511 if (ch->flags & ATA_PERIODIC_POLL)
512 callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
513 return error;
514}
515
516void
517ata_interrupt(void *data)
518{
519#ifdef ATA_CAM
520 struct ata_channel *ch = (struct ata_channel *)data;
521
522 mtx_lock(&ch->state_mtx);
523#endif
524 ata_interrupt_locked(data);
525#ifdef ATA_CAM
526 mtx_unlock(&ch->state_mtx);
527#endif
528}
529
530static void
531ata_interrupt_locked(void *data)
532{
533 struct ata_channel *ch = (struct ata_channel *)data;
534 struct ata_request *request;
535
536#ifndef ATA_CAM
537 mtx_lock(&ch->state_mtx);
538#endif
539 do {
 540 /* ignore interrupt if it's not for us */
541 if (ch->hw.status && !ch->hw.status(ch->dev))
542 break;
543
544 /* do we have a running request */
545 if (!(request = ch->running))
546 break;
547
548 ATA_DEBUG_RQ(request, "interrupt");
549
 550 /* safety check for the right state */
551 if (ch->state == ATA_IDLE) {
552 device_printf(request->dev, "interrupt on idle channel ignored\n");
553 break;
554 }
555
556 /*
557 * we have the HW locks, so end the transaction for this request
 558 * if it finishes immediately, otherwise wait for the next interrupt
559 */
560 if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
561 ch->running = NULL;
562 if (ch->state == ATA_ACTIVE)
563 ch->state = ATA_IDLE;
564#ifdef ATA_CAM
565 ata_cam_end_transaction(ch->dev, request);
566#else
567 mtx_unlock(&ch->state_mtx);
568 ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
569 ata_finish(request);
570#endif
571 return;
572 }
573 } while (0);
574#ifndef ATA_CAM
575 mtx_unlock(&ch->state_mtx);
576#endif
577}
578
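/*
 * Periodic poll handler: re-arms itself once per second and runs the
 * interrupt handler; used for channels flagged ATA_PERIODIC_POLL.
 */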
579static void
580ata_periodic_poll(void *data)
581{
582 struct ata_channel *ch = (struct ata_channel *)data;
583
584 callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
585 ata_interrupt(ch);
586}
587
588void
589ata_print_cable(device_t dev, u_int8_t *who)
590{
591 device_printf(dev,
592 "DMA limited to UDMA33, %s found non-ATA66 cable\n", who);
593}
594
595int
596ata_check_80pin(device_t dev, int mode)
597{
598 struct ata_device *atadev = device_get_softc(dev);
599
600 if (!ata_dma_check_80pin) {
601 if (bootverbose)
602 device_printf(dev, "Skipping 80pin cable check\n");
603 return mode;
604 }
605
606 if (mode > ATA_UDMA2 && !(atadev->param.hwres & ATA_CABLE_ID)) {
607 ata_print_cable(dev, "device");
608 mode = ATA_UDMA2;
609 }
610 return mode;
611}
612
613void
614ata_setmode(device_t dev)
615{
616 struct ata_channel *ch = device_get_softc(device_get_parent(dev));
617 struct ata_device *atadev = device_get_softc(dev);
618 int error, mode, pmode;
619
620 mode = atadev->mode;
621 do {
622 pmode = mode = ata_limit_mode(dev, mode, ATA_DMA_MAX);
623 mode = ATA_SETMODE(device_get_parent(dev), atadev->unit, mode);
624 if ((ch->flags & (ATA_CHECKS_CABLE | ATA_SATA)) == 0)
625 mode = ata_check_80pin(dev, mode);
 626 } while (pmode != mode); /* Iterate until successful negotiation. */
627 error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
628 if (bootverbose)
629 device_printf(dev, "%ssetting %s\n",
630 (error) ? "FAILURE " : "", ata_mode2str(mode));
631 atadev->mode = mode;
632}
633
634/*
635 * device related interfaces
636 */
637#ifndef ATA_CAM
638static int
639ata_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
640 int32_t flag, struct thread *td)
641{
642 device_t device, *children;
643 struct ata_ioc_devices *devices = (struct ata_ioc_devices *)data;
644 int *value = (int *)data;
645 int i, nchildren, error = ENOTTY;
646
647 switch (cmd) {
648 case IOCATAGMAXCHANNEL:
649 /* In case we have channel 0..n this will return n+1. */
650 *value = devclass_get_maxunit(ata_devclass);
651 error = 0;
652 break;
653
654 case IOCATAREINIT:
655 if (*value >= devclass_get_maxunit(ata_devclass) ||
656 !(device = devclass_get_device(ata_devclass, *value)) ||
657 !device_is_attached(device))
658 return ENXIO;
659 error = ata_reinit(device);
660 break;
661
662 case IOCATAATTACH:
663 if (*value >= devclass_get_maxunit(ata_devclass) ||
664 !(device = devclass_get_device(ata_devclass, *value)) ||
665 !device_is_attached(device))
666 return ENXIO;
667 error = DEVICE_ATTACH(device);
668 break;
669
670 case IOCATADETACH:
671 if (*value >= devclass_get_maxunit(ata_devclass) ||
672 !(device = devclass_get_device(ata_devclass, *value)) ||
673 !device_is_attached(device))
674 return ENXIO;
675 error = DEVICE_DETACH(device);
676 break;
677
678 case IOCATADEVICES:
679 if (devices->channel >= devclass_get_maxunit(ata_devclass) ||
680 !(device = devclass_get_device(ata_devclass, devices->channel)) ||
681 !device_is_attached(device))
682 return ENXIO;
683 bzero(devices->name[0], 32);
684 bzero(&devices->params[0], sizeof(struct ata_params));
685 bzero(devices->name[1], 32);
686 bzero(&devices->params[1], sizeof(struct ata_params));
687 if (!device_get_children(device, &children, &nchildren)) {
688 for (i = 0; i < nchildren; i++) {
689 if (children[i] && device_is_attached(children[i])) {
690 struct ata_device *atadev = device_get_softc(children[i]);
691
692 if (atadev->unit == ATA_MASTER) { /* XXX SOS PM */
693 strncpy(devices->name[0],
694 device_get_nameunit(children[i]), 32);
695 bcopy(&atadev->param, &devices->params[0],
696 sizeof(struct ata_params));
697 }
698 if (atadev->unit == ATA_SLAVE) { /* XXX SOS PM */
699 strncpy(devices->name[1],
700 device_get_nameunit(children[i]), 32);
701 bcopy(&atadev->param, &devices->params[1],
702 sizeof(struct ata_params));
703 }
704 }
705 }
706 free(children, M_TEMP);
707 error = 0;
708 }
709 else
710 error = ENODEV;
711 break;
712
713 default:
714 if (ata_raid_ioctl_func)
715 error = ata_raid_ioctl_func(cmd, data);
716 }
717 return error;
718}
719#endif
720
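/*
 * Per-device ioctl handler: command pass-through (IOCATAREQUEST),
 * identify data, transfer mode and spindown get/set requests.
 */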
721int
722ata_device_ioctl(device_t dev, u_long cmd, caddr_t data)
723{
724 struct ata_device *atadev = device_get_softc(dev);
725 struct ata_channel *ch = device_get_softc(device_get_parent(dev));
726 struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data;
727 struct ata_params *params = (struct ata_params *)data;
728 int *mode = (int *)data;
729 struct ata_request *request;
730 caddr_t buf;
731 int error;
732
733 switch (cmd) {
734 case IOCATAREQUEST:
735 if (ioc_request->count >
736 (ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS)) {
737 return (EFBIG);
738 }
739 if (!(buf = malloc(ioc_request->count, M_ATA, M_NOWAIT))) {
740 return ENOMEM;
741 }
742 if (!(request = ata_alloc_request())) {
743 free(buf, M_ATA);
744 return ENOMEM;
745 }
746 request->dev = atadev->dev;
747 if (ioc_request->flags & ATA_CMD_WRITE) {
748 error = copyin(ioc_request->data, buf, ioc_request->count);
749 if (error) {
750 free(buf, M_ATA);
751 ata_free_request(request);
752 return error;
753 }
754 }
755 if (ioc_request->flags & ATA_CMD_ATAPI) {
756 request->flags = ATA_R_ATAPI;
757 bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16);
758 }
759 else {
760 request->u.ata.command = ioc_request->u.ata.command;
761 request->u.ata.feature = ioc_request->u.ata.feature;
762 request->u.ata.lba = ioc_request->u.ata.lba;
763 request->u.ata.count = ioc_request->u.ata.count;
764 }
765 request->timeout = ioc_request->timeout;
766 request->data = buf;
767 request->bytecount = ioc_request->count;
768 request->transfersize = request->bytecount;
769 if (ioc_request->flags & ATA_CMD_CONTROL)
770 request->flags |= ATA_R_CONTROL;
771 if (ioc_request->flags & ATA_CMD_READ)
772 request->flags |= ATA_R_READ;
773 if (ioc_request->flags & ATA_CMD_WRITE)
774 request->flags |= ATA_R_WRITE;
775 ata_queue_request(request);
776 if (request->flags & ATA_R_ATAPI) {
777 bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense,
778 sizeof(struct atapi_sense));
779 }
780 else {
781 ioc_request->u.ata.command = request->u.ata.command;
782 ioc_request->u.ata.feature = request->u.ata.feature;
783 ioc_request->u.ata.lba = request->u.ata.lba;
784 ioc_request->u.ata.count = request->u.ata.count;
785 }
786 ioc_request->error = request->result;
787 if (ioc_request->flags & ATA_CMD_READ)
788 error = copyout(buf, ioc_request->data, ioc_request->count);
789 else
790 error = 0;
791 free(buf, M_ATA);
792 ata_free_request(request);
793 return error;
794
795 case IOCATAGPARM:
796 ata_getparam(atadev, 0);
797 bcopy(&atadev->param, params, sizeof(struct ata_params));
798 return 0;
799
800 case IOCATASMODE:
801 atadev->mode = *mode;
802 ata_setmode(dev);
803 return 0;
804
805 case IOCATAGMODE:
806 *mode = atadev->mode |
807 (ATA_GETREV(device_get_parent(dev), atadev->unit) << 8);
808 return 0;
809 case IOCATASSPINDOWN:
810 atadev->spindown = *mode;
811 return 0;
812 case IOCATAGSPINDOWN:
813 *mode = atadev->spindown;
814 return 0;
815 default:
816 return ENOTTY;
817 }
818}
819
820#ifndef ATA_CAM
821static void
822ata_boot_attach(void)
823{
824 struct ata_channel *ch;
825 int ctlr;
826
827 mtx_lock(&Giant); /* newbus suckage it needs Giant */
828
 829 /* kick off probe and attach on all channels */
830 for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) {
831 if ((ch = devclass_get_softc(ata_devclass, ctlr))) {
832 ata_identify(ch->dev);
833 }
834 }
835
836 /* release the hook that got us here, we are only needed once during boot */
837 if (ata_delayed_attach) {
838 config_intrhook_disestablish(ata_delayed_attach);
839 free(ata_delayed_attach, M_TEMP);
840 ata_delayed_attach = NULL;
841 }
842
843 mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */
844}
845#endif
846
847/*
848 * misc support functions
849 */
850#ifndef ATA_CAM
851static device_t
852ata_add_child(device_t parent, struct ata_device *atadev, int unit)
853{
854 device_t child;
855
856 if ((child = device_add_child(parent, NULL, unit))) {
857 device_set_softc(child, atadev);
858 device_quiet(child);
859 atadev->dev = child;
860 atadev->max_iosize = DEV_BSIZE;
861 atadev->mode = ATA_PIO_MAX;
862 }
863 return child;
864}
865#endif
866
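/*
 * Issue an ATA or ATAPI IDENTIFY to the device, convert the returned
 * parameter page to host byte order, clean up the model/revision/serial
 * strings and, on initial probe, pick a default transfer mode.
 */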
867int
868ata_getparam(struct ata_device *atadev, int init)
869{
870 struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
871 struct ata_request *request;
872 const char *res;
873 char buf[64];
874 u_int8_t command = 0;
875 int error = ENOMEM, retries = 2, mode = -1;
876
877 if (ch->devices & (ATA_ATA_MASTER << atadev->unit))
878 command = ATA_ATA_IDENTIFY;
879 if (ch->devices & (ATA_ATAPI_MASTER << atadev->unit))
880 command = ATA_ATAPI_IDENTIFY;
881 if (!command)
882 return ENXIO;
883
884 while (retries-- > 0 && error) {
885 if (!(request = ata_alloc_request()))
886 break;
887 request->dev = atadev->dev;
888 request->timeout = 1;
889 request->retries = 0;
890 request->u.ata.command = command;
891 request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT);
892 if (!bootverbose)
893 request->flags |= ATA_R_QUIET;
894 request->data = (void *)&atadev->param;
895 request->bytecount = sizeof(struct ata_params);
896 request->donecount = 0;
897 request->transfersize = DEV_BSIZE;
898 ata_queue_request(request);
899 error = request->result;
900 ata_free_request(request);
901 }
902
903 if (!error && (isprint(atadev->param.model[0]) ||
904 isprint(atadev->param.model[1]))) {
905 struct ata_params *atacap = &atadev->param;
906 int16_t *ptr;
907
908 for (ptr = (int16_t *)atacap;
909 ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) {
910 *ptr = le16toh(*ptr);
911 }
912 if (!(!strncmp(atacap->model, "FX", 2) ||
913 !strncmp(atacap->model, "NEC", 3) ||
914 !strncmp(atacap->model, "Pioneer", 7) ||
915 !strncmp(atacap->model, "SHARP", 5))) {
916 bswap(atacap->model, sizeof(atacap->model));
917 bswap(atacap->revision, sizeof(atacap->revision));
918 bswap(atacap->serial, sizeof(atacap->serial));
919 }
920 btrim(atacap->model, sizeof(atacap->model));
921 bpack(atacap->model, atacap->model, sizeof(atacap->model));
922 btrim(atacap->revision, sizeof(atacap->revision));
923 bpack(atacap->revision, atacap->revision, sizeof(atacap->revision));
924 btrim(atacap->serial, sizeof(atacap->serial));
925 bpack(atacap->serial, atacap->serial, sizeof(atacap->serial));
926
927 if (bootverbose)
928 printf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n",
929 device_get_unit(ch->dev),
930 ata_unit2str(atadev),
931 ata_mode2str(ata_pmode(atacap)),
932 ata_mode2str(ata_wmode(atacap)),
933 ata_mode2str(ata_umode(atacap)),
934 (atacap->hwres & ATA_CABLE_ID) ? "80":"40");
935
936 if (init) {
937 char buffer[64];
938
939 sprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision);
940 device_set_desc_copy(atadev->dev, buffer);
941 if ((atadev->param.config & ATA_PROTO_ATAPI) &&
942 (atadev->param.config != ATA_CFA_MAGIC1) &&
943 (atadev->param.config != ATA_CFA_MAGIC2)) {
944 if (atapi_dma &&
945 (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR &&
946 ata_umode(&atadev->param) >= ATA_UDMA2)
947 atadev->mode = ATA_DMA_MAX;
948 }
949 else {
950 if (ata_dma &&
951 (ata_umode(&atadev->param) > 0 ||
952 ata_wmode(&atadev->param) > 0))
953 atadev->mode = ATA_DMA_MAX;
954 }
955 snprintf(buf, sizeof(buf), "dev%d.mode", atadev->unit);
956 if (resource_string_value(device_get_name(ch->dev),
957 device_get_unit(ch->dev), buf, &res) == 0)
958 mode = ata_str2mode(res);
959 else if (resource_string_value(device_get_name(ch->dev),
960 device_get_unit(ch->dev), "mode", &res) == 0)
961 mode = ata_str2mode(res);
962 if (mode >= 0)
963 atadev->mode = mode;
964 }
965 }
966 else {
967 if (!error)
968 error = ENXIO;
969 }
970 return error;
971}
972
973#ifndef ATA_CAM
974int
975ata_identify(device_t dev)
976{
977 struct ata_channel *ch = device_get_softc(dev);
978 struct ata_device *atadev;
979 device_t *children;
980 device_t child, master = NULL;
981 int nchildren, i, n = ch->devices;
982
983 if (bootverbose)
984 device_printf(dev, "Identifying devices: %08x\n", ch->devices);
985
986 mtx_lock(&Giant);
987 /* Skip existing devices. */
988 if (!device_get_children(dev, &children, &nchildren)) {
989 for (i = 0; i < nchildren; i++) {
990 if (children[i] && (atadev = device_get_softc(children[i])))
991 n &= ~((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << atadev->unit);
992 }
993 free(children, M_TEMP);
994 }
995 /* Create new devices. */
996 if (bootverbose)
997 device_printf(dev, "New devices: %08x\n", n);
998 if (n == 0) {
999 mtx_unlock(&Giant);
1000 return (0);
1001 }
1002 for (i = 0; i < ATA_PM; ++i) {
1003 if (n & (((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << i))) {
1004 int unit = -1;
1005
1006 if (!(atadev = malloc(sizeof(struct ata_device),
1007 M_ATA, M_NOWAIT | M_ZERO))) {
1008 device_printf(dev, "out of memory\n");
1009 return ENOMEM;
1010 }
1011 atadev->unit = i;
1012#ifdef ATA_STATIC_ID
1013 if (n & (ATA_ATA_MASTER << i))
1014 unit = (device_get_unit(dev) << 1) + i;
1015#endif
1016 if ((child = ata_add_child(dev, atadev, unit))) {
1017 /*
1018 * PATA slave should be identified first, to allow
1019 * device cable detection on master to work properly.
1020 */
1021 if (i == 0 && (n & ATA_PORTMULTIPLIER) == 0 &&
1022 (n & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << 1)) != 0) {
1023 master = child;
1024 continue;
1025 }
1026 if (ata_getparam(atadev, 1)) {
1027 device_delete_child(dev, child);
1028 free(atadev, M_ATA);
1029 }
1030 }
1031 else
1032 free(atadev, M_ATA);
1033 }
1034 }
1035 if (master) {
1036 atadev = device_get_softc(master);
1037 if (ata_getparam(atadev, 1)) {
1038 device_delete_child(dev, master);
1039 free(atadev, M_ATA);
1040 }
1041 }
1042 bus_generic_probe(dev);
1043 bus_generic_attach(dev);
1044 mtx_unlock(&Giant);
1045 return 0;
1046}
1047#endif
1048
1049void
1050ata_default_registers(device_t dev)
1051{
1052 struct ata_channel *ch = device_get_softc(dev);
1053
 1054 /* fill in the defaults from what's set up already */
1055 ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
1056 ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
1057 ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
1058 ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
1059 ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
1060 ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
1061 ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
1062 ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
1063}
1064
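/*
 * Promote taskfile commands to their 48-bit forms when the LBA/count of
 * the request requires it and the device supports LBA48; FLUSHCACHE and
 * the (READ/SET) MAX ADDRESS commands are promoted whenever LBA48 is
 * supported.
 */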
1065void
1066ata_modify_if_48bit(struct ata_request *request)
1067{
1068 struct ata_channel *ch = device_get_softc(request->parent);
1069 struct ata_device *atadev = device_get_softc(request->dev);
1070
1071 request->flags &= ~ATA_R_48BIT;
1072
1073 if (((request->u.ata.lba + request->u.ata.count) >= ATA_MAX_28BIT_LBA ||
1074 request->u.ata.count > 256) &&
1075 atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
1076
1077 /* translate command into 48bit version */
1078 switch (request->u.ata.command) {
1079 case ATA_READ:
1080 request->u.ata.command = ATA_READ48;
1081 break;
1082 case ATA_READ_MUL:
1083 request->u.ata.command = ATA_READ_MUL48;
1084 break;
1085 case ATA_READ_DMA:
1086 if (ch->flags & ATA_NO_48BIT_DMA) {
1087 if (request->transfersize > DEV_BSIZE)
1088 request->u.ata.command = ATA_READ_MUL48;
1089 else
1090 request->u.ata.command = ATA_READ48;
1091 request->flags &= ~ATA_R_DMA;
1092 }
1093 else
1094 request->u.ata.command = ATA_READ_DMA48;
1095 break;
1096 case ATA_READ_DMA_QUEUED:
1097 if (ch->flags & ATA_NO_48BIT_DMA) {
1098 if (request->transfersize > DEV_BSIZE)
1099 request->u.ata.command = ATA_READ_MUL48;
1100 else
1101 request->u.ata.command = ATA_READ48;
1102 request->flags &= ~ATA_R_DMA;
1103 }
1104 else
1105 request->u.ata.command = ATA_READ_DMA_QUEUED48;
1106 break;
1107 case ATA_WRITE:
1108 request->u.ata.command = ATA_WRITE48;
1109 break;
1110 case ATA_WRITE_MUL:
1111 request->u.ata.command = ATA_WRITE_MUL48;
1112 break;
1113 case ATA_WRITE_DMA:
1114 if (ch->flags & ATA_NO_48BIT_DMA) {
1115 if (request->transfersize > DEV_BSIZE)
1116 request->u.ata.command = ATA_WRITE_MUL48;
1117 else
1118 request->u.ata.command = ATA_WRITE48;
1119 request->flags &= ~ATA_R_DMA;
1120 }
1121 else
1122 request->u.ata.command = ATA_WRITE_DMA48;
1123 break;
1124 case ATA_WRITE_DMA_QUEUED:
1125 if (ch->flags & ATA_NO_48BIT_DMA) {
1126 if (request->transfersize > DEV_BSIZE)
1127 request->u.ata.command = ATA_WRITE_MUL48;
1128 else
1129 request->u.ata.command = ATA_WRITE48;
1131 request->flags &= ~ATA_R_DMA;
1132 }
1133 else
1134 request->u.ata.command = ATA_WRITE_DMA_QUEUED48;
1135 break;
1136 case ATA_FLUSHCACHE:
1137 request->u.ata.command = ATA_FLUSHCACHE48;
1138 break;
1139 case ATA_SET_MAX_ADDRESS:
1140 request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1141 break;
1142 default:
1143 return;
1144 }
1145 request->flags |= ATA_R_48BIT;
1146 }
1147 else if (atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
1148
1149 /* translate command into 48bit version */
1150 switch (request->u.ata.command) {
1151 case ATA_FLUSHCACHE:
1152 request->u.ata.command = ATA_FLUSHCACHE48;
1153 break;
1154 case ATA_READ_NATIVE_MAX_ADDRESS:
1155 request->u.ata.command = ATA_READ_NATIVE_MAX_ADDRESS48;
1156 break;
1157 case ATA_SET_MAX_ADDRESS:
1158 request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1159 break;
1160 default:
1161 return;
1162 }
1163 request->flags |= ATA_R_48BIT;
1164 }
1165}
1166
1167void
1168ata_udelay(int interval)
1169{
 1170 /* for now just use DELAY, the timer/sleep subsystems are not there yet */
1171 if (1 || interval < (1000000/hz) || ata_delayed_attach)
1172 DELAY(interval);
1173 else
1174 pause("ataslp", interval/(1000000/hz));
1175}
1176
1177char *
1178ata_unit2str(struct ata_device *atadev)
1179{
1180 struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
1181 static char str[8];
1182
1183 if (ch->devices & ATA_PORTMULTIPLIER)
1184 sprintf(str, "port%d", atadev->unit);
1185 else
1186 sprintf(str, "%s", atadev->unit == ATA_MASTER ? "master" : "slave");
1187 return str;
1188}
1189
1190const char *
1191ata_mode2str(int mode)
1192{
1193 switch (mode) {
1194 case -1: return "UNSUPPORTED";
1195 case ATA_PIO0: return "PIO0";
1196 case ATA_PIO1: return "PIO1";
1197 case ATA_PIO2: return "PIO2";
1198 case ATA_PIO3: return "PIO3";
1199 case ATA_PIO4: return "PIO4";
1200 case ATA_WDMA0: return "WDMA0";
1201 case ATA_WDMA1: return "WDMA1";
1202 case ATA_WDMA2: return "WDMA2";
1203 case ATA_UDMA0: return "UDMA16";
1204 case ATA_UDMA1: return "UDMA25";
1205 case ATA_UDMA2: return "UDMA33";
1206 case ATA_UDMA3: return "UDMA40";
1207 case ATA_UDMA4: return "UDMA66";
1208 case ATA_UDMA5: return "UDMA100";
1209 case ATA_UDMA6: return "UDMA133";
1210 case ATA_SA150: return "SATA150";
1211 case ATA_SA300: return "SATA300";
1212 default:
1213 if (mode & ATA_DMA_MASK)
1214 return "BIOSDMA";
1215 else
1216 return "BIOSPIO";
1217 }
1218}
1219
1220int
1221ata_str2mode(const char *str)
1222{
1223
1224 if (!strcasecmp(str, "PIO0")) return (ATA_PIO0);
1225 if (!strcasecmp(str, "PIO1")) return (ATA_PIO1);
1226 if (!strcasecmp(str, "PIO2")) return (ATA_PIO2);
1227 if (!strcasecmp(str, "PIO3")) return (ATA_PIO3);
1228 if (!strcasecmp(str, "PIO4")) return (ATA_PIO4);
1229 if (!strcasecmp(str, "WDMA0")) return (ATA_WDMA0);
1230 if (!strcasecmp(str, "WDMA1")) return (ATA_WDMA1);
1231 if (!strcasecmp(str, "WDMA2")) return (ATA_WDMA2);
1232 if (!strcasecmp(str, "UDMA0")) return (ATA_UDMA0);
1233 if (!strcasecmp(str, "UDMA16")) return (ATA_UDMA0);
1234 if (!strcasecmp(str, "UDMA1")) return (ATA_UDMA1);
1235 if (!strcasecmp(str, "UDMA25")) return (ATA_UDMA1);
1236 if (!strcasecmp(str, "UDMA2")) return (ATA_UDMA2);
1237 if (!strcasecmp(str, "UDMA33")) return (ATA_UDMA2);
1238 if (!strcasecmp(str, "UDMA3")) return (ATA_UDMA3);
1239 if (!strcasecmp(str, "UDMA44")) return (ATA_UDMA3);
1240 if (!strcasecmp(str, "UDMA4")) return (ATA_UDMA4);
1241 if (!strcasecmp(str, "UDMA66")) return (ATA_UDMA4);
1242 if (!strcasecmp(str, "UDMA5")) return (ATA_UDMA5);
1243 if (!strcasecmp(str, "UDMA100")) return (ATA_UDMA5);
1244 if (!strcasecmp(str, "UDMA6")) return (ATA_UDMA6);
1245 if (!strcasecmp(str, "UDMA133")) return (ATA_UDMA6);
1246 return (-1);
1247}
1248
1249const char *
1250ata_satarev2str(int rev)
1251{
1252 switch (rev) {
1253 case 0: return "";
1254 case 1: return "SATA 1.5Gb/s";
1255 case 2: return "SATA 3Gb/s";
1256 case 3: return "SATA 6Gb/s";
1257 case 0xff: return "SATA";
1258 default: return "???";
1259 }
1260}
1261
1262int
1263ata_atapi(device_t dev, int target)
1264{
1265 struct ata_channel *ch = device_get_softc(dev);
1266
1267 return (ch->devices & (ATA_ATAPI_MASTER << target));
1268}
1269
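/*
 * ata_pmode/ata_wmode/ata_umode decode the PIO, WDMA and UDMA capability
 * words of the IDENTIFY data into the highest mode the device advertises;
 * ata_wmode/ata_umode return -1 when none is reported.
 */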
1270int
1271ata_pmode(struct ata_params *ap)
1272{
1273 if (ap->atavalid & ATA_FLAG_64_70) {
1274 if (ap->apiomodes & 0x02)
1275 return ATA_PIO4;
1276 if (ap->apiomodes & 0x01)
1277 return ATA_PIO3;
1278 }
1279 if (ap->mwdmamodes & 0x04)
1280 return ATA_PIO4;
1281 if (ap->mwdmamodes & 0x02)
1282 return ATA_PIO3;
1283 if (ap->mwdmamodes & 0x01)
1284 return ATA_PIO2;
1285 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200)
1286 return ATA_PIO2;
1287 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100)
1288 return ATA_PIO1;
1289 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000)
1290 return ATA_PIO0;
1291 return ATA_PIO0;
1292}
1293
1294int
1295ata_wmode(struct ata_params *ap)
1296{
1297 if (ap->mwdmamodes & 0x04)
1298 return ATA_WDMA2;
1299 if (ap->mwdmamodes & 0x02)
1300 return ATA_WDMA1;
1301 if (ap->mwdmamodes & 0x01)
1302 return ATA_WDMA0;
1303 return -1;
1304}
1305
1306int
1307ata_umode(struct ata_params *ap)
1308{
1309 if (ap->atavalid & ATA_FLAG_88) {
1310 if (ap->udmamodes & 0x40)
1311 return ATA_UDMA6;
1312 if (ap->udmamodes & 0x20)
1313 return ATA_UDMA5;
1314 if (ap->udmamodes & 0x10)
1315 return ATA_UDMA4;
1316 if (ap->udmamodes & 0x08)
1317 return ATA_UDMA3;
1318 if (ap->udmamodes & 0x04)
1319 return ATA_UDMA2;
1320 if (ap->udmamodes & 0x02)
1321 return ATA_UDMA1;
1322 if (ap->udmamodes & 0x01)
1323 return ATA_UDMA0;
1324 }
1325 return -1;
1326}
1327
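/* Clamp the requested transfer mode to maxmode and to what the device reports. */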
1328int
1329ata_limit_mode(device_t dev, int mode, int maxmode)
1330{
1331 struct ata_device *atadev = device_get_softc(dev);
1332
1333 if (maxmode && mode > maxmode)
1334 mode = maxmode;
1335
1336 if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0)
1337 return min(mode, ata_umode(&atadev->param));
1338
1339 if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0)
1340 return min(mode, ata_wmode(&atadev->param));
1341
1342 if (mode > ata_pmode(&atadev->param))
1343 return min(mode, ata_pmode(&atadev->param));
1344
1345 return mode;
1346}
1347
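/*
 * bswap/btrim/bpack normalize the ASCII strings in the IDENTIFY data:
 * byte-swap each 16-bit word, map NULs and underscores to spaces, strip
 * trailing blanks and collapse runs of spaces.
 */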
1348static void
1349bswap(int8_t *buf, int len)
1350{
1351 u_int16_t *ptr = (u_int16_t*)(buf + len);
1352
1353 while (--ptr >= (u_int16_t*)buf)
1354 *ptr = ntohs(*ptr);
1355}
1356
1357static void
1358btrim(int8_t *buf, int len)
1359{
1360 int8_t *ptr;
1361
1362 for (ptr = buf; ptr < buf+len; ++ptr)
1363 if (!*ptr || *ptr == '_')
1364 *ptr = ' ';
1365 for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr)
1366 *ptr = 0;
1367}
1368
1369static void
1370bpack(int8_t *src, int8_t *dst, int len)
1371{
1372 int i, j, blank;
1373
1374 for (i = j = blank = 0 ; i < len; i++) {
1375 if (blank && src[i] == ' ') continue;
1376 if (blank && src[i] != ' ') {
1377 dst[j++] = src[i];
1378 blank = 0;
1379 continue;
1380 }
1381 if (src[i] == ' ') {
1382 blank = 1;
1383 if (i == 0)
1384 continue;
1385 }
1386 dst[j++] = src[i];
1387 }
1388 if (j < len)
1389 dst[j] = 0x00;
1390}
1391
1392#ifdef ATA_CAM
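/*
 * Translate a CAM ATA or SCSI (ATAPI) CCB into an ata_request and hand it
 * to the channel hardware; if it completes immediately, finish it here.
 */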
1393void
1394ata_cam_begin_transaction(device_t dev, union ccb *ccb)
1395{
1396 struct ata_channel *ch = device_get_softc(dev);
1397 struct ata_request *request;
1398
1399 if (!(request = ata_alloc_request())) {
1400 device_printf(dev, "FAILURE - out of memory in start\n");
1401 ccb->ccb_h.status = CAM_REQ_INVALID;
1402 xpt_done(ccb);
1403 return;
1404 }
1405 bzero(request, sizeof(*request));
1406
1407 /* setup request */
1408 request->dev = NULL;
1409 request->parent = dev;
1410 request->unit = ccb->ccb_h.target_id;
1411 if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1412 request->data = ccb->ataio.data_ptr;
1413 request->bytecount = ccb->ataio.dxfer_len;
1414 request->u.ata.command = ccb->ataio.cmd.command;
1415 request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) |
1416 (uint16_t)ccb->ataio.cmd.features;
1417 request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) |
1418 (uint16_t)ccb->ataio.cmd.sector_count;
1419 if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) {
1420 request->flags |= ATA_R_48BIT;
1421 request->u.ata.lba =
1422 ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) |
1423 ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) |
1424 ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24);
1425 } else {
1426 request->u.ata.lba =
1427 ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24);
1428 }
1429 request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) |
1430 ((uint64_t)ccb->ataio.cmd.lba_mid << 8) |
1431 (uint64_t)ccb->ataio.cmd.lba_low;
1432 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1433 ccb->ataio.cmd.flags & CAM_ATAIO_DMA)
1434 request->flags |= ATA_R_DMA;
1435 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1436 request->flags |= ATA_R_READ;
1437 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1438 request->flags |= ATA_R_WRITE;
1439 } else {
1440 request->data = ccb->csio.data_ptr;
1441 request->bytecount = ccb->csio.dxfer_len;
1442 bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
1443 ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
1444 request->u.atapi.ccb, ccb->csio.cdb_len);
1445 request->flags |= ATA_R_ATAPI;
1446 if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
1447 request->flags |= ATA_R_ATAPI16;
1448 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1449 ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
1450 request->flags |= ATA_R_DMA;
1451 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1452 request->flags |= ATA_R_READ;
1453 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1454 request->flags |= ATA_R_WRITE;
1455 }
1456 request->transfersize = min(request->bytecount,
1457 ch->curr[ccb->ccb_h.target_id].bytecount);
1458 request->retries = 0;
1459 request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
1460 callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
1461 request->ccb = ccb;
1462
1463 ch->running = request;
1464 ch->state = ATA_ACTIVE;
1465 if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
1466 ch->running = NULL;
1467 ch->state = ATA_IDLE;
1468 ata_cam_end_transaction(dev, request);
1469 return;
1470 }
1471}
1472
1473void
1474ata_cam_end_transaction(device_t dev, struct ata_request *request)
1475{
1476 struct ata_channel *ch = device_get_softc(dev);
1477 union ccb *ccb = request->ccb;
1478 int fatalerr = 0;
1479
1480 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1481 if (request->flags & ATA_R_TIMEOUT) {
1482 xpt_freeze_simq(ch->sim, 1);
1483 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1484 ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ;
1485 fatalerr = 1;
1486 } else if (request->status & ATA_S_ERROR) {
1487 if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1488 ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
1489 } else {
1490 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1491 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1492 }
1493 } else if (request->result == ERESTART)
1494 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1495 else if (request->result != 0)
1496 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1497 else
1498 ccb->ccb_h.status |= CAM_REQ_CMP;
1499 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
1500 !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
1501 xpt_freeze_devq(ccb->ccb_h.path, 1);
1502 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1503 }
1504 if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1505 ((request->status & ATA_S_ERROR) ||
1506 (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) {
1507 struct ata_res *res = &ccb->ataio.res;
1508 res->status = request->status;
1509 res->error = request->error;
1510 res->lba_low = request->u.ata.lba;
1511 res->lba_mid = request->u.ata.lba >> 8;
1512 res->lba_high = request->u.ata.lba >> 16;
1513 res->device = request->u.ata.lba >> 24;
1514 res->lba_low_exp = request->u.ata.lba >> 24;
1515 res->lba_mid_exp = request->u.ata.lba >> 32;
1516 res->lba_high_exp = request->u.ata.lba >> 40;
1517 res->sector_count = request->u.ata.count;
1518 res->sector_count_exp = request->u.ata.count >> 8;
1519 }
29
30#include "opt_ata.h"
31#include <sys/param.h>
32#include <sys/systm.h>
33#include <sys/ata.h>
34#include <sys/kernel.h>
35#include <sys/module.h>
36#include <sys/endian.h>
37#include <sys/ctype.h>
38#include <sys/conf.h>
39#include <sys/bus.h>
40#include <sys/bio.h>
41#include <sys/malloc.h>
42#include <sys/sysctl.h>
43#include <sys/sema.h>
44#include <sys/taskqueue.h>
45#include <vm/uma.h>
46#include <machine/stdarg.h>
47#include <machine/resource.h>
48#include <machine/bus.h>
49#include <sys/rman.h>
50#include <dev/ata/ata-all.h>
51#include <dev/pci/pcivar.h>
52#include <ata_if.h>
53
54#ifdef ATA_CAM
55#include <cam/cam.h>
56#include <cam/cam_ccb.h>
57#include <cam/cam_sim.h>
58#include <cam/cam_xpt_sim.h>
59#include <cam/cam_debug.h>
60#endif
61
62#ifndef ATA_CAM
63/* device structure */
64static d_ioctl_t ata_ioctl;
65static struct cdevsw ata_cdevsw = {
66 .d_version = D_VERSION,
67 .d_flags = D_NEEDGIANT, /* we need this as newbus isn't mpsafe */
68 .d_ioctl = ata_ioctl,
69 .d_name = "ata",
70};
71#endif
72
73/* prototypes */
74#ifndef ATA_CAM
75static void ata_boot_attach(void);
76static device_t ata_add_child(device_t, struct ata_device *, int);
77#else
78static void ataaction(struct cam_sim *sim, union ccb *ccb);
79static void atapoll(struct cam_sim *sim);
80#endif
81static void ata_conn_event(void *, int);
82static void bswap(int8_t *, int);
83static void btrim(int8_t *, int);
84static void bpack(int8_t *, int8_t *, int);
85static void ata_interrupt_locked(void *data);
86static void ata_periodic_poll(void *data);
87
88/* global vars */
89MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
90int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL;
91struct intr_config_hook *ata_delayed_attach = NULL;
92devclass_t ata_devclass;
93uma_zone_t ata_request_zone;
94uma_zone_t ata_composite_zone;
95int ata_wc = 1;
96int ata_setmax = 0;
97int ata_dma_check_80pin = 1;
98
99/* local vars */
100static int ata_dma = 1;
101static int atapi_dma = 1;
102
103/* sysctl vars */
104SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
105TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
106SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0,
107 "ATA disk DMA mode control");
108TUNABLE_INT("hw.ata.ata_dma_check_80pin", &ata_dma_check_80pin);
109SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin,
110 CTLFLAG_RW, &ata_dma_check_80pin, 1,
111 "Check for 80pin cable before setting ATA DMA mode");
112TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma);
113SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RDTUN, &atapi_dma, 0,
114 "ATAPI device DMA mode control");
115TUNABLE_INT("hw.ata.wc", &ata_wc);
116SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RDTUN, &ata_wc, 0,
117 "ATA disk write caching");
118TUNABLE_INT("hw.ata.setmax", &ata_setmax);
119SYSCTL_INT(_hw_ata, OID_AUTO, setmax, CTLFLAG_RDTUN, &ata_setmax, 0,
120 "ATA disk set max native address");
121
122/*
123 * newbus device interface related functions
124 */
125int
126ata_probe(device_t dev)
127{
128 return 0;
129}
130
131int
132ata_attach(device_t dev)
133{
134 struct ata_channel *ch = device_get_softc(dev);
135 int error, rid;
136#ifdef ATA_CAM
137 struct cam_devq *devq;
138 const char *res;
139 char buf[64];
140 int i, mode;
141#endif
142
143 /* check that we have a virgin channel to attach */
144 if (ch->r_irq)
145 return EEXIST;
146
147 /* initialize the softc basics */
148 ch->dev = dev;
149 ch->state = ATA_IDLE;
150 bzero(&ch->state_mtx, sizeof(struct mtx));
151 mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
152 bzero(&ch->queue_mtx, sizeof(struct mtx));
153 mtx_init(&ch->queue_mtx, "ATA queue lock", NULL, MTX_DEF);
154 TAILQ_INIT(&ch->ata_queue);
155 TASK_INIT(&ch->conntask, 0, ata_conn_event, dev);
156#ifdef ATA_CAM
157 for (i = 0; i < 16; i++) {
158 ch->user[i].mode = 0;
159 snprintf(buf, sizeof(buf), "dev%d.mode", i);
160 if (resource_string_value(device_get_name(dev),
161 device_get_unit(dev), buf, &res) == 0)
162 mode = ata_str2mode(res);
163 else if (resource_string_value(device_get_name(dev),
164 device_get_unit(dev), "mode", &res) == 0)
165 mode = ata_str2mode(res);
166 else
167 mode = -1;
168 if (mode >= 0)
169 ch->user[i].mode = mode;
170 if (ch->flags & ATA_SATA)
171 ch->user[i].bytecount = 8192;
172 else
173 ch->user[i].bytecount = MAXPHYS;
174 ch->curr[i] = ch->user[i];
175 }
176#endif
177 callout_init(&ch->poll_callout, 1);
178
179 /* reset the controller HW, the channel and device(s) */
180 while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
181 pause("ataatch", 1);
182#ifndef ATA_CAM
183 ATA_RESET(dev);
184#endif
185 ATA_LOCKING(dev, ATA_LF_UNLOCK);
186
187 /* allocate DMA resources if DMA HW present*/
188 if (ch->dma.alloc)
189 ch->dma.alloc(dev);
190
191 /* setup interrupt delivery */
192 rid = ATA_IRQ_RID;
193 ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
194 RF_SHAREABLE | RF_ACTIVE);
195 if (!ch->r_irq) {
196 device_printf(dev, "unable to allocate interrupt\n");
197 return ENXIO;
198 }
199 if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
200 ata_interrupt, ch, &ch->ih))) {
201 bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
202 device_printf(dev, "unable to setup interrupt\n");
203 return error;
204 }
205 if (ch->flags & ATA_PERIODIC_POLL)
206 callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
207
208#ifndef ATA_CAM
209 /* probe and attach devices on this channel unless we are in early boot */
210 if (!ata_delayed_attach)
211 ata_identify(dev);
212 return (0);
213#else
214 mtx_lock(&ch->state_mtx);
215 /* Create the device queue for our SIM. */
216 devq = cam_simq_alloc(1);
217 if (devq == NULL) {
218 device_printf(dev, "Unable to allocate simq\n");
219 error = ENOMEM;
220 goto err1;
221 }
222 /* Construct SIM entry */
223 ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
224 device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
225 if (ch->sim == NULL) {
226 device_printf(dev, "unable to allocate sim\n");
227 cam_simq_free(devq);
228 error = ENOMEM;
229 goto err1;
230 }
231 if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
232 device_printf(dev, "unable to register xpt bus\n");
233 error = ENXIO;
234 goto err2;
235 }
236 if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
237 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
238 device_printf(dev, "unable to create path\n");
239 error = ENXIO;
240 goto err3;
241 }
242 mtx_unlock(&ch->state_mtx);
243 return (0);
244
245err3:
246 xpt_bus_deregister(cam_sim_path(ch->sim));
247err2:
248 cam_sim_free(ch->sim, /*free_devq*/TRUE);
249 ch->sim = NULL;
250err1:
251 bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
252 mtx_unlock(&ch->state_mtx);
253 if (ch->flags & ATA_PERIODIC_POLL)
254 callout_drain(&ch->poll_callout);
255 return (error);
256#endif
257}
258
259int
260ata_detach(device_t dev)
261{
262 struct ata_channel *ch = device_get_softc(dev);
263#ifndef ATA_CAM
264 device_t *children;
265 int nchildren, i;
266#endif
267
268 /* check that we have a valid channel to detach */
269 if (!ch->r_irq)
270 return ENXIO;
271
272 /* grap the channel lock so no new requests gets launched */
273 mtx_lock(&ch->state_mtx);
274 ch->state |= ATA_STALL_QUEUE;
275 mtx_unlock(&ch->state_mtx);
276 if (ch->flags & ATA_PERIODIC_POLL)
277 callout_drain(&ch->poll_callout);
278
279#ifndef ATA_CAM
280 /* detach & delete all children */
281 if (!device_get_children(dev, &children, &nchildren)) {
282 for (i = 0; i < nchildren; i++)
283 if (children[i])
284 device_delete_child(dev, children[i]);
285 free(children, M_TEMP);
286 }
287#endif
288 taskqueue_drain(taskqueue_thread, &ch->conntask);
289
290#ifdef ATA_CAM
291 mtx_lock(&ch->state_mtx);
292 xpt_async(AC_LOST_DEVICE, ch->path, NULL);
293 xpt_free_path(ch->path);
294 xpt_bus_deregister(cam_sim_path(ch->sim));
295 cam_sim_free(ch->sim, /*free_devq*/TRUE);
296 ch->sim = NULL;
297 mtx_unlock(&ch->state_mtx);
298#endif
299
300 /* release resources */
301 bus_teardown_intr(dev, ch->r_irq, ch->ih);
302 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
303 ch->r_irq = NULL;
304
305 /* free DMA resources if DMA HW present*/
306 if (ch->dma.free)
307 ch->dma.free(dev);
308
309 mtx_destroy(&ch->state_mtx);
310 mtx_destroy(&ch->queue_mtx);
311 return 0;
312}
313
314static void
315ata_conn_event(void *context, int dummy)
316{
317 device_t dev = (device_t)context;
318#ifdef ATA_CAM
319 struct ata_channel *ch = device_get_softc(dev);
320 union ccb *ccb;
321
322 mtx_lock(&ch->state_mtx);
323 if (ch->sim == NULL) {
324 mtx_unlock(&ch->state_mtx);
325 return;
326 }
327 ata_reinit(dev);
328 if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
329 return;
330 if (xpt_create_path(&ccb->ccb_h.path, NULL,
331 cam_sim_path(ch->sim),
332 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
333 xpt_free_ccb(ccb);
334 return;
335 }
336 xpt_rescan(ccb);
337 mtx_unlock(&ch->state_mtx);
338#else
339 ata_reinit(dev);
340#endif
341}
342
343int
344ata_reinit(device_t dev)
345{
346 struct ata_channel *ch = device_get_softc(dev);
347 struct ata_request *request;
348#ifndef ATA_CAM
349 device_t *children;
350 int nchildren, i;
351
352 /* check that we have a valid channel to reinit */
353 if (!ch || !ch->r_irq)
354 return ENXIO;
355
356 if (bootverbose)
357 device_printf(dev, "reiniting channel ..\n");
358
359 /* poll for locking the channel */
360 while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
361 pause("atarini", 1);
362
363 /* catch eventual request in ch->running */
364 mtx_lock(&ch->state_mtx);
365 if (ch->state & ATA_STALL_QUEUE) {
366 /* Recursive reinits and reinits during detach prohobited. */
367 mtx_unlock(&ch->state_mtx);
368 return (ENXIO);
369 }
370 if ((request = ch->running))
371 callout_stop(&request->callout);
372 ch->running = NULL;
373
374 /* unconditionally grap the channel lock */
375 ch->state |= ATA_STALL_QUEUE;
376 mtx_unlock(&ch->state_mtx);
377
378 /* reset the controller HW, the channel and device(s) */
379 ATA_RESET(dev);
380
381	    /* reinit the children and delete any that fail */
382 if (!device_get_children(dev, &children, &nchildren)) {
383	    mtx_lock(&Giant); /* newbus suckage, it needs Giant */
384 for (i = 0; i < nchildren; i++) {
385	    /* did any children go missing? */
386 if (children[i] && device_is_attached(children[i]) &&
387 ATA_REINIT(children[i])) {
388 /*
389 * if we had a running request and its device matches
390	 * this child, we need to inform the request that the
391 * device is gone.
392 */
393 if (request && request->dev == children[i]) {
394 request->result = ENXIO;
395 device_printf(request->dev, "FAILURE - device detached\n");
396
397		/* if not timed out, finish the request here */
398 if (!(request->flags & ATA_R_TIMEOUT))
399 ata_finish(request);
400 request = NULL;
401 }
402 device_delete_child(dev, children[i]);
403 }
404 }
405 free(children, M_TEMP);
406 mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */
407 }
408
409	    /* if we still have a good request, put it on the queue again */
410 if (request && !(request->flags & ATA_R_TIMEOUT)) {
411 device_printf(request->dev,
412 "WARNING - %s requeued due to channel reset",
413 ata_cmd2str(request));
414 if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
415 printf(" LBA=%ju", request->u.ata.lba);
416 printf("\n");
417 request->flags |= ATA_R_REQUEUE;
418 ata_queue_request(request);
419 }
420
421	    /* we're done, release the channel for new work */
422 mtx_lock(&ch->state_mtx);
423 ch->state = ATA_IDLE;
424 mtx_unlock(&ch->state_mtx);
425 ATA_LOCKING(dev, ATA_LF_UNLOCK);
426
427 /* Add new children. */
428/* ata_identify(dev); */
429
430 if (bootverbose)
431 device_printf(dev, "reinit done ..\n");
432
433 /* kick off requests on the queue */
434 ata_start(dev);
435#else
436 xpt_freeze_simq(ch->sim, 1);
437 if ((request = ch->running)) {
438 ch->running = NULL;
439 if (ch->state == ATA_ACTIVE)
440 ch->state = ATA_IDLE;
441 callout_stop(&request->callout);
442 if (ch->dma.unload)
443 ch->dma.unload(request);
444 request->result = ERESTART;
445 ata_cam_end_transaction(dev, request);
446 }
447 /* reset the controller HW, the channel and device(s) */
448 ATA_RESET(dev);
449 /* Tell the XPT about the event */
450 xpt_async(AC_BUS_RESET, ch->path, NULL);
451 xpt_release_simq(ch->sim, TRUE);
452#endif
453 return(0);
454}
455
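/*
 * Suspend support: stop the periodic poll and wait for the channel to go
 * idle (freezing the CAM SIM queue when ATA_CAM is defined) before the
 * controller is powered down.
 */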
456int
457ata_suspend(device_t dev)
458{
459 struct ata_channel *ch;
460
461 /* check for valid device */
462 if (!dev || !(ch = device_get_softc(dev)))
463 return ENXIO;
464
465 if (ch->flags & ATA_PERIODIC_POLL)
466 callout_drain(&ch->poll_callout);
467#ifdef ATA_CAM
468 mtx_lock(&ch->state_mtx);
469 xpt_freeze_simq(ch->sim, 1);
470 while (ch->state != ATA_IDLE)
471 msleep(ch, &ch->state_mtx, PRIBIO, "atasusp", hz/100);
472 mtx_unlock(&ch->state_mtx);
473#else
474 /* wait for the channel to be IDLE or detached before suspending */
475 while (ch->r_irq) {
476 mtx_lock(&ch->state_mtx);
477 if (ch->state == ATA_IDLE) {
478 ch->state = ATA_ACTIVE;
479 mtx_unlock(&ch->state_mtx);
480 break;
481 }
482 mtx_unlock(&ch->state_mtx);
483 tsleep(ch, PRIBIO, "atasusp", hz/10);
484 }
485 ATA_LOCKING(dev, ATA_LF_UNLOCK);
486#endif
487 return(0);
488}
489
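/*
 * Resume support: reinitialize the channel, restart queued work and
 * re-arm the periodic poll callout if it is in use.
 */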
490int
491ata_resume(device_t dev)
492{
493 struct ata_channel *ch;
494 int error;
495
496 /* check for valid device */
497 if (!dev || !(ch = device_get_softc(dev)))
498 return ENXIO;
499
500#ifdef ATA_CAM
501 mtx_lock(&ch->state_mtx);
502 error = ata_reinit(dev);
503 xpt_release_simq(ch->sim, TRUE);
504 mtx_unlock(&ch->state_mtx);
505#else
506	    /* reinit the devices, we don't know what mode/state they are in */
507 error = ata_reinit(dev);
508 /* kick off requests on the queue */
509 ata_start(dev);
510#endif
511 if (ch->flags & ATA_PERIODIC_POLL)
512 callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
513 return error;
514}
515
516void
517ata_interrupt(void *data)
518{
519#ifdef ATA_CAM
520 struct ata_channel *ch = (struct ata_channel *)data;
521
522 mtx_lock(&ch->state_mtx);
523#endif
524 ata_interrupt_locked(data);
525#ifdef ATA_CAM
526 mtx_unlock(&ch->state_mtx);
527#endif
528}
529
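/*
 * Interrupt body, called with the channel state lock held: make sure the
 * interrupt is ours and that a request is actually running on a non-idle
 * channel, then let the hardware layer complete the transaction.
 */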
530static void
531ata_interrupt_locked(void *data)
532{
533 struct ata_channel *ch = (struct ata_channel *)data;
534 struct ata_request *request;
535
536#ifndef ATA_CAM
537 mtx_lock(&ch->state_mtx);
538#endif
539 do {
540	/* ignore interrupt if it's not for us */
541 if (ch->hw.status && !ch->hw.status(ch->dev))
542 break;
543
544 /* do we have a running request */
545 if (!(request = ch->running))
546 break;
547
548 ATA_DEBUG_RQ(request, "interrupt");
549
550	/* safety check for the right state */
551 if (ch->state == ATA_IDLE) {
552 device_printf(request->dev, "interrupt on idle channel ignored\n");
553 break;
554 }
555
556 /*
557 * we have the HW locks, so end the transaction for this request
558	 * if it finishes immediately; otherwise wait for the next interrupt
559 */
560 if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
561 ch->running = NULL;
562 if (ch->state == ATA_ACTIVE)
563 ch->state = ATA_IDLE;
564#ifdef ATA_CAM
565 ata_cam_end_transaction(ch->dev, request);
566#else
567 mtx_unlock(&ch->state_mtx);
568 ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
569 ata_finish(request);
570#endif
571 return;
572 }
573 } while (0);
574#ifndef ATA_CAM
575 mtx_unlock(&ch->state_mtx);
576#endif
577}
578
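/*
 * One-second self-rearming callout used with ATA_PERIODIC_POLL: invoke
 * the interrupt handler to pick up completions whose interrupt was lost.
 */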
579static void
580ata_periodic_poll(void *data)
581{
582 struct ata_channel *ch = (struct ata_channel *)data;
583
584 callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
585 ata_interrupt(ch);
586}
587
588void
589ata_print_cable(device_t dev, u_int8_t *who)
590{
591 device_printf(dev,
592 "DMA limited to UDMA33, %s found non-ATA66 cable\n", who);
593}
594
595int
596ata_check_80pin(device_t dev, int mode)
597{
598 struct ata_device *atadev = device_get_softc(dev);
599
600 if (!ata_dma_check_80pin) {
601 if (bootverbose)
602 device_printf(dev, "Skipping 80pin cable check\n");
603 return mode;
604 }
605
606 if (mode > ATA_UDMA2 && !(atadev->param.hwres & ATA_CABLE_ID)) {
607 ata_print_cable(dev, "device");
608 mode = ATA_UDMA2;
609 }
610 return mode;
611}
612
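/*
 * Negotiate a transfer mode: clamp the requested mode, let the controller
 * choose and re-check the 40/80 pin cable until the value settles, then
 * program the device via SET FEATURES/SET TRANSFER MODE.
 */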
613void
614ata_setmode(device_t dev)
615{
616 struct ata_channel *ch = device_get_softc(device_get_parent(dev));
617 struct ata_device *atadev = device_get_softc(dev);
618 int error, mode, pmode;
619
620 mode = atadev->mode;
621 do {
622 pmode = mode = ata_limit_mode(dev, mode, ATA_DMA_MAX);
623 mode = ATA_SETMODE(device_get_parent(dev), atadev->unit, mode);
624 if ((ch->flags & (ATA_CHECKS_CABLE | ATA_SATA)) == 0)
625 mode = ata_check_80pin(dev, mode);
626	    } while (pmode != mode); /* Iterate till successful negotiation. */
627 error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
628 if (bootverbose)
629 device_printf(dev, "%ssetting %s\n",
630 (error) ? "FAILURE " : "", ata_mode2str(mode));
631 atadev->mode = mode;
632}
633
634/*
635 * device related interfaces
636 */
637#ifndef ATA_CAM
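/*
 * ioctl handler for the /dev/ata control device: channel-wide operations
 * (reinit, attach, detach, device enumeration); unknown commands are
 * passed to the ATA RAID ioctl hook if one is registered.
 */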
638static int
639ata_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
640 int32_t flag, struct thread *td)
641{
642 device_t device, *children;
643 struct ata_ioc_devices *devices = (struct ata_ioc_devices *)data;
644 int *value = (int *)data;
645 int i, nchildren, error = ENOTTY;
646
647 switch (cmd) {
648 case IOCATAGMAXCHANNEL:
649 /* In case we have channel 0..n this will return n+1. */
650 *value = devclass_get_maxunit(ata_devclass);
651 error = 0;
652 break;
653
654 case IOCATAREINIT:
655 if (*value >= devclass_get_maxunit(ata_devclass) ||
656 !(device = devclass_get_device(ata_devclass, *value)) ||
657 !device_is_attached(device))
658 return ENXIO;
659 error = ata_reinit(device);
660 break;
661
662 case IOCATAATTACH:
663 if (*value >= devclass_get_maxunit(ata_devclass) ||
664 !(device = devclass_get_device(ata_devclass, *value)) ||
665 !device_is_attached(device))
666 return ENXIO;
667 error = DEVICE_ATTACH(device);
668 break;
669
670 case IOCATADETACH:
671 if (*value >= devclass_get_maxunit(ata_devclass) ||
672 !(device = devclass_get_device(ata_devclass, *value)) ||
673 !device_is_attached(device))
674 return ENXIO;
675 error = DEVICE_DETACH(device);
676 break;
677
678 case IOCATADEVICES:
679 if (devices->channel >= devclass_get_maxunit(ata_devclass) ||
680 !(device = devclass_get_device(ata_devclass, devices->channel)) ||
681 !device_is_attached(device))
682 return ENXIO;
683 bzero(devices->name[0], 32);
684 bzero(&devices->params[0], sizeof(struct ata_params));
685 bzero(devices->name[1], 32);
686 bzero(&devices->params[1], sizeof(struct ata_params));
687 if (!device_get_children(device, &children, &nchildren)) {
688 for (i = 0; i < nchildren; i++) {
689 if (children[i] && device_is_attached(children[i])) {
690 struct ata_device *atadev = device_get_softc(children[i]);
691
692 if (atadev->unit == ATA_MASTER) { /* XXX SOS PM */
693 strncpy(devices->name[0],
694 device_get_nameunit(children[i]), 32);
695 bcopy(&atadev->param, &devices->params[0],
696 sizeof(struct ata_params));
697 }
698 if (atadev->unit == ATA_SLAVE) { /* XXX SOS PM */
699 strncpy(devices->name[1],
700 device_get_nameunit(children[i]), 32);
701 bcopy(&atadev->param, &devices->params[1],
702 sizeof(struct ata_params));
703 }
704 }
705 }
706 free(children, M_TEMP);
707 error = 0;
708 }
709 else
710 error = ENODEV;
711 break;
712
713 default:
714 if (ata_raid_ioctl_func)
715 error = ata_raid_ioctl_func(cmd, data);
716 }
717 return error;
718}
719#endif
720
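/*
 * Per-device ioctl backend: IOCATAREQUEST bounces a user-supplied ATA or
 * ATAPI command through a kernel buffer and queues it; the other ioctls
 * get/set identify data, transfer mode and spindown settings.
 */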
721int
722ata_device_ioctl(device_t dev, u_long cmd, caddr_t data)
723{
724 struct ata_device *atadev = device_get_softc(dev);
725 struct ata_channel *ch = device_get_softc(device_get_parent(dev));
726 struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data;
727 struct ata_params *params = (struct ata_params *)data;
728 int *mode = (int *)data;
729 struct ata_request *request;
730 caddr_t buf;
731 int error;
732
733 switch (cmd) {
734 case IOCATAREQUEST:
735 if (ioc_request->count >
736 (ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS)) {
737 return (EFBIG);
738 }
739 if (!(buf = malloc(ioc_request->count, M_ATA, M_NOWAIT))) {
740 return ENOMEM;
741 }
742 if (!(request = ata_alloc_request())) {
743 free(buf, M_ATA);
744 return ENOMEM;
745 }
746 request->dev = atadev->dev;
747 if (ioc_request->flags & ATA_CMD_WRITE) {
748 error = copyin(ioc_request->data, buf, ioc_request->count);
749 if (error) {
750 free(buf, M_ATA);
751 ata_free_request(request);
752 return error;
753 }
754 }
755 if (ioc_request->flags & ATA_CMD_ATAPI) {
756 request->flags = ATA_R_ATAPI;
757 bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16);
758 }
759 else {
760 request->u.ata.command = ioc_request->u.ata.command;
761 request->u.ata.feature = ioc_request->u.ata.feature;
762 request->u.ata.lba = ioc_request->u.ata.lba;
763 request->u.ata.count = ioc_request->u.ata.count;
764 }
765 request->timeout = ioc_request->timeout;
766 request->data = buf;
767 request->bytecount = ioc_request->count;
768 request->transfersize = request->bytecount;
769 if (ioc_request->flags & ATA_CMD_CONTROL)
770 request->flags |= ATA_R_CONTROL;
771 if (ioc_request->flags & ATA_CMD_READ)
772 request->flags |= ATA_R_READ;
773 if (ioc_request->flags & ATA_CMD_WRITE)
774 request->flags |= ATA_R_WRITE;
775 ata_queue_request(request);
776 if (request->flags & ATA_R_ATAPI) {
777 bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense,
778 sizeof(struct atapi_sense));
779 }
780 else {
781 ioc_request->u.ata.command = request->u.ata.command;
782 ioc_request->u.ata.feature = request->u.ata.feature;
783 ioc_request->u.ata.lba = request->u.ata.lba;
784 ioc_request->u.ata.count = request->u.ata.count;
785 }
786 ioc_request->error = request->result;
787 if (ioc_request->flags & ATA_CMD_READ)
788 error = copyout(buf, ioc_request->data, ioc_request->count);
789 else
790 error = 0;
791 free(buf, M_ATA);
792 ata_free_request(request);
793 return error;
794
795 case IOCATAGPARM:
796 ata_getparam(atadev, 0);
797 bcopy(&atadev->param, params, sizeof(struct ata_params));
798 return 0;
799
800 case IOCATASMODE:
801 atadev->mode = *mode;
802 ata_setmode(dev);
803 return 0;
804
805 case IOCATAGMODE:
806 *mode = atadev->mode |
807 (ATA_GETREV(device_get_parent(dev), atadev->unit) << 8);
808 return 0;
809 case IOCATASSPINDOWN:
810 atadev->spindown = *mode;
811 return 0;
812 case IOCATAGSPINDOWN:
813 *mode = atadev->spindown;
814 return 0;
815 default:
816 return ENOTTY;
817 }
818}
819
820#ifndef ATA_CAM
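/*
 * Deferred boot-time attach, run once interrupts are enabled: identify
 * the devices on every registered channel, then drop the config hook.
 */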
821static void
822ata_boot_attach(void)
823{
824 struct ata_channel *ch;
825 int ctlr;
826
827	    mtx_lock(&Giant); /* newbus suckage, it needs Giant */
828
829	    /* kick off probe and attach on all channels */
830 for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) {
831 if ((ch = devclass_get_softc(ata_devclass, ctlr))) {
832 ata_identify(ch->dev);
833 }
834 }
835
836 /* release the hook that got us here, we are only needed once during boot */
837 if (ata_delayed_attach) {
838 config_intrhook_disestablish(ata_delayed_attach);
839 free(ata_delayed_attach, M_TEMP);
840 ata_delayed_attach = NULL;
841 }
842
843 mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */
844}
845#endif
846
847/*
848 * misc support functions
849 */
850#ifndef ATA_CAM
851static device_t
852ata_add_child(device_t parent, struct ata_device *atadev, int unit)
853{
854 device_t child;
855
856 if ((child = device_add_child(parent, NULL, unit))) {
857 device_set_softc(child, atadev);
858 device_quiet(child);
859 atadev->dev = child;
860 atadev->max_iosize = DEV_BSIZE;
861 atadev->mode = ATA_PIO_MAX;
862 }
863 return child;
864}
865#endif
866
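/*
 * Issue ATA or ATAPI IDENTIFY (up to two attempts) to fill atadev->param,
 * fix byte order and vendor string quirks, and on init also set the
 * device description, choose a default DMA/PIO mode and honor any
 * "devN.mode" / "mode" hints.
 */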
867int
868ata_getparam(struct ata_device *atadev, int init)
869{
870 struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
871 struct ata_request *request;
872 const char *res;
873 char buf[64];
874 u_int8_t command = 0;
875 int error = ENOMEM, retries = 2, mode = -1;
876
877 if (ch->devices & (ATA_ATA_MASTER << atadev->unit))
878 command = ATA_ATA_IDENTIFY;
879 if (ch->devices & (ATA_ATAPI_MASTER << atadev->unit))
880 command = ATA_ATAPI_IDENTIFY;
881 if (!command)
882 return ENXIO;
883
884 while (retries-- > 0 && error) {
885 if (!(request = ata_alloc_request()))
886 break;
887 request->dev = atadev->dev;
888 request->timeout = 1;
889 request->retries = 0;
890 request->u.ata.command = command;
891 request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT);
892 if (!bootverbose)
893 request->flags |= ATA_R_QUIET;
894 request->data = (void *)&atadev->param;
895 request->bytecount = sizeof(struct ata_params);
896 request->donecount = 0;
897 request->transfersize = DEV_BSIZE;
898 ata_queue_request(request);
899 error = request->result;
900 ata_free_request(request);
901 }
902
903 if (!error && (isprint(atadev->param.model[0]) ||
904 isprint(atadev->param.model[1]))) {
905 struct ata_params *atacap = &atadev->param;
906 int16_t *ptr;
907
908 for (ptr = (int16_t *)atacap;
909 ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) {
910 *ptr = le16toh(*ptr);
911 }
912 if (!(!strncmp(atacap->model, "FX", 2) ||
913 !strncmp(atacap->model, "NEC", 3) ||
914 !strncmp(atacap->model, "Pioneer", 7) ||
915 !strncmp(atacap->model, "SHARP", 5))) {
916 bswap(atacap->model, sizeof(atacap->model));
917 bswap(atacap->revision, sizeof(atacap->revision));
918 bswap(atacap->serial, sizeof(atacap->serial));
919 }
920 btrim(atacap->model, sizeof(atacap->model));
921 bpack(atacap->model, atacap->model, sizeof(atacap->model));
922 btrim(atacap->revision, sizeof(atacap->revision));
923 bpack(atacap->revision, atacap->revision, sizeof(atacap->revision));
924 btrim(atacap->serial, sizeof(atacap->serial));
925 bpack(atacap->serial, atacap->serial, sizeof(atacap->serial));
926
927 if (bootverbose)
928 printf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n",
929 device_get_unit(ch->dev),
930 ata_unit2str(atadev),
931 ata_mode2str(ata_pmode(atacap)),
932 ata_mode2str(ata_wmode(atacap)),
933 ata_mode2str(ata_umode(atacap)),
934 (atacap->hwres & ATA_CABLE_ID) ? "80":"40");
935
936 if (init) {
937 char buffer[64];
938
939 sprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision);
940 device_set_desc_copy(atadev->dev, buffer);
941 if ((atadev->param.config & ATA_PROTO_ATAPI) &&
942 (atadev->param.config != ATA_CFA_MAGIC1) &&
943 (atadev->param.config != ATA_CFA_MAGIC2)) {
944 if (atapi_dma &&
945 (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR &&
946 ata_umode(&atadev->param) >= ATA_UDMA2)
947 atadev->mode = ATA_DMA_MAX;
948 }
949 else {
950 if (ata_dma &&
951 (ata_umode(&atadev->param) > 0 ||
952 ata_wmode(&atadev->param) > 0))
953 atadev->mode = ATA_DMA_MAX;
954 }
955 snprintf(buf, sizeof(buf), "dev%d.mode", atadev->unit);
956 if (resource_string_value(device_get_name(ch->dev),
957 device_get_unit(ch->dev), buf, &res) == 0)
958 mode = ata_str2mode(res);
959 else if (resource_string_value(device_get_name(ch->dev),
960 device_get_unit(ch->dev), "mode", &res) == 0)
961 mode = ata_str2mode(res);
962 if (mode >= 0)
963 atadev->mode = mode;
964 }
965 }
966 else {
967 if (!error)
968 error = ENXIO;
969 }
970 return error;
971}
972
973#ifndef ATA_CAM
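/*
 * Probe the channel for newly found devices and attach children for those
 * that answer IDENTIFY; the PATA slave is identified before the master so
 * cable detection on the master works.
 */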
974int
975ata_identify(device_t dev)
976{
977 struct ata_channel *ch = device_get_softc(dev);
978 struct ata_device *atadev;
979 device_t *children;
980 device_t child, master = NULL;
981 int nchildren, i, n = ch->devices;
982
983 if (bootverbose)
984 device_printf(dev, "Identifying devices: %08x\n", ch->devices);
985
986 mtx_lock(&Giant);
987 /* Skip existing devices. */
988 if (!device_get_children(dev, &children, &nchildren)) {
989 for (i = 0; i < nchildren; i++) {
990 if (children[i] && (atadev = device_get_softc(children[i])))
991 n &= ~((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << atadev->unit);
992 }
993 free(children, M_TEMP);
994 }
995 /* Create new devices. */
996 if (bootverbose)
997 device_printf(dev, "New devices: %08x\n", n);
998 if (n == 0) {
999 mtx_unlock(&Giant);
1000 return (0);
1001 }
1002 for (i = 0; i < ATA_PM; ++i) {
1003 if (n & (((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << i))) {
1004 int unit = -1;
1005
1006 if (!(atadev = malloc(sizeof(struct ata_device),
1007 M_ATA, M_NOWAIT | M_ZERO))) {
1008 device_printf(dev, "out of memory\n");
1009 return ENOMEM;
1010 }
1011 atadev->unit = i;
1012#ifdef ATA_STATIC_ID
1013 if (n & (ATA_ATA_MASTER << i))
1014 unit = (device_get_unit(dev) << 1) + i;
1015#endif
1016 if ((child = ata_add_child(dev, atadev, unit))) {
1017 /*
1018 * PATA slave should be identified first, to allow
1019 * device cable detection on master to work properly.
1020 */
1021 if (i == 0 && (n & ATA_PORTMULTIPLIER) == 0 &&
1022 (n & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << 1)) != 0) {
1023 master = child;
1024 continue;
1025 }
1026 if (ata_getparam(atadev, 1)) {
1027 device_delete_child(dev, child);
1028 free(atadev, M_ATA);
1029 }
1030 }
1031 else
1032 free(atadev, M_ATA);
1033 }
1034 }
1035 if (master) {
1036 atadev = device_get_softc(master);
1037 if (ata_getparam(atadev, 1)) {
1038 device_delete_child(dev, master);
1039 free(atadev, M_ATA);
1040 }
1041 }
1042 bus_generic_probe(dev);
1043 bus_generic_attach(dev);
1044 mtx_unlock(&Giant);
1045 return 0;
1046}
1047#endif
1048
1049void
1050ata_default_registers(device_t dev)
1051{
1052 struct ata_channel *ch = device_get_softc(dev);
1053
1054	    /* fill in the defaults from what's set up already */
1055 ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
1056 ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
1057 ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
1058 ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
1059 ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
1060 ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
1061 ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
1062 ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
1063}
1064
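/*
 * Promote a command to its 48-bit LBA variant when the transfer exceeds
 * 28-bit addressing or 256 sectors and the device supports it; on
 * controllers flagged ATA_NO_48BIT_DMA the DMA opcodes fall back to PIO.
 */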
1065void
1066ata_modify_if_48bit(struct ata_request *request)
1067{
1068 struct ata_channel *ch = device_get_softc(request->parent);
1069 struct ata_device *atadev = device_get_softc(request->dev);
1070
1071 request->flags &= ~ATA_R_48BIT;
1072
1073 if (((request->u.ata.lba + request->u.ata.count) >= ATA_MAX_28BIT_LBA ||
1074 request->u.ata.count > 256) &&
1075 atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
1076
1077 /* translate command into 48bit version */
1078 switch (request->u.ata.command) {
1079 case ATA_READ:
1080 request->u.ata.command = ATA_READ48;
1081 break;
1082 case ATA_READ_MUL:
1083 request->u.ata.command = ATA_READ_MUL48;
1084 break;
1085 case ATA_READ_DMA:
1086 if (ch->flags & ATA_NO_48BIT_DMA) {
1087 if (request->transfersize > DEV_BSIZE)
1088 request->u.ata.command = ATA_READ_MUL48;
1089 else
1090 request->u.ata.command = ATA_READ48;
1091 request->flags &= ~ATA_R_DMA;
1092 }
1093 else
1094 request->u.ata.command = ATA_READ_DMA48;
1095 break;
1096 case ATA_READ_DMA_QUEUED:
1097 if (ch->flags & ATA_NO_48BIT_DMA) {
1098 if (request->transfersize > DEV_BSIZE)
1099 request->u.ata.command = ATA_READ_MUL48;
1100 else
1101 request->u.ata.command = ATA_READ48;
1102 request->flags &= ~ATA_R_DMA;
1103 }
1104 else
1105 request->u.ata.command = ATA_READ_DMA_QUEUED48;
1106 break;
1107 case ATA_WRITE:
1108 request->u.ata.command = ATA_WRITE48;
1109 break;
1110 case ATA_WRITE_MUL:
1111 request->u.ata.command = ATA_WRITE_MUL48;
1112 break;
1113 case ATA_WRITE_DMA:
1114 if (ch->flags & ATA_NO_48BIT_DMA) {
1115 if (request->transfersize > DEV_BSIZE)
1116 request->u.ata.command = ATA_WRITE_MUL48;
1117 else
1118 request->u.ata.command = ATA_WRITE48;
1119 request->flags &= ~ATA_R_DMA;
1120 }
1121 else
1122 request->u.ata.command = ATA_WRITE_DMA48;
1123 break;
1124 case ATA_WRITE_DMA_QUEUED:
1125 if (ch->flags & ATA_NO_48BIT_DMA) {
1126 if (request->transfersize > DEV_BSIZE)
1127 request->u.ata.command = ATA_WRITE_MUL48;
1128 else
1129 request->u.ata.command = ATA_WRITE48;
1131 request->flags &= ~ATA_R_DMA;
1132 }
1133 else
1134 request->u.ata.command = ATA_WRITE_DMA_QUEUED48;
1135 break;
1136 case ATA_FLUSHCACHE:
1137 request->u.ata.command = ATA_FLUSHCACHE48;
1138 break;
1139 case ATA_SET_MAX_ADDRESS:
1140 request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1141 break;
1142 default:
1143 return;
1144 }
1145 request->flags |= ATA_R_48BIT;
1146 }
1147 else if (atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
1148
1149 /* translate command into 48bit version */
1150 switch (request->u.ata.command) {
1151 case ATA_FLUSHCACHE:
1152 request->u.ata.command = ATA_FLUSHCACHE48;
1153 break;
1154 case ATA_READ_NATIVE_MAX_ADDRESS:
1155 request->u.ata.command = ATA_READ_NATIVE_MAX_ADDRESS48;
1156 break;
1157 case ATA_SET_MAX_ADDRESS:
1158 request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1159 break;
1160 default:
1161 return;
1162 }
1163 request->flags |= ATA_R_48BIT;
1164 }
1165}
1166
1167void
1168ata_udelay(int interval)
1169{
1170	    /* for now just use DELAY, the timer/sleep subsystems are not there yet */
1171 if (1 || interval < (1000000/hz) || ata_delayed_attach)
1172 DELAY(interval);
1173 else
1174 pause("ataslp", interval/(1000000/hz));
1175}
1176
1177char *
1178ata_unit2str(struct ata_device *atadev)
1179{
1180 struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
1181 static char str[8];
1182
1183 if (ch->devices & ATA_PORTMULTIPLIER)
1184 sprintf(str, "port%d", atadev->unit);
1185 else
1186 sprintf(str, "%s", atadev->unit == ATA_MASTER ? "master" : "slave");
1187 return str;
1188}
1189
1190const char *
1191ata_mode2str(int mode)
1192{
1193 switch (mode) {
1194 case -1: return "UNSUPPORTED";
1195 case ATA_PIO0: return "PIO0";
1196 case ATA_PIO1: return "PIO1";
1197 case ATA_PIO2: return "PIO2";
1198 case ATA_PIO3: return "PIO3";
1199 case ATA_PIO4: return "PIO4";
1200 case ATA_WDMA0: return "WDMA0";
1201 case ATA_WDMA1: return "WDMA1";
1202 case ATA_WDMA2: return "WDMA2";
1203 case ATA_UDMA0: return "UDMA16";
1204 case ATA_UDMA1: return "UDMA25";
1205 case ATA_UDMA2: return "UDMA33";
1206 case ATA_UDMA3: return "UDMA40";
1207 case ATA_UDMA4: return "UDMA66";
1208 case ATA_UDMA5: return "UDMA100";
1209 case ATA_UDMA6: return "UDMA133";
1210 case ATA_SA150: return "SATA150";
1211 case ATA_SA300: return "SATA300";
1212 default:
1213 if (mode & ATA_DMA_MASK)
1214 return "BIOSDMA";
1215 else
1216 return "BIOSPIO";
1217 }
1218}
1219
1220int
1221ata_str2mode(const char *str)
1222{
1223
1224 if (!strcasecmp(str, "PIO0")) return (ATA_PIO0);
1225 if (!strcasecmp(str, "PIO1")) return (ATA_PIO1);
1226 if (!strcasecmp(str, "PIO2")) return (ATA_PIO2);
1227 if (!strcasecmp(str, "PIO3")) return (ATA_PIO3);
1228 if (!strcasecmp(str, "PIO4")) return (ATA_PIO4);
1229 if (!strcasecmp(str, "WDMA0")) return (ATA_WDMA0);
1230 if (!strcasecmp(str, "WDMA1")) return (ATA_WDMA1);
1231 if (!strcasecmp(str, "WDMA2")) return (ATA_WDMA2);
1232 if (!strcasecmp(str, "UDMA0")) return (ATA_UDMA0);
1233 if (!strcasecmp(str, "UDMA16")) return (ATA_UDMA0);
1234 if (!strcasecmp(str, "UDMA1")) return (ATA_UDMA1);
1235 if (!strcasecmp(str, "UDMA25")) return (ATA_UDMA1);
1236 if (!strcasecmp(str, "UDMA2")) return (ATA_UDMA2);
1237 if (!strcasecmp(str, "UDMA33")) return (ATA_UDMA2);
1238 if (!strcasecmp(str, "UDMA3")) return (ATA_UDMA3);
1239 if (!strcasecmp(str, "UDMA44")) return (ATA_UDMA3);
1240 if (!strcasecmp(str, "UDMA4")) return (ATA_UDMA4);
1241 if (!strcasecmp(str, "UDMA66")) return (ATA_UDMA4);
1242 if (!strcasecmp(str, "UDMA5")) return (ATA_UDMA5);
1243 if (!strcasecmp(str, "UDMA100")) return (ATA_UDMA5);
1244 if (!strcasecmp(str, "UDMA6")) return (ATA_UDMA6);
1245 if (!strcasecmp(str, "UDMA133")) return (ATA_UDMA6);
1246 return (-1);
1247}
1248
1249const char *
1250ata_satarev2str(int rev)
1251{
1252 switch (rev) {
1253 case 0: return "";
1254 case 1: return "SATA 1.5Gb/s";
1255 case 2: return "SATA 3Gb/s";
1256 case 3: return "SATA 6Gb/s";
1257 case 0xff: return "SATA";
1258 default: return "???";
1259 }
1260}
1261
1262int
1263ata_atapi(device_t dev, int target)
1264{
1265 struct ata_channel *ch = device_get_softc(dev);
1266
1267 return (ch->devices & (ATA_ATAPI_MASTER << target));
1268}
1269
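/*
 * ata_pmode(), ata_wmode() and ata_umode() decode the IDENTIFY data into
 * the best supported PIO, WDMA and UDMA mode; the DMA variants return -1
 * when the device advertises none.
 */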
1270int
1271ata_pmode(struct ata_params *ap)
1272{
1273 if (ap->atavalid & ATA_FLAG_64_70) {
1274 if (ap->apiomodes & 0x02)
1275 return ATA_PIO4;
1276 if (ap->apiomodes & 0x01)
1277 return ATA_PIO3;
1278 }
1279 if (ap->mwdmamodes & 0x04)
1280 return ATA_PIO4;
1281 if (ap->mwdmamodes & 0x02)
1282 return ATA_PIO3;
1283 if (ap->mwdmamodes & 0x01)
1284 return ATA_PIO2;
1285 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200)
1286 return ATA_PIO2;
1287 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100)
1288 return ATA_PIO1;
1289 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000)
1290 return ATA_PIO0;
1291 return ATA_PIO0;
1292}
1293
1294int
1295ata_wmode(struct ata_params *ap)
1296{
1297 if (ap->mwdmamodes & 0x04)
1298 return ATA_WDMA2;
1299 if (ap->mwdmamodes & 0x02)
1300 return ATA_WDMA1;
1301 if (ap->mwdmamodes & 0x01)
1302 return ATA_WDMA0;
1303 return -1;
1304}
1305
1306int
1307ata_umode(struct ata_params *ap)
1308{
1309 if (ap->atavalid & ATA_FLAG_88) {
1310 if (ap->udmamodes & 0x40)
1311 return ATA_UDMA6;
1312 if (ap->udmamodes & 0x20)
1313 return ATA_UDMA5;
1314 if (ap->udmamodes & 0x10)
1315 return ATA_UDMA4;
1316 if (ap->udmamodes & 0x08)
1317 return ATA_UDMA3;
1318 if (ap->udmamodes & 0x04)
1319 return ATA_UDMA2;
1320 if (ap->udmamodes & 0x02)
1321 return ATA_UDMA1;
1322 if (ap->udmamodes & 0x01)
1323 return ATA_UDMA0;
1324 }
1325 return -1;
1326}
1327
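/*
 * Clamp the requested mode to maxmode and to what the device's IDENTIFY
 * data allows, preferring UDMA over WDMA over PIO.
 */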
1328int
1329ata_limit_mode(device_t dev, int mode, int maxmode)
1330{
1331 struct ata_device *atadev = device_get_softc(dev);
1332
1333 if (maxmode && mode > maxmode)
1334 mode = maxmode;
1335
1336 if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0)
1337 return min(mode, ata_umode(&atadev->param));
1338
1339 if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0)
1340 return min(mode, ata_wmode(&atadev->param));
1341
1342 if (mode > ata_pmode(&atadev->param))
1343 return min(mode, ata_pmode(&atadev->param));
1344
1345 return mode;
1346}
1347
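/*
 * Helpers for cleaning up IDENTIFY string data: bswap() byte-swaps the
 * 16-bit words, btrim() turns NULs and underscores into spaces and trims
 * trailing blanks, bpack() collapses runs of spaces.
 */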
1348static void
1349bswap(int8_t *buf, int len)
1350{
1351 u_int16_t *ptr = (u_int16_t*)(buf + len);
1352
1353 while (--ptr >= (u_int16_t*)buf)
1354 *ptr = ntohs(*ptr);
1355}
1356
1357static void
1358btrim(int8_t *buf, int len)
1359{
1360 int8_t *ptr;
1361
1362 for (ptr = buf; ptr < buf+len; ++ptr)
1363 if (!*ptr || *ptr == '_')
1364 *ptr = ' ';
1365 for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr)
1366 *ptr = 0;
1367}
1368
1369static void
1370bpack(int8_t *src, int8_t *dst, int len)
1371{
1372 int i, j, blank;
1373
1374 for (i = j = blank = 0 ; i < len; i++) {
1375 if (blank && src[i] == ' ') continue;
1376 if (blank && src[i] != ' ') {
1377 dst[j++] = src[i];
1378 blank = 0;
1379 continue;
1380 }
1381 if (src[i] == ' ') {
1382 blank = 1;
1383 if (i == 0)
1384 continue;
1385 }
1386 dst[j++] = src[i];
1387 }
1388 if (j < len)
1389 dst[j] = 0x00;
1390}
1391
1392#ifdef ATA_CAM
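/*
 * Translate a CAM CCB (XPT_ATA_IO or XPT_SCSI_IO) into an ata_request,
 * mark the channel active and hand it to the hardware layer; requests
 * that finish immediately are completed on the spot.
 */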
1393void
1394ata_cam_begin_transaction(device_t dev, union ccb *ccb)
1395{
1396 struct ata_channel *ch = device_get_softc(dev);
1397 struct ata_request *request;
1398
1399 if (!(request = ata_alloc_request())) {
1400 device_printf(dev, "FAILURE - out of memory in start\n");
1401 ccb->ccb_h.status = CAM_REQ_INVALID;
1402 xpt_done(ccb);
1403 return;
1404 }
1405 bzero(request, sizeof(*request));
1406
1407 /* setup request */
1408 request->dev = NULL;
1409 request->parent = dev;
1410 request->unit = ccb->ccb_h.target_id;
1411 if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1412 request->data = ccb->ataio.data_ptr;
1413 request->bytecount = ccb->ataio.dxfer_len;
1414 request->u.ata.command = ccb->ataio.cmd.command;
1415 request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) |
1416 (uint16_t)ccb->ataio.cmd.features;
1417 request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) |
1418 (uint16_t)ccb->ataio.cmd.sector_count;
1419 if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) {
1420 request->flags |= ATA_R_48BIT;
1421 request->u.ata.lba =
1422 ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) |
1423 ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) |
1424 ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24);
1425 } else {
1426 request->u.ata.lba =
1427 ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24);
1428 }
1429 request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) |
1430 ((uint64_t)ccb->ataio.cmd.lba_mid << 8) |
1431 (uint64_t)ccb->ataio.cmd.lba_low;
1432 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1433 ccb->ataio.cmd.flags & CAM_ATAIO_DMA)
1434 request->flags |= ATA_R_DMA;
1435 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1436 request->flags |= ATA_R_READ;
1437 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1438 request->flags |= ATA_R_WRITE;
1439 } else {
1440 request->data = ccb->csio.data_ptr;
1441 request->bytecount = ccb->csio.dxfer_len;
1442 bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
1443 ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
1444 request->u.atapi.ccb, ccb->csio.cdb_len);
1445 request->flags |= ATA_R_ATAPI;
1446 if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
1447 request->flags |= ATA_R_ATAPI16;
1448 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1449 ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
1450 request->flags |= ATA_R_DMA;
1451 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1452 request->flags |= ATA_R_READ;
1453 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1454 request->flags |= ATA_R_WRITE;
1455 }
1456 request->transfersize = min(request->bytecount,
1457 ch->curr[ccb->ccb_h.target_id].bytecount);
1458 request->retries = 0;
1459 request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
1460 callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
1461 request->ccb = ccb;
1462
1463 ch->running = request;
1464 ch->state = ATA_ACTIVE;
1465 if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
1466 ch->running = NULL;
1467 ch->state = ATA_IDLE;
1468 ata_cam_end_transaction(dev, request);
1469 return;
1470 }
1471}
1472
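/*
 * Map the finished ata_request back onto its CCB (status, result
 * registers, residual counts), complete the CCB and reinit the channel
 * after fatal errors such as timeouts.
 */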
1473void
1474ata_cam_end_transaction(device_t dev, struct ata_request *request)
1475{
1476 struct ata_channel *ch = device_get_softc(dev);
1477 union ccb *ccb = request->ccb;
1478 int fatalerr = 0;
1479
1480 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1481 if (request->flags & ATA_R_TIMEOUT) {
1482 xpt_freeze_simq(ch->sim, 1);
1483 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1484 ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ;
1485 fatalerr = 1;
1486 } else if (request->status & ATA_S_ERROR) {
1487 if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1488 ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
1489 } else {
1490 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1491 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1492 }
1493 } else if (request->result == ERESTART)
1494 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1495 else if (request->result != 0)
1496 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1497 else
1498 ccb->ccb_h.status |= CAM_REQ_CMP;
1499 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
1500 !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
1501 xpt_freeze_devq(ccb->ccb_h.path, 1);
1502 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1503 }
1504 if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1505 ((request->status & ATA_S_ERROR) ||
1506 (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) {
1507 struct ata_res *res = &ccb->ataio.res;
1508 res->status = request->status;
1509 res->error = request->error;
1510 res->lba_low = request->u.ata.lba;
1511 res->lba_mid = request->u.ata.lba >> 8;
1512 res->lba_high = request->u.ata.lba >> 16;
1513 res->device = request->u.ata.lba >> 24;
1514 res->lba_low_exp = request->u.ata.lba >> 24;
1515 res->lba_mid_exp = request->u.ata.lba >> 32;
1516 res->lba_high_exp = request->u.ata.lba >> 40;
1517 res->sector_count = request->u.ata.count;
1518 res->sector_count_exp = request->u.ata.count >> 8;
1519 }
1520 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1521 if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1522 ccb->ataio.resid =
1523 ccb->ataio.dxfer_len - request->donecount;
1524 } else {
1525 ccb->csio.resid =
1526 ccb->csio.dxfer_len - request->donecount;
1527 }
1528 }
1529 ata_free_request(request);
1530 xpt_done(ccb);
1531 /* Do error recovery if needed. */
1532 if (fatalerr)
1533 ata_reinit(dev);
1534}
1535
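/*
 * Validate the CCB's target and LUN against what this channel supports;
 * on a bad ID the CCB is completed with an error and -1 is returned.
 */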
1536static int
1537ata_check_ids(device_t dev, union ccb *ccb)
1538{
1539 struct ata_channel *ch = device_get_softc(dev);
1540
1541 if (ccb->ccb_h.target_id > ((ch->flags & ATA_NO_SLAVE) ? 0 : 1)) {
1542 ccb->ccb_h.status = CAM_TID_INVALID;
1543 xpt_done(ccb);
1544 return (-1);
1545 }
1546 if (ccb->ccb_h.target_lun != 0) {
1547 ccb->ccb_h.status = CAM_LUN_INVALID;
1548 xpt_done(ccb);
1549 return (-1);
1550 }
1551 return (0);
1552}
1553
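/*
 * CAM SIM action routine: dispatch incoming CCBs to command execution,
 * transfer-settings get/set, bus reset and path inquiry handling.
 */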
1554static void
1555ataaction(struct cam_sim *sim, union ccb *ccb)
1556{
1557 device_t dev, parent;
1558 struct ata_channel *ch;
1559
1560 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n",
1561 ccb->ccb_h.func_code));
1562
1563 ch = (struct ata_channel *)cam_sim_softc(sim);
1564 dev = ch->dev;
1565 switch (ccb->ccb_h.func_code) {
1566 /* Common cases first */
1567 case XPT_ATA_IO: /* Execute the requested I/O operation */
1568 case XPT_SCSI_IO:
1569 if (ata_check_ids(dev, ccb))
1570 return;
1571 if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER)
1572 << ccb->ccb_h.target_id)) == 0) {
1573 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1574 break;
1575 }
1576 if (ch->running)
1577 device_printf(dev, "already running!\n");
1578 if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1579 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
1580 (ccb->ataio.cmd.control & ATA_A_RESET)) {
1581 struct ata_res *res = &ccb->ataio.res;
1582
1583 bzero(res, sizeof(*res));
1584 if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) {
1585 res->lba_high = 0;
1586 res->lba_mid = 0;
1587 } else {
1588 res->lba_high = 0xeb;
1589 res->lba_mid = 0x14;
1590 }
1591 ccb->ccb_h.status = CAM_REQ_CMP;
1592 break;
1593 }
1594 ata_cam_begin_transaction(dev, ccb);
1595 return;
1596 case XPT_EN_LUN: /* Enable LUN as a target */
1597 case XPT_TARGET_IO: /* Execute target I/O request */
1598 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */
1599 case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/
1600 case XPT_ABORT: /* Abort the specified CCB */
1601 /* XXX Implement */
1602 ccb->ccb_h.status = CAM_REQ_INVALID;
1603 break;
1604 case XPT_SET_TRAN_SETTINGS:
1605 {
1606 struct ccb_trans_settings *cts = &ccb->cts;
1607 struct ata_cam_device *d;
1608
1609 if (ata_check_ids(dev, ccb))
1610 return;
1611 if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
1612 d = &ch->curr[ccb->ccb_h.target_id];
1613 else
1614 d = &ch->user[ccb->ccb_h.target_id];
1615 if (ch->flags & ATA_SATA) {
1616 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
1617 d->revision = cts->xport_specific.sata.revision;
1618 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) {
1619 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1620 d->mode = ATA_SETMODE(ch->dev,
1621 ccb->ccb_h.target_id,
1622 cts->xport_specific.sata.mode);
1623 } else
1624 d->mode = cts->xport_specific.sata.mode;
1625 }
1626 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
1627 d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
1628 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
1629 d->atapi = cts->xport_specific.sata.atapi;
1630 } else {
1631 if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) {
1632 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1633 d->mode = ATA_SETMODE(ch->dev,
1634 ccb->ccb_h.target_id,
1635 cts->xport_specific.ata.mode);
1636 } else
1637 d->mode = cts->xport_specific.ata.mode;
1638 }
1639 if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT)
1640 d->bytecount = cts->xport_specific.ata.bytecount;
1641 if (cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI)
1642 d->atapi = cts->xport_specific.ata.atapi;
1643 }
1644 ccb->ccb_h.status = CAM_REQ_CMP;
1645 break;
1646 }
1647 case XPT_GET_TRAN_SETTINGS:
1648 {
1649 struct ccb_trans_settings *cts = &ccb->cts;
1650 struct ata_cam_device *d;
1651
1652 if (ata_check_ids(dev, ccb))
1653 return;
1654 if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
1655 d = &ch->curr[ccb->ccb_h.target_id];
1656 else
1657 d = &ch->user[ccb->ccb_h.target_id];
1658 cts->protocol = PROTO_ATA;
1659 cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
1660 if (ch->flags & ATA_SATA) {
1661 cts->transport = XPORT_SATA;
1662 cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1663 cts->xport_specific.sata.valid = 0;
1664 cts->xport_specific.sata.mode = d->mode;
1665 cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
1666 cts->xport_specific.sata.bytecount = d->bytecount;
1667 cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
1668 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1669 cts->xport_specific.sata.revision =
1670 ATA_GETREV(dev, ccb->ccb_h.target_id);
1671 if (cts->xport_specific.sata.revision != 0xff) {
1672 cts->xport_specific.sata.valid |=
1673 CTS_SATA_VALID_REVISION;
1674 }
1675 } else {
1676 cts->xport_specific.sata.revision = d->revision;
1677 cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
1678 }
1679 cts->xport_specific.sata.atapi = d->atapi;
1680 cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
1681 } else {
1682 cts->transport = XPORT_ATA;
1683 cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1684 cts->xport_specific.ata.valid = 0;
1685 cts->xport_specific.ata.mode = d->mode;
1686 cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE;
1687 cts->xport_specific.ata.bytecount = d->bytecount;
1688 cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT;
1689 cts->xport_specific.ata.atapi = d->atapi;
1690 cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI;
1691 }
1692 ccb->ccb_h.status = CAM_REQ_CMP;
1693 break;
1694 }
1695 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
1696 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
1697 ata_reinit(dev);
1698 ccb->ccb_h.status = CAM_REQ_CMP;
1699 break;
1700 case XPT_TERM_IO: /* Terminate the I/O process */
1701 /* XXX Implement */
1702 ccb->ccb_h.status = CAM_REQ_INVALID;
1703 break;
1704 case XPT_PATH_INQ: /* Path routing inquiry */
1705 {
1706 struct ccb_pathinq *cpi = &ccb->cpi;
1707
1708 parent = device_get_parent(dev);
1709 cpi->version_num = 1; /* XXX??? */
1710 cpi->hba_inquiry = PI_SDTR_ABLE;
1711 cpi->target_sprt = 0;
1712 cpi->hba_misc = PIM_SEQSCAN;
1713 cpi->hba_eng_cnt = 0;
1714 if (ch->flags & ATA_NO_SLAVE)
1715 cpi->max_target = 0;
1716 else
1717 cpi->max_target = 1;
1718 cpi->max_lun = 0;
1719 cpi->initiator_id = 0;
1720 cpi->bus_id = cam_sim_bus(sim);
1721 if (ch->flags & ATA_SATA)
1722 cpi->base_transfer_speed = 150000;
1723 else
1724 cpi->base_transfer_speed = 3300;
1725 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1726 strncpy(cpi->hba_vid, "ATA", HBA_IDLEN);
1727 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1728 cpi->unit_number = cam_sim_unit(sim);
1729 if (ch->flags & ATA_SATA)
1730 cpi->transport = XPORT_SATA;
1731 else
1732 cpi->transport = XPORT_ATA;
1733 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
1734 cpi->protocol = PROTO_ATA;
1735 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
1736 cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
1737 if (device_get_devclass(device_get_parent(parent)) ==
1738 devclass_find("pci")) {
1739 cpi->hba_vendor = pci_get_vendor(parent);
1740 cpi->hba_device = pci_get_device(parent);
1741 cpi->hba_subvendor = pci_get_subvendor(parent);
1742 cpi->hba_subdevice = pci_get_subdevice(parent);
1743 }
1744 cpi->ccb_h.status = CAM_REQ_CMP;
1745 break;
1746 }
1747 default:
1748 ccb->ccb_h.status = CAM_REQ_INVALID;
1749 break;
1750 }
1751 xpt_done(ccb);
1752}
1753
1754static void
1755atapoll(struct cam_sim *sim)
1756{
1757 struct ata_channel *ch = (struct ata_channel *)cam_sim_softc(sim);
1758
1759 ata_interrupt_locked(ch);
1760}
1761#endif
1762
1763/*
1764	 * module handling
1765 */
1766static int
1767ata_module_event_handler(module_t mod, int what, void *arg)
1768{
1769#ifndef ATA_CAM
1770 static struct cdev *atacdev;
1771#endif
1772
1773 switch (what) {
1774 case MOD_LOAD:
1775#ifndef ATA_CAM
1776 /* register controlling device */
1777 atacdev = make_dev(&ata_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "ata");
1778
1779 if (cold) {
1780 /* register boot attach to be run when interrupts are enabled */
1781 if (!(ata_delayed_attach = (struct intr_config_hook *)
1782 malloc(sizeof(struct intr_config_hook),
1783 M_TEMP, M_NOWAIT | M_ZERO))) {
1784 printf("ata: malloc of delayed attach hook failed\n");
1785 return EIO;
1786 }
1787 ata_delayed_attach->ich_func = (void*)ata_boot_attach;
1788 if (config_intrhook_establish(ata_delayed_attach) != 0) {
1789 printf("ata: config_intrhook_establish failed\n");
1790 free(ata_delayed_attach, M_TEMP);
1791 }
1792 }
1793#endif
1794 return 0;
1795
1796 case MOD_UNLOAD:
1797#ifndef ATA_CAM
1798 /* deregister controlling device */
1799 destroy_dev(atacdev);
1800#endif
1801 return 0;
1802
1803 default:
1804 return EOPNOTSUPP;
1805 }
1806}
1807
1808static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
1809DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
1810MODULE_VERSION(ata, 1);
1811#ifdef ATA_CAM
1812MODULE_DEPEND(ata, cam, 1, 1, 1);
1813#endif
1814
1815static void
1816ata_init(void)
1817{
1818 ata_request_zone = uma_zcreate("ata_request", sizeof(struct ata_request),
1819 NULL, NULL, NULL, NULL, 0, 0);
1820 ata_composite_zone = uma_zcreate("ata_composite",
1821 sizeof(struct ata_composite),
1822 NULL, NULL, NULL, NULL, 0, 0);
1823}
1824SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL);
1825
1826static void
1827ata_uninit(void)
1828{
1829 uma_zdestroy(ata_composite_zone);
1830 uma_zdestroy(ata_request_zone);
1831}
1832SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL);