Lines matching defs:ahb in sys/dev/ahb/ahb.c (FreeBSD releng/10.3)

28  * $FreeBSD: releng/10.3/sys/dev/ahb/ahb.c 281826 2015-04-21 11:27:50Z mav $
55 #include <dev/ahb/ahbreg.h>
60 #define ahb_inb(ahb, port) \
61 bus_read_1((ahb)->res, port)
63 #define ahb_inl(ahb, port) \
64 bus_read_4((ahb)->res, port)
66 #define ahb_outb(ahb, port, value) \
67 bus_write_1((ahb)->res, port, value)
69 #define ahb_outl(ahb, port, value) \
70 bus_write_4((ahb)->res, port, value)
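
The four macros above are thin wrappers that route every byte and longword register access through the softc's single bus resource (ahb->res), keeping the port offset symbolic. Below is a minimal userspace analog of that wrapper pattern; it is not part of the matched source, and the fake_* names and the plain array standing in for the register window are illustrative only.

/*
 * Illustrative analog of the accessor-macro pattern: a softc-like
 * struct owns one "register window" and all I/O goes through
 * wrappers taking (softc, offset).  The real driver uses
 * bus_read_1()/bus_write_4() against ahb->res instead.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_softc {
	uint8_t regs[256];	/* stand-in for the bus resource */
};

#define fake_inb(sc, port)		((sc)->regs[(port)])
#define fake_outb(sc, port, value)	((sc)->regs[(port)] = (uint8_t)(value))

int
main(void)
{
	struct fake_softc sc = { .regs = { 0 } };

	fake_outb(&sc, 0x10, 0x5a);
	printf("reg 0x10 = 0x%02x\n", fake_inb(&sc, 0x10));
	return (0);
}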
74 static void ahbfree(struct ahb_softc *ahb);
75 static int ahbreset(struct ahb_softc *ahb);
78 static int ahbxptattach(struct ahb_softc *ahb);
79 static void ahbhandleimmed(struct ahb_softc *ahb,
81 static void ahbcalcresid(struct ahb_softc *ahb,
83 static __inline void ahbdone(struct ahb_softc *ahb, u_int32_t mbox,
86 static void ahbintr_locked(struct ahb_softc *ahb);
94 static __inline struct ecb* ahbecbget(struct ahb_softc *ahb);
95 static __inline void ahbecbfree(struct ahb_softc* ahb,
97 static __inline u_int32_t ahbecbvtop(struct ahb_softc *ahb,
99 static __inline struct ecb* ahbecbptov(struct ahb_softc *ahb,
104 static __inline void ahbqueuembox(struct ahb_softc *ahb,
109 ahbecbget(struct ahb_softc *ahb)
114 mtx_assert(&ahb->lock, MA_OWNED);
115 if ((ecb = SLIST_FIRST(&ahb->free_ecbs)) != NULL)
116 SLIST_REMOVE_HEAD(&ahb->free_ecbs, links);
122 ahbecbfree(struct ahb_softc* ahb, struct ecb* ecb)
126 mtx_assert(&ahb->lock, MA_OWNED);
128 SLIST_INSERT_HEAD(&ahb->free_ecbs, ecb, links);
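
ahbecbget() and ahbecbfree() manage ECBs (the adapter's per-command control blocks) on a singly linked free list that is only touched with ahb->lock held, as the mtx_assert() calls show. The following userspace sketch reproduces just the free-list mechanics with <sys/queue.h>; the demo_* names are hypothetical and the locking is omitted.

/*
 * Minimal sketch of the ECB free-list pattern: command blocks are
 * popped from and pushed back onto an SLIST.  In the driver this
 * happens under ahb->lock.
 */
#include <sys/queue.h>
#include <stddef.h>
#include <stdio.h>

struct ecb_demo {
	int			index;
	SLIST_ENTRY(ecb_demo)	links;
};

SLIST_HEAD(ecb_list, ecb_demo);

static struct ecb_demo *
demo_ecbget(struct ecb_list *free_ecbs)
{
	struct ecb_demo *ecb;

	if ((ecb = SLIST_FIRST(free_ecbs)) != NULL)
		SLIST_REMOVE_HEAD(free_ecbs, links);
	return (ecb);
}

static void
demo_ecbfree(struct ecb_list *free_ecbs, struct ecb_demo *ecb)
{
	SLIST_INSERT_HEAD(free_ecbs, ecb, links);
}

int
main(void)
{
	struct ecb_list free_ecbs = SLIST_HEAD_INITIALIZER(free_ecbs);
	struct ecb_demo pool[4];

	for (int i = 0; i < 4; i++) {
		pool[i].index = i;
		demo_ecbfree(&free_ecbs, &pool[i]);
	}
	printf("got ecb %d\n", demo_ecbget(&free_ecbs)->index);
	return (0);
}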
132 ahbecbvtop(struct ahb_softc *ahb, struct ecb *ecb)
134 return (ahb->ecb_physbase
135 + (u_int32_t)((caddr_t)ecb - (caddr_t)ahb->ecb_array));
139 ahbecbptov(struct ahb_softc *ahb, u_int32_t ecb_addr)
141 return (ahb->ecb_array
143 - (struct ecb*)(uintptr_t)ahb->ecb_physbase));
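
ahbecbvtop() and ahbecbptov() translate between a virtual ECB pointer and its bus/physical address by simple offset arithmetic against the contiguous ecb_array, using the base address recorded by ahbmapecbs(). A small standalone sketch of the same arithmetic follows; it is not from the driver, and the demo physical base is an arbitrary number.

/*
 * Sketch of the vtop/ptov arithmetic: phys = physbase + (ecb - array),
 * and the inverse maps a physical address back to an array element.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_ecb { char pad[64]; };

static struct demo_ecb	demo_array[8];
static uint32_t		demo_physbase = 0x00200000;	/* hypothetical DMA base */

static uint32_t
demo_vtop(struct demo_ecb *ecb)
{
	return (demo_physbase
	      + (uint32_t)((char *)ecb - (char *)demo_array));
}

static struct demo_ecb *
demo_ptov(uint32_t ecb_addr)
{
	return (demo_array + (ecb_addr - demo_physbase) / sizeof(struct demo_ecb));
}

int
main(void)
{
	uint32_t paddr = demo_vtop(&demo_array[3]);

	printf("paddr 0x%x maps back to index %td\n",
	    (unsigned)paddr, demo_ptov(paddr) - demo_array);
	return (0);
}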
165 ahbqueuembox(struct ahb_softc *ahb, u_int32_t mboxval, u_int attn_code)
171 status = ahb_inb(ahb, HOSTSTAT);
179 device_get_nameunit(ahb->dev));
181 ahb_outl(ahb, MBOXOUT0, mboxval);
182 ahb_outb(ahb, ATTN, attn_code);
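
ahbqueuembox() hands a command to the adapter by polling the host status register until the outgoing mailbox is free, then writing the ECB's physical address to MBOXOUT0 and an attention code to ATTN. The sketch below mirrors that poll-then-write handshake in plain C; the register variables, busy bit, and demo inputs are stand-ins, not real hardware encodings.

/*
 * Sketch of the mailbox submission handshake: wait for the adapter to
 * drain the previous mailbox, then post the ECB address and attention
 * code.  The driver panics on timeout; the demo just reports failure.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MBOX_BUSY	0x01	/* stand-in for the real HOSTSTAT busy bit */

static volatile uint8_t  demo_hoststat;	/* stand-ins for adapter registers */
static volatile uint32_t demo_mboxout0;
static volatile uint8_t  demo_attn;

static int
demo_queuembox(uint32_t ecb_paddr, uint8_t attn_code)
{
	int wait;

	/* Poll until the adapter has consumed the previous mailbox. */
	for (wait = 1000; wait != 0; wait--) {
		if ((demo_hoststat & DEMO_MBOX_BUSY) == 0)
			break;
	}
	if (wait == 0) {
		fprintf(stderr, "demo: adapter not taking commands\n");
		return (-1);
	}
	demo_mboxout0 = ecb_paddr;	/* physical address of the ECB */
	demo_attn = attn_code;		/* e.g. "start ECB" plus target id */
	return (0);
}

int
main(void)
{
	/* Arbitrary demo inputs, not real AHB encodings. */
	return (demo_queuembox(0x00200000, 0x47) == 0 ? 0 : 1);
}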
261 struct ahb_softc *ahb;
276 ahb = ahballoc(dev, io);
278 if (ahbreset(ahb) != 0)
306 /* lockarg */ &ahb->lock,
307 &ahb->buffer_dmat) != 0)
310 ahb->init_level++;
322 + sizeof(*ahb->ha_inq_data),
328 &ahb->ecb_dmat) != 0)
331 ahb->init_level++;
334 if (bus_dmamem_alloc(ahb->ecb_dmat, (void **)&ahb->ecb_array,
335 BUS_DMA_NOWAIT, &ahb->ecb_dmamap) != 0)
338 ahb->ha_inq_data = (struct ha_inquiry_data *)&ahb->ecb_array[AHB_NECB];
340 ahb->init_level++;
343 bus_dmamap_load(ahb->ecb_dmat, ahb->ecb_dmamap,
344 ahb->ecb_array, AHB_NSEG * sizeof(struct ecb),
345 ahbmapecbs, ahb, /*flags*/0);
347 ahb->init_level++;
350 bzero(ahb->ecb_array, (AHB_NECB * sizeof(struct ecb))
351 + sizeof(*ahb->ha_inq_data));
352 next_ecb = ahb->ecb_array;
353 while (ahb->num_ecbs < AHB_NECB) {
356 if (bus_dmamap_create(ahb->buffer_dmat, /*flags*/0,
359 callout_init_mtx(&next_ecb->timer, &ahb->lock, 0);
360 ecb_paddr = ahbecbvtop(ahb, next_ecb);
363 ahb->num_ecbs++;
364 ahbecbfree(ahb, next_ecb);
368 ahb->init_level++;
374 if (ahbxptattach(ahb))
379 NULL, ahbintr, ahb, &ih) != 0)
390 ahbfree(ahb);
400 struct ahb_softc *ahb;
402 ahb = device_get_softc(dev);
403 SLIST_INIT(&ahb->free_ecbs);
404 LIST_INIT(&ahb->pending_ccbs);
405 ahb->res = res;
406 ahb->disc_permitted = ~0;
407 ahb->tags_permitted = ~0;
408 ahb->dev = dev;
409 mtx_init(&ahb->lock, "ahb", NULL, MTX_DEF);
411 return (ahb);
415 ahbfree(struct ahb_softc *ahb)
417 switch (ahb->init_level) {
420 bus_dmamap_unload(ahb->ecb_dmat, ahb->ecb_dmamap);
422 bus_dmamem_free(ahb->ecb_dmat, ahb->ecb_array,
423 ahb->ecb_dmamap);
424 bus_dmamap_destroy(ahb->ecb_dmat, ahb->ecb_dmamap);
426 bus_dma_tag_destroy(ahb->ecb_dmat);
428 bus_dma_tag_destroy(ahb->buffer_dmat);
432 mtx_destroy(&ahb->lock);
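
ahballoc() and ahbfree() cooperate through the init_level counter: attach increments it after each DMA tag, memory allocation, and map is set up, and ahbfree() switches on it with deliberate fallthrough so an error path releases only what was actually acquired. A minimal sketch of that teardown idiom follows; the level count and resource names are illustrative, not the driver's.

/*
 * Sketch of the init_level teardown pattern: higher levels fall
 * through to lower ones, undoing setup in reverse order.
 */
#include <stdio.h>

struct demo_softc {
	int	init_level;
	int	have_tag, have_mem, have_map;
};

static void
demo_free(struct demo_softc *sc)
{
	switch (sc->init_level) {
	case 3:
		sc->have_map = 0;	/* e.g. unload/destroy the DMA map */
		/* FALLTHROUGH */
	case 2:
		sc->have_mem = 0;	/* e.g. free the DMA memory */
		/* FALLTHROUGH */
	case 1:
		sc->have_tag = 0;	/* e.g. destroy the DMA tag */
		/* FALLTHROUGH */
	case 0:
		break;
	}
	printf("torn down from level %d\n", sc->init_level);
}

int
main(void)
{
	struct demo_softc sc = { .init_level = 2, .have_tag = 1, .have_mem = 1 };

	demo_free(&sc);		/* only levels 2..1 are undone */
	return (0);
}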
439 ahbreset(struct ahb_softc *ahb)
444 if ((ahb_inb(ahb, PORTADDR) & PORTADDR_ENHANCED) == 0) {
449 ahb_outb(ahb, CONTROL, CNTRL_HARD_RST);
451 ahb_outb(ahb, CONTROL, 0);
454 if ((ahb_inb(ahb, HOSTSTAT) & HOSTSTAT_BUSY) == 0)
462 if ((test = ahb_inb(ahb, MBOXIN0)) != 0) {
466 while (ahb_inb(ahb, HOSTSTAT) & HOSTSTAT_INTPEND) {
467 ahb_outb(ahb, CONTROL, CNTRL_CLRINT);
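
ahbreset() pulses the hard-reset bit in CONTROL, waits for HOSTSTAT_BUSY to clear, treats a nonzero MBOXIN0 as a failed self test, and finally drains any interrupts latched during the reset via CNTRL_CLRINT. The sketch below follows that ordering; the bit values and register variables are placeholders, not the adapter's real definitions.

/*
 * Sketch of the reset sequence: assert/deassert hard reset, wait for
 * busy to clear, sanity-check the incoming mailbox, drain interrupts.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BUSY	0x01
#define DEMO_INTPEND	0x02

static volatile uint8_t  demo_control;
static volatile uint8_t  demo_hoststat;
static volatile uint32_t demo_mboxin0;

static int
demo_reset(void)
{
	int wait;

	demo_control = 0x80;		/* assert hard reset */
	demo_control = 0;		/* deassert */

	/* Wait for the controller to finish its self test. */
	for (wait = 1000; wait != 0; wait--) {
		if ((demo_hoststat & DEMO_BUSY) == 0)
			break;
	}
	if (wait == 0 || demo_mboxin0 != 0) {
		fprintf(stderr, "demo: reset failed\n");
		return (-1);
	}

	/* Throw away any interrupts latched during the reset. */
	while (demo_hoststat & DEMO_INTPEND)
		demo_hoststat &= ~DEMO_INTPEND;	/* ack, like CNTRL_CLRINT */

	return (0);
}

int
main(void)
{
	return (demo_reset() == 0 ? 0 : 1);
}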
476 struct ahb_softc* ahb;
478 ahb = (struct ahb_softc*)arg;
479 ahb->ecb_physbase = segs->ds_addr;
484 ahb->ha_inq_physbase = ahbecbvtop(ahb, &ahb->ecb_array[AHB_NECB]);
488 ahbxptattach(struct ahb_softc *ahb)
494 mtx_lock(&ahb->lock);
497 ahb->scsi_id = ahb_inb(ahb, SCSIDEF) & HSCSIID;
500 ahb->extended_trans = ahb_inb(ahb, RESV1) & EXTENDED_TRANS;
503 ecb = ahbecbget(ahb); /* Always succeeds - no outstanding commands */
506 ecb->hecb.data_ptr = ahb->ha_inq_physbase;
512 ahbqueuembox(ahb, ahbecbvtop(ahb, ecb),
513 ATTN_STARTECB|ahb->scsi_id);
517 ahbintr_locked(ahb);
521 ahb->num_ecbs = MIN(ahb->num_ecbs,
522 ahb->ha_inq_data->scsi_data.spc2_flags);
523 device_printf(ahb->dev,
525 ahb->ha_inq_data->scsi_data.product,
526 (ahb->ha_inq_data->scsi_data.flags & 0x4) ? "Differential"
528 ahb->ha_inq_data->scsi_data.revision,
529 ahb->scsi_id, ahb->num_ecbs);
532 ecb->hecb.sense_ptr = ahbsensepaddr(ahbecbvtop(ahb, ecb));
534 ahbecbfree(ahb, ecb);
539 devq = cam_simq_alloc(ahb->num_ecbs);
541 mtx_unlock(&ahb->lock);
548 ahb->sim = cam_sim_alloc(ahbaction, ahbpoll, "ahb", ahb,
549 device_get_unit(ahb->dev), &ahb->lock, 2, ahb->num_ecbs, devq);
550 if (ahb->sim == NULL) {
552 mtx_unlock(&ahb->lock);
556 if (xpt_bus_register(ahb->sim, ahb->dev, 0) != CAM_SUCCESS) {
557 cam_sim_free(ahb->sim, /*free_devq*/TRUE);
558 mtx_unlock(&ahb->lock);
562 if (xpt_create_path(&ahb->path, /*periph*/NULL,
563 cam_sim_path(ahb->sim), CAM_TARGET_WILDCARD,
565 xpt_bus_deregister(cam_sim_path(ahb->sim));
566 cam_sim_free(ahb->sim, /*free_devq*/TRUE);
567 mtx_unlock(&ahb->lock);
574 ahb_outb(ahb, INTDEF, ahb_inb(ahb, INTDEF) | INTEN);
575 mtx_unlock(&ahb->lock);
581 ahbhandleimmed(struct ahb_softc *ahb, u_int32_t mbox, u_int intstat)
586 if (ahb->immed_cmd == 0) {
587 device_printf(ahb->dev, "Immediate Command complete with no "
594 ccb_h = LIST_FIRST(&ahb->pending_ccbs);
603 || target_id == ahb->scsi_id) {
607 bus_dmamap_unload(ahb->buffer_dmat,
609 if (pending_ecb == ahb->immed_ecb)
612 else if (target_id == ahb->scsi_id)
616 ahbecbfree(ahb, pending_ecb);
618 } else if (ahb->immed_ecb != NULL) {
626 if (ahb->immed_ecb != NULL) {
627 ahb->immed_ecb = NULL;
628 device_printf(ahb->dev, "No longer in timeout\n");
629 } else if (target_id == ahb->scsi_id)
630 device_printf(ahb->dev, "SCSI Bus Reset Delivered\n");
632 device_printf(ahb->dev,
635 ahb->immed_cmd = 0;
639 ahbcalcresid(struct ahb_softc *ahb, struct ecb *ecb, union ccb *ccb)
688 ahbprocesserror(struct ahb_softc *ahb, struct ecb *ecb, union ccb *ccb)
716 ahbcalcresid(ahb, ecb, ccb);
743 ahb->tags_permitted &= ~(0x01 << ccb->ccb_h.target_id);
763 device_printf(ahb->dev,
773 device_get_nameunit(ahb->dev), status->ha_status);
783 ahbdone(struct ahb_softc *ahb, u_int32_t mbox, u_int intstat)
788 ecb = ahbecbptov(ahb, mbox);
806 bus_dmamap_sync(ahb->buffer_dmat, ecb->dmamap, op);
807 bus_dmamap_unload(ahb->buffer_dmat, ecb->dmamap);
814 ahbprocesserror(ahb, ecb, ccb);
816 ahbecbfree(ahb, ecb);
821 device_printf(ahb->dev, "Command 0%x Failed %x:%x:%x\n",
836 struct ahb_softc *ahb;
838 ahb = arg;
839 mtx_lock(&ahb->lock);
840 ahbintr_locked(ahb);
841 mtx_unlock(&ahb->lock);
845 ahbintr_locked(struct ahb_softc *ahb)
850 while (ahb_inb(ahb, HOSTSTAT) & HOSTSTAT_INTPEND) {
854 intstat = ahb_inb(ahb, INTSTAT);
855 mbox = ahb_inl(ahb, MBOXIN0);
860 ahb_outb(ahb, CONTROL, CNTRL_CLRINT);
869 ahbdone(ahb, mbox, intstat);
872 if ((intstat & INTSTAT_TARGET_MASK) == ahb->scsi_id) {
874 xpt_print_path(ahb->path);
886 xpt_async(AC_BUS_RESET, ahb->path, NULL);
893 ahbhandleimmed(ahb, mbox, intstat);
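
ahbintr_locked() drains the interrupt latch in a loop: while HOSTSTAT reports a pending interrupt it snapshots INTSTAT and the incoming mailbox, acks via CNTRL_CLRINT, and then dispatches on the status code (ECB completion via ahbdone(), SCSI bus reset notification via xpt_async(), immediate-command completion via ahbhandleimmed()). The dispatch skeleton below is a hedged stand-in; the status codes and helpers are invented for the demo.

/*
 * Sketch of the interrupt drain-and-dispatch loop.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_INTPEND	0x01
#define DEMO_ECB_OK	0x10	/* hypothetical "ECB completed" code */
#define DEMO_IMMED_OK	0x20	/* hypothetical "immediate cmd done" code */

struct demo_hw {
	uint8_t		hoststat;
	uint8_t		intstat;
	uint32_t	mboxin0;
};

static void
demo_intr(struct demo_hw *hw)
{
	while (hw->hoststat & DEMO_INTPEND) {
		uint8_t  intstat = hw->intstat;
		uint32_t mbox = hw->mboxin0;

		hw->hoststat &= ~DEMO_INTPEND;	/* ack, like CNTRL_CLRINT */

		switch (intstat & 0xf0) {
		case DEMO_ECB_OK:
			printf("ECB at 0x%x completed\n", (unsigned)mbox);
			break;
		case DEMO_IMMED_OK:
			printf("immediate command done\n");
			break;
		default:
			printf("unexpected intstat 0x%02x\n", intstat);
			break;
		}
	}
}

int
main(void)
{
	struct demo_hw hw = { DEMO_INTPEND, DEMO_ECB_OK, 0x00200000 };

	demo_intr(&hw);
	return (0);
}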
906 struct ahb_softc *ahb;
911 ahb = (struct ahb_softc *)ccb->ccb_h.ccb_ahb_ptr;
912 mtx_assert(&ahb->lock, MA_OWNED);
916 device_printf(ahb->dev,
923 ahbecbfree(ahb, ecb);
928 ecb_paddr = ahbecbvtop(ahb, ecb);
963 bus_dmamap_sync(ahb->buffer_dmat, ecb->dmamap, op);
976 bus_dmamap_unload(ahb->buffer_dmat, ecb->dmamap);
977 ahbecbfree(ahb, ecb);
984 LIST_INSERT_HEAD(&ahb->pending_ccbs, &ccb->ccb_h, sim_links.le);
987 ahbqueuembox(ahb, ecb_paddr, ATTN_STARTECB|ccb->ccb_h.target_id);
996 struct ahb_softc *ahb;
1000 ahb = (struct ahb_softc *)cam_sim_softc(sim);
1001 mtx_assert(&ahb->lock, MA_OWNED);
1014 if ((ecb = ahbecbget(ahb)) == NULL) {
1024 ccb->ccb_h.ccb_ahb_ptr = ahb;
1051 ahbecbfree(ahb, ecb);
1061 ahb->buffer_dmat,
1071 xpt_freeze_simq(ahb->sim, 1);
1109 if ((ahb->disc_permitted & target_mask) != 0)
1111 if ((ahb->tags_permitted & target_mask) != 0)
1135 ahb->immed_cmd = IMMED_RESET;
1136 ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ccb->ccb_h.target_id);
1138 for (i = 1000; ahb->immed_cmd != 0 && i != 0; i--) {
1146 cam_calc_geometry(&ccb->ccg, ahb->extended_trans);
1154 ahb->immed_cmd = IMMED_RESET;
1155 ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ahb->scsi_id);
1157 for (i = 1000; ahb->immed_cmd != 0 && i != 0; i--)
1179 cpi->initiator_id = ahb->scsi_id;
1217 struct ahb_softc *ahb;
1221 ahb = (struct ahb_softc *)ccb->ccb_h.ccb_ahb_ptr;
1222 mtx_assert(&ahb->lock, MA_OWNED);
1246 xpt_freeze_simq(ahb->sim, /*count*/1);
1250 LIST_FOREACH(ccb_h, &ahb->pending_ccbs, sim_links.le) {
1258 ahb->immed_ecb = ecb;
1276 ahb->immed_cmd = IMMED_RESET;
1277 ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ccb->ccb_h.target_id);
1287 ahb->immed_cmd = IMMED_RESET;
1288 ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ahb->scsi_id);
1291 ahbreset(ahb);
1294 ahbhandleimmed(ahb, 0, ahb->scsi_id|INTSTAT_IMMED_OK);
1307 "ahb",
1314 DRIVER_MODULE(ahb, eisa, ahb_eisa_driver, ahb_devclass, 0, 0);
1315 MODULE_DEPEND(ahb, eisa, 1, 1, 1);
1316 MODULE_DEPEND(ahb, cam, 1, 1, 1);