Deleted Added
mly.c (78235) mly.c (79695)
1/*-
2 * Copyright (c) 2000, 2001 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:

--- 10 unchanged lines hidden ---

19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
1/*-
2 * Copyright (c) 2000, 2001 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:

--- 10 unchanged lines hidden ---

19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: head/sys/dev/mly/mly.c 78235 2001-06-15 00:16:59Z peter $
27 * $FreeBSD: head/sys/dev/mly/mly.c 79695 2001-07-14 00:12:23Z msmith $
28 */
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/malloc.h>
33#include <sys/kernel.h>
34#include <sys/bus.h>
35#include <sys/conf.h>
36#include <sys/ctype.h>
28 */
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/malloc.h>
33#include <sys/kernel.h>
34#include <sys/bus.h>
35#include <sys/conf.h>
36#include <sys/ctype.h>
37#include <sys/devicestat.h>
37#include <sys/ioccom.h>
38#include <sys/stat.h>
39
40#include <machine/bus_memio.h>
41#include <machine/bus.h>
42#include <machine/resource.h>
43#include <sys/rman.h>
44
38#include <sys/ioccom.h>
39#include <sys/stat.h>
40
41#include <machine/bus_memio.h>
42#include <machine/bus.h>
43#include <machine/resource.h>
44#include <sys/rman.h>
45
46#include <cam/cam.h>
47#include <cam/cam_ccb.h>
48#include <cam/cam_periph.h>
49#include <cam/cam_sim.h>
50#include <cam/cam_xpt_sim.h>
45#include <cam/scsi/scsi_all.h>
51#include <cam/scsi/scsi_all.h>
52#include <cam/scsi/scsi_message.h>
46
53
54#include <pci/pcireg.h>
55#include <pci/pcivar.h>
56
47#include <dev/mly/mlyreg.h>
48#include <dev/mly/mlyio.h>
49#include <dev/mly/mlyvar.h>
57#include <dev/mly/mlyreg.h>
58#include <dev/mly/mlyio.h>
59#include <dev/mly/mlyvar.h>
50#define MLY_DEFINE_TABLES
51#include <dev/mly/mly_tables.h>
52
60#include <dev/mly/mly_tables.h>
61
62static int mly_probe(device_t dev);
63static int mly_attach(device_t dev);
64static int mly_pci_attach(struct mly_softc *sc);
65static int mly_detach(device_t dev);
66static int mly_shutdown(device_t dev);
67static void mly_intr(void *arg);
68
69static int mly_sg_map(struct mly_softc *sc);
70static void mly_sg_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error);
71static int mly_mmbox_map(struct mly_softc *sc);
72static void mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error);
73static void mly_free(struct mly_softc *sc);
74
53static int mly_get_controllerinfo(struct mly_softc *sc);
54static void mly_scan_devices(struct mly_softc *sc);
55static void mly_rescan_btl(struct mly_softc *sc, int bus, int target);
56static void mly_complete_rescan(struct mly_command *mc);
57static int mly_get_eventstatus(struct mly_softc *sc);
58static int mly_enable_mmbox(struct mly_softc *sc);
59static int mly_flush(struct mly_softc *sc);
60static int mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data,
61 size_t datasize, u_int8_t *status, void *sense_buffer, size_t *sense_length);
75static int mly_get_controllerinfo(struct mly_softc *sc);
76static void mly_scan_devices(struct mly_softc *sc);
77static void mly_rescan_btl(struct mly_softc *sc, int bus, int target);
78static void mly_complete_rescan(struct mly_command *mc);
79static int mly_get_eventstatus(struct mly_softc *sc);
80static int mly_enable_mmbox(struct mly_softc *sc);
81static int mly_flush(struct mly_softc *sc);
82static int mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data,
83 size_t datasize, u_int8_t *status, void *sense_buffer, size_t *sense_length);
84static void mly_check_event(struct mly_softc *sc);
62static void mly_fetch_event(struct mly_softc *sc);
63static void mly_complete_event(struct mly_command *mc);
64static void mly_process_event(struct mly_softc *sc, struct mly_event *me);
65static void mly_periodic(void *data);
66
67static int mly_immediate_command(struct mly_command *mc);
68static int mly_start(struct mly_command *mc);
85static void mly_fetch_event(struct mly_softc *sc);
86static void mly_complete_event(struct mly_command *mc);
87static void mly_process_event(struct mly_softc *sc, struct mly_event *me);
88static void mly_periodic(void *data);
89
90static int mly_immediate_command(struct mly_command *mc);
91static int mly_start(struct mly_command *mc);
92static void mly_done(struct mly_softc *sc);
69static void mly_complete(void *context, int pending);
70
93static void mly_complete(void *context, int pending);
94
95static int mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp);
96static void mly_release_command(struct mly_command *mc);
71static void mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error);
72static int mly_alloc_commands(struct mly_softc *sc);
97static void mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error);
98static int mly_alloc_commands(struct mly_softc *sc);
99static void mly_release_commands(struct mly_softc *sc);
73static void mly_map_command(struct mly_command *mc);
74static void mly_unmap_command(struct mly_command *mc);
75
100static void mly_map_command(struct mly_command *mc);
101static void mly_unmap_command(struct mly_command *mc);
102
103static int mly_cam_attach(struct mly_softc *sc);
104static void mly_cam_detach(struct mly_softc *sc);
105static void mly_cam_rescan_btl(struct mly_softc *sc, int bus, int target);
106static void mly_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb);
107static void mly_cam_action(struct cam_sim *sim, union ccb *ccb);
108static int mly_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio);
109static void mly_cam_poll(struct cam_sim *sim);
110static void mly_cam_complete(struct mly_command *mc);
111static struct cam_periph *mly_find_periph(struct mly_softc *sc, int bus, int target);
112static int mly_name_device(struct mly_softc *sc, int bus, int target);
113
76static int mly_fwhandshake(struct mly_softc *sc);
77
78static void mly_describe_controller(struct mly_softc *sc);
79#ifdef MLY_DEBUG
80static void mly_printstate(struct mly_softc *sc);
81static void mly_print_command(struct mly_command *mc);
82static void mly_print_packet(struct mly_command *mc);
83static void mly_panic(struct mly_softc *sc, char *reason);
84#endif
85void mly_print_controller(int controller);
86
114static int mly_fwhandshake(struct mly_softc *sc);
115
116static void mly_describe_controller(struct mly_softc *sc);
117#ifdef MLY_DEBUG
118static void mly_printstate(struct mly_softc *sc);
119static void mly_print_command(struct mly_command *mc);
120static void mly_print_packet(struct mly_command *mc);
121static void mly_panic(struct mly_softc *sc, char *reason);
122#endif
123void mly_print_controller(int controller);
124
125
87static d_open_t mly_user_open;
88static d_close_t mly_user_close;
89static d_ioctl_t mly_user_ioctl;
90static int mly_user_command(struct mly_softc *sc, struct mly_user_command *uc);
91static int mly_user_health(struct mly_softc *sc, struct mly_user_health *uh);
92
126static d_open_t mly_user_open;
127static d_close_t mly_user_close;
128static d_ioctl_t mly_user_ioctl;
129static int mly_user_command(struct mly_softc *sc, struct mly_user_command *uc);
130static int mly_user_health(struct mly_softc *sc, struct mly_user_health *uh);
131
132
133static device_method_t mly_methods[] = {
134 /* Device interface */
135 DEVMETHOD(device_probe, mly_probe),
136 DEVMETHOD(device_attach, mly_attach),
137 DEVMETHOD(device_detach, mly_detach),
138 DEVMETHOD(device_shutdown, mly_shutdown),
139 { 0, 0 }
140};
141
142static driver_t mly_pci_driver = {
143 "mly",
144 mly_methods,
145 sizeof(struct mly_softc)
146};
147
148static devclass_t mly_devclass;
149DRIVER_MODULE(mly, pci, mly_pci_driver, mly_devclass, 0, 0);
150
93#define MLY_CDEV_MAJOR 158
94
95static struct cdevsw mly_cdevsw = {
96 mly_user_open,
97 mly_user_close,
98 noread,
99 nowrite,
100 mly_user_ioctl,

--- 8 unchanged lines hidden ---

109};
110
111/********************************************************************************
112 ********************************************************************************
113 Device Interface
114 ********************************************************************************
115 ********************************************************************************/
116
151#define MLY_CDEV_MAJOR 158
152
153static struct cdevsw mly_cdevsw = {
154 mly_user_open,
155 mly_user_close,
156 noread,
157 nowrite,
158 mly_user_ioctl,

--- 8 unchanged lines hidden ---

167};
168
169/********************************************************************************
170 ********************************************************************************
171 Device Interface
172 ********************************************************************************
173 ********************************************************************************/
174
175static struct mly_ident
176{
177 u_int16_t vendor;
178 u_int16_t device;
179 u_int16_t subvendor;
180 u_int16_t subdevice;
181 int hwif;
182 char *desc;
183} mly_identifiers[] = {
184 {0x1069, 0xba56, 0x1069, 0x0040, MLY_HWIF_STRONGARM, "Mylex eXtremeRAID 2000"},
185 {0x1069, 0xba56, 0x1069, 0x0030, MLY_HWIF_STRONGARM, "Mylex eXtremeRAID 3000"},
186 {0x1069, 0x0050, 0x1069, 0x0050, MLY_HWIF_I960RX, "Mylex AcceleRAID 352"},
187 {0x1069, 0x0050, 0x1069, 0x0052, MLY_HWIF_I960RX, "Mylex AcceleRAID 170"},
188 {0x1069, 0x0050, 0x1069, 0x0054, MLY_HWIF_I960RX, "Mylex AcceleRAID 160"},
189 {0, 0, 0, 0, 0, 0}
190};
191
117/********************************************************************************
192/********************************************************************************
193 * Compare the provided PCI device with the list we support.
194 */
195static int
196mly_probe(device_t dev)
197{
198 struct mly_ident *m;
199
200 debug_called(1);
201
202 for (m = mly_identifiers; m->vendor != 0; m++) {
203 if ((m->vendor == pci_get_vendor(dev)) &&
204 (m->device == pci_get_device(dev)) &&
205 ((m->subvendor == 0) || ((m->subvendor == pci_get_subvendor(dev)) &&
206 (m->subdevice == pci_get_subdevice(dev))))) {
207
208 device_set_desc(dev, m->desc);
209#ifdef MLY_MODULE
210 return(-5);
211#else
212 return(-10); /* allow room to be overridden */
213#endif
214 }
215 }
216 return(ENXIO);
217}
218
219/********************************************************************************
118 * Initialise the controller and softc
119 */
120int
220 * Initialise the controller and softc
221 */
222int
121mly_attach(struct mly_softc *sc)
223mly_attach(device_t dev)
122{
224{
123 int error;
225 struct mly_softc *sc = device_get_softc(dev);
226 int error;
124
125 debug_called(1);
126
227
228 debug_called(1);
229
230 sc->mly_dev = dev;
231
232#ifdef MLY_DEBUG
233 if (device_get_unit(sc->mly_dev) == 0)
234 mly_softc0 = sc;
235#endif
236
127 /*
237 /*
238 * Do PCI-specific initialisation.
239 */
240 if ((error = mly_pci_attach(sc)) != 0)
241 goto out;
242
243 /*
128 * Initialise per-controller queues.
129 */
130 mly_initq_free(sc);
244 * Initialise per-controller queues.
245 */
246 mly_initq_free(sc);
131 mly_initq_ready(sc);
132 mly_initq_busy(sc);
133 mly_initq_complete(sc);
134
135#if __FreeBSD_version >= 500005
136 /*
137 * Initialise command-completion task.
138 */
139 TASK_INIT(&sc->mly_task_complete, 0, mly_complete, sc);
140#endif
141
142 /* disable interrupts before we start talking to the controller */
143 MLY_MASK_INTERRUPTS(sc);
144
145 /*
146 * Wait for the controller to come ready, handshake with the firmware if required.
147 * This is typically only necessary on platforms where the controller BIOS does not
148 * run.
149 */
150 if ((error = mly_fwhandshake(sc)))
247 mly_initq_busy(sc);
248 mly_initq_complete(sc);
249
250#if __FreeBSD_version >= 500005
251 /*
252 * Initialise command-completion task.
253 */
254 TASK_INIT(&sc->mly_task_complete, 0, mly_complete, sc);
255#endif
256
257 /* disable interrupts before we start talking to the controller */
258 MLY_MASK_INTERRUPTS(sc);
259
260 /*
261 * Wait for the controller to come ready, handshake with the firmware if required.
262 * This is typically only necessary on platforms where the controller BIOS does not
263 * run.
264 */
265 if ((error = mly_fwhandshake(sc)))
151 return(error);
266 goto out;
152
153 /*
267
268 /*
154 * Allocate command buffers
269 * Allocate initial command buffers.
155 */
156 if ((error = mly_alloc_commands(sc)))
270 */
271 if ((error = mly_alloc_commands(sc)))
157 return(error);
272 goto out;
158
159 /*
160 * Obtain controller feature information
161 */
162 if ((error = mly_get_controllerinfo(sc)))
273
274 /*
275 * Obtain controller feature information
276 */
277 if ((error = mly_get_controllerinfo(sc)))
163 return(error);
278 goto out;
164
165 /*
279
280 /*
281 * Reallocate command buffers now we know how many we want.
282 */
283 mly_release_commands(sc);
284 if ((error = mly_alloc_commands(sc)))
285 goto out;
286
287 /*
166 * Get the current event counter for health purposes, populate the initial
167 * health status buffer.
168 */
169 if ((error = mly_get_eventstatus(sc)))
288 * Get the current event counter for health purposes, populate the initial
289 * health status buffer.
290 */
291 if ((error = mly_get_eventstatus(sc)))
170 return(error);
292 goto out;
171
172 /*
293
294 /*
173 * Enable memory-mailbox mode
295 * Enable memory-mailbox mode.
174 */
175 if ((error = mly_enable_mmbox(sc)))
296 */
297 if ((error = mly_enable_mmbox(sc)))
176 return(error);
298 goto out;
177
178 /*
179 * Attach to CAM.
180 */
181 if ((error = mly_cam_attach(sc)))
299
300 /*
301 * Attach to CAM.
302 */
303 if ((error = mly_cam_attach(sc)))
182 return(error);
304 goto out;
183
184 /*
185 * Print a little information about the controller
186 */
187 mly_describe_controller(sc);
188
189 /*
305
306 /*
307 * Print a little information about the controller
308 */
309 mly_describe_controller(sc);
310
311 /*
190 * Mark all attached devices for rescan
312 * Mark all attached devices for rescan.
191 */
192 mly_scan_devices(sc);
193
194 /*
195 * Instigate the first status poll immediately. Rescan completions won't
196 * happen until interrupts are enabled, which should still be before
313 */
314 mly_scan_devices(sc);
315
316 /*
317 * Instigate the first status poll immediately. Rescan completions won't
318 * happen until interrupts are enabled, which should still be before
197 * the SCSI subsystem gets to us. (XXX assuming CAM and interrupt-driven
198 * discovery here...)
319 * the SCSI subsystem gets to us, courtesy of the "SCSI settling delay".
199 */
200 mly_periodic((void *)sc);
201
202 /*
203 * Create the control device.
204 */
205 sc->mly_dev_t = make_dev(&mly_cdevsw, device_get_unit(sc->mly_dev), UID_ROOT, GID_OPERATOR,
206 S_IRUSR | S_IWUSR, "mly%d", device_get_unit(sc->mly_dev));
207 sc->mly_dev_t->si_drv1 = sc;
208
209 /* enable interrupts now */
210 MLY_UNMASK_INTERRUPTS(sc);
211
320 */
321 mly_periodic((void *)sc);
322
323 /*
324 * Create the control device.
325 */
326 sc->mly_dev_t = make_dev(&mly_cdevsw, device_get_unit(sc->mly_dev), UID_ROOT, GID_OPERATOR,
327 S_IRUSR | S_IWUSR, "mly%d", device_get_unit(sc->mly_dev));
328 sc->mly_dev_t->si_drv1 = sc;
329
330 /* enable interrupts now */
331 MLY_UNMASK_INTERRUPTS(sc);
332
333 out:
334 if (error != 0)
335 mly_free(sc);
336 return(error);
337}
338
339/********************************************************************************
340 * Perform PCI-specific initialisation.
341 */
342static int
343mly_pci_attach(struct mly_softc *sc)
344{
345 int i, error;
346 u_int32_t command;
347
348 debug_called(1);
349
350 /* assume failure is 'not configured' */
351 error = ENXIO;
352
353 /*
354 * Verify that the adapter is correctly set up in PCI space.
355 *
356 * XXX we shouldn't do this; the PCI code should.
357 */
358 command = pci_read_config(sc->mly_dev, PCIR_COMMAND, 2);
359 command |= PCIM_CMD_BUSMASTEREN;
360 pci_write_config(sc->mly_dev, PCIR_COMMAND, command, 2);
361 command = pci_read_config(sc->mly_dev, PCIR_COMMAND, 2);
362 if (!(command & PCIM_CMD_BUSMASTEREN)) {
363 mly_printf(sc, "can't enable busmaster feature\n");
364 goto fail;
365 }
366 if ((command & PCIM_CMD_MEMEN) == 0) {
367 mly_printf(sc, "memory window not available\n");
368 goto fail;
369 }
370
371 /*
372 * Allocate the PCI register window.
373 */
374 sc->mly_regs_rid = PCIR_MAPS; /* first base address register */
375 if ((sc->mly_regs_resource = bus_alloc_resource(sc->mly_dev, SYS_RES_MEMORY, &sc->mly_regs_rid,
376 0, ~0, 1, RF_ACTIVE)) == NULL) {
377 mly_printf(sc, "can't allocate register window\n");
378 goto fail;
379 }
380 sc->mly_btag = rman_get_bustag(sc->mly_regs_resource);
381 sc->mly_bhandle = rman_get_bushandle(sc->mly_regs_resource);
382
383 /*
384 * Allocate and connect our interrupt.
385 */
386 sc->mly_irq_rid = 0;
387 if ((sc->mly_irq = bus_alloc_resource(sc->mly_dev, SYS_RES_IRQ, &sc->mly_irq_rid,
388 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
389 mly_printf(sc, "can't allocate interrupt\n");
390 goto fail;
391 }
392 if (bus_setup_intr(sc->mly_dev, sc->mly_irq, INTR_TYPE_CAM | INTR_ENTROPY, mly_intr, sc, &sc->mly_intr)) {
393 mly_printf(sc, "can't set up interrupt\n");
394 goto fail;
395 }
396
397 /* assume failure is 'out of memory' */
398 error = ENOMEM;
399
400 /*
401 * Allocate the parent bus DMA tag appropriate for our PCI interface.
402 *
403 * Note that all of these controllers are 64-bit capable.
404 */
405 if (bus_dma_tag_create(NULL, /* parent */
406 1, 0, /* alignment, boundary */
407 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
408 BUS_SPACE_MAXADDR, /* highaddr */
409 NULL, NULL, /* filter, filterarg */
410 MAXBSIZE, MLY_MAX_SGENTRIES, /* maxsize, nsegments */
411 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
412 BUS_DMA_ALLOCNOW, /* flags */
413 &sc->mly_parent_dmat)) {
414 mly_printf(sc, "can't allocate parent DMA tag\n");
415 goto fail;
416 }
417
418 /*
419 * Create DMA tag for mapping buffers into controller-addressable space.
420 */
421 if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */
422 1, 0, /* alignment, boundary */
423 BUS_SPACE_MAXADDR, /* lowaddr */
424 BUS_SPACE_MAXADDR, /* highaddr */
425 NULL, NULL, /* filter, filterarg */
426 MAXBSIZE, MLY_MAX_SGENTRIES, /* maxsize, nsegments */
427 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
428 0, /* flags */
429 &sc->mly_buffer_dmat)) {
430 mly_printf(sc, "can't allocate buffer DMA tag\n");
431 goto fail;
432 }
433
434 /*
435 * Initialise the DMA tag for command packets.
436 */
437 if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */
438 1, 0, /* alignment, boundary */
439 BUS_SPACE_MAXADDR, /* lowaddr */
440 BUS_SPACE_MAXADDR, /* highaddr */
441 NULL, NULL, /* filter, filterarg */
442 sizeof(union mly_command_packet) * MLY_MAX_COMMANDS, 1, /* maxsize, nsegments */
443 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
444 0, /* flags */
445 &sc->mly_packet_dmat)) {
446 mly_printf(sc, "can't allocate command packet DMA tag\n");
447 goto fail;
448 }
449
450 /*
451 * Detect the hardware interface version
452 */
453 for (i = 0; mly_identifiers[i].vendor != 0; i++) {
454 if ((mly_identifiers[i].vendor == pci_get_vendor(sc->mly_dev)) &&
455 (mly_identifiers[i].device == pci_get_device(sc->mly_dev))) {
456 sc->mly_hwif = mly_identifiers[i].hwif;
457 switch(sc->mly_hwif) {
458 case MLY_HWIF_I960RX:
459 debug(1, "set hardware up for i960RX");
460 sc->mly_doorbell_true = 0x00;
461 sc->mly_command_mailbox = MLY_I960RX_COMMAND_MAILBOX;
462 sc->mly_status_mailbox = MLY_I960RX_STATUS_MAILBOX;
463 sc->mly_idbr = MLY_I960RX_IDBR;
464 sc->mly_odbr = MLY_I960RX_ODBR;
465 sc->mly_error_status = MLY_I960RX_ERROR_STATUS;
466 sc->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS;
467 sc->mly_interrupt_mask = MLY_I960RX_INTERRUPT_MASK;
468 break;
469 case MLY_HWIF_STRONGARM:
470 debug(1, "set hardware up for StrongARM");
471 sc->mly_doorbell_true = 0xff; /* doorbell 'true' is 0 */
472 sc->mly_command_mailbox = MLY_STRONGARM_COMMAND_MAILBOX;
473 sc->mly_status_mailbox = MLY_STRONGARM_STATUS_MAILBOX;
474 sc->mly_idbr = MLY_STRONGARM_IDBR;
475 sc->mly_odbr = MLY_STRONGARM_ODBR;
476 sc->mly_error_status = MLY_STRONGARM_ERROR_STATUS;
477 sc->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS;
478 sc->mly_interrupt_mask = MLY_STRONGARM_INTERRUPT_MASK;
479 break;
480 }
481 break;
482 }
483 }
484
485 /*
486 * Create the scatter/gather mappings.
487 */
488 if ((error = mly_sg_map(sc)))
489 goto fail;
490
491 /*
492 * Allocate and map the memory mailbox
493 */
494 if ((error = mly_mmbox_map(sc)))
495 goto fail;
496
497 error = 0;
498
499fail:
500 return(error);
501}
502
503/********************************************************************************
504 * Shut the controller down and detach all our resources.
505 */
506static int
507mly_detach(device_t dev)
508{
509 int error;
510
511 if ((error = mly_shutdown(dev)) != 0)
512 return(error);
513
514 mly_free(device_get_softc(dev));
212 return(0);
213}
214
215/********************************************************************************
216 * Bring the controller to a state where it can be safely left alone.
515 return(0);
516}
517
518/********************************************************************************
519 * Bring the controller to a state where it can be safely left alone.
520 *
521 * Note that it should not be necessary to wait for any outstanding commands,
522 * as they should be completed prior to calling here.
523 *
524 * XXX this applies for I/O, but not status polls; we should beware of
525 * the case where a status command is running while we detach.
217 */
526 */
218void
219mly_detach(struct mly_softc *sc)
527static int
528mly_shutdown(device_t dev)
220{
529{
530 struct mly_softc *sc = device_get_softc(dev);
221
222 debug_called(1);
531
532 debug_called(1);
533
534 if (sc->mly_state & MLY_STATE_OPEN)
535 return(EBUSY);
223
224 /* kill the periodic event */
225 untimeout(mly_periodic, sc, sc->mly_periodic);
226
536
537 /* kill the periodic event */
538 untimeout(mly_periodic, sc, sc->mly_periodic);
539
227 sc->mly_state |= MLY_STATE_SUSPEND;
228
229 /* flush controller */
230 mly_printf(sc, "flushing cache...");
231 printf("%s\n", mly_flush(sc) ? "failed" : "done");
232
233 MLY_MASK_INTERRUPTS(sc);
540 /* flush controller */
541 mly_printf(sc, "flushing cache...");
542 printf("%s\n", mly_flush(sc) ? "failed" : "done");
543
544 MLY_MASK_INTERRUPTS(sc);
545
546 return(0);
234}
235
547}
548
549/*******************************************************************************
550 * Take an interrupt, or be poked by other code to look for interrupt-worthy
551 * status.
552 */
553static void
554mly_intr(void *arg)
555{
556 struct mly_softc *sc = (struct mly_softc *)arg;
557
558 debug_called(2);
559
560 mly_done(sc);
561};
562
236/********************************************************************************
237 ********************************************************************************
563/********************************************************************************
564 ********************************************************************************
565 Bus-dependent Resource Management
566 ********************************************************************************
567 ********************************************************************************/
568
569/********************************************************************************
570 * Allocate memory for the scatter/gather tables
571 */
572static int
573mly_sg_map(struct mly_softc *sc)
574{
575 size_t segsize;
576
577 debug_called(1);
578
579 /*
580 * Create a single tag describing a region large enough to hold all of
581 * the s/g lists we will need.
582 */
583 segsize = sizeof(struct mly_sg_entry) * MLY_MAX_COMMANDS * MLY_MAX_SGENTRIES;
584 if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */
585 1, 0, /* alignment, boundary */
586 BUS_SPACE_MAXADDR, /* lowaddr */
587 BUS_SPACE_MAXADDR, /* highaddr */
588 NULL, NULL, /* filter, filterarg */
589 segsize, 1, /* maxsize, nsegments */
590 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
591 0, /* flags */
592 &sc->mly_sg_dmat)) {
593 mly_printf(sc, "can't allocate scatter/gather DMA tag\n");
594 return(ENOMEM);
595 }
596
597 /*
598 * Allocate enough s/g maps for all commands and permanently map them into
599 * controller-visible space.
600 *
601 * XXX this assumes we can get enough space for all the s/g maps in one
602 * contiguous slab.
603 */
604 if (bus_dmamem_alloc(sc->mly_sg_dmat, (void **)&sc->mly_sg_table, BUS_DMA_NOWAIT, &sc->mly_sg_dmamap)) {
605 mly_printf(sc, "can't allocate s/g table\n");
606 return(ENOMEM);
607 }
608 bus_dmamap_load(sc->mly_sg_dmat, sc->mly_sg_dmamap, sc->mly_sg_table, segsize, mly_sg_map_helper, sc, 0);
609 return(0);
610}
611
612/********************************************************************************
613 * Save the physical address of the base of the s/g table.
614 */
615static void
616mly_sg_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
617{
618 struct mly_softc *sc = (struct mly_softc *)arg;
619
620 debug_called(1);
621
622 /* save base of s/g table's address in bus space */
623 sc->mly_sg_busaddr = segs->ds_addr;
624}
625
626/********************************************************************************
627 * Allocate memory for the memory-mailbox interface
628 */
629static int
630mly_mmbox_map(struct mly_softc *sc)
631{
632
633 /*
634 * Create a DMA tag for a single contiguous region large enough for the
635 * memory mailbox structure.
636 */
637 if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */
638 1, 0, /* alignment, boundary */
639 BUS_SPACE_MAXADDR, /* lowaddr */
640 BUS_SPACE_MAXADDR, /* highaddr */
641 NULL, NULL, /* filter, filterarg */
642 sizeof(struct mly_mmbox), 1, /* maxsize, nsegments */
643 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
644 0, /* flags */
645 &sc->mly_mmbox_dmat)) {
646 mly_printf(sc, "can't allocate memory mailbox DMA tag\n");
647 return(ENOMEM);
648 }
649
650 /*
651 * Allocate the buffer
652 */
653 if (bus_dmamem_alloc(sc->mly_mmbox_dmat, (void **)&sc->mly_mmbox, BUS_DMA_NOWAIT, &sc->mly_mmbox_dmamap)) {
654 mly_printf(sc, "can't allocate memory mailbox\n");
655 return(ENOMEM);
656 }
657 bus_dmamap_load(sc->mly_mmbox_dmat, sc->mly_mmbox_dmamap, sc->mly_mmbox, sizeof(struct mly_mmbox),
658 mly_mmbox_map_helper, sc, 0);
659 bzero(sc->mly_mmbox, sizeof(*sc->mly_mmbox));
660 return(0);
661
662}
663
664/********************************************************************************
665 * Save the physical address of the memory mailbox
666 */
667static void
668mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
669{
670 struct mly_softc *sc = (struct mly_softc *)arg;
671
672 debug_called(1);
673
674 sc->mly_mmbox_busaddr = segs->ds_addr;
675}
676
677/********************************************************************************
678 * Free all of the resources associated with (sc)
679 *
680 * Should not be called if the controller is active.
681 */
682void
683mly_free(struct mly_softc *sc)
684{
685
686 debug_called(1);
687
688 /* detach from CAM */
689 mly_cam_detach(sc);
690
691 /* release command memory */
692 mly_release_commands(sc);
693
694 /* throw away the controllerinfo structure */
695 if (sc->mly_controllerinfo != NULL)
696 free(sc->mly_controllerinfo, M_DEVBUF);
697
698 /* throw away the controllerparam structure */
699 if (sc->mly_controllerparam != NULL)
700 free(sc->mly_controllerparam, M_DEVBUF);
701
702 /* destroy data-transfer DMA tag */
703 if (sc->mly_buffer_dmat)
704 bus_dma_tag_destroy(sc->mly_buffer_dmat);
705
706 /* free and destroy DMA memory and tag for s/g lists */
707 if (sc->mly_sg_table) {
708 bus_dmamap_unload(sc->mly_sg_dmat, sc->mly_sg_dmamap);
709 bus_dmamem_free(sc->mly_sg_dmat, sc->mly_sg_table, sc->mly_sg_dmamap);
710 }
711 if (sc->mly_sg_dmat)
712 bus_dma_tag_destroy(sc->mly_sg_dmat);
713
714 /* free and destroy DMA memory and tag for memory mailbox */
715 if (sc->mly_mmbox) {
716 bus_dmamap_unload(sc->mly_mmbox_dmat, sc->mly_mmbox_dmamap);
717 bus_dmamem_free(sc->mly_mmbox_dmat, sc->mly_mmbox, sc->mly_mmbox_dmamap);
718 }
719 if (sc->mly_mmbox_dmat)
720 bus_dma_tag_destroy(sc->mly_mmbox_dmat);
721
722 /* disconnect the interrupt handler */
723 if (sc->mly_intr)
724 bus_teardown_intr(sc->mly_dev, sc->mly_irq, sc->mly_intr);
725 if (sc->mly_irq != NULL)
726 bus_release_resource(sc->mly_dev, SYS_RES_IRQ, sc->mly_irq_rid, sc->mly_irq);
727
728 /* destroy the parent DMA tag */
729 if (sc->mly_parent_dmat)
730 bus_dma_tag_destroy(sc->mly_parent_dmat);
731
732 /* release the register window mapping */
733 if (sc->mly_regs_resource != NULL)
734 bus_release_resource(sc->mly_dev, SYS_RES_MEMORY, sc->mly_regs_rid, sc->mly_regs_resource);
735}
736
737/********************************************************************************
738 ********************************************************************************
238 Command Wrappers
239 ********************************************************************************
240 ********************************************************************************/
241
242/********************************************************************************
243 * Fill in the mly_controllerinfo and mly_controllerparam fields in the softc.
244 */
245static int

--- 36 unchanged lines hidden ---

282
283/********************************************************************************
284 * Schedule all possible devices for a rescan.
285 *
286 */
287static void
288mly_scan_devices(struct mly_softc *sc)
289{
739 Command Wrappers
740 ********************************************************************************
741 ********************************************************************************/
742
743/********************************************************************************
744 * Fill in the mly_controllerinfo and mly_controllerparam fields in the softc.
745 */
746static int

--- 36 unchanged lines hidden (view full) ---

783
784/********************************************************************************
785 * Schedule all possible devices for a rescan.
786 *
787 */
788static void
789mly_scan_devices(struct mly_softc *sc)
790{
290 int bus, target, nchn;
791 int bus, target;
291
292 debug_called(1);
293
294 /*
295 * Clear any previous BTL information.
296 */
297 bzero(&sc->mly_btl, sizeof(sc->mly_btl));
298
299 /*
792
793 debug_called(1);
794
795 /*
796 * Clear any previous BTL information.
797 */
798 bzero(&sc->mly_btl, sizeof(sc->mly_btl));
799
800 /*
300 * Mark all devices as requiring a rescan, and let the early periodic scan collect them.
801 * Mark all devices as requiring a rescan, and let the next
802 * periodic scan collect them.
301 */
803 */
302 nchn = sc->mly_controllerinfo->physical_channels_present +
303 sc->mly_controllerinfo->virtual_channels_present;
304 for (bus = 0; bus < nchn; bus++)
305 for (target = 0; target < MLY_MAX_TARGETS; target++)
306 sc->mly_btl[bus][target].mb_flags = MLY_BTL_RESCAN;
804 for (bus = 0; bus < sc->mly_cam_channels; bus++)
805 if (MLY_BUS_IS_VALID(sc, bus))
806 for (target = 0; target < MLY_MAX_TARGETS; target++)
807 sc->mly_btl[bus][target].mb_flags = MLY_BTL_RESCAN;
307
308}
309
310/********************************************************************************
311 * Rescan a device, possibly as a consequence of getting an event which suggests
312 * that it may have changed.
808
809}
810
811/********************************************************************************
812 * Rescan a device, possibly as a consequence of getting an event which suggests
813 * that it may have changed.
814 *
815 * If we suffer resource starvation, we can abandon the rescan as we'll be
816 * retried.
313 */
314static void
315mly_rescan_btl(struct mly_softc *sc, int bus, int target)
316{
317 struct mly_command *mc;
318 struct mly_command_ioctl *mci;
319
817 */
818static void
819mly_rescan_btl(struct mly_softc *sc, int bus, int target)
820{
821 struct mly_command *mc;
822 struct mly_command_ioctl *mci;
823
320 debug_called(2);
824 debug_called(1);
321
825
826 /* check that this bus is valid */
827 if (!MLY_BUS_IS_VALID(sc, bus))
828 return;
829
322 /* get a command */
830 /* get a command */
323 mc = NULL;
324 if (mly_alloc_command(sc, &mc))
831 if (mly_alloc_command(sc, &mc))
325 return; /* we'll be retried soon */
832 return;
326
327 /* set up the data buffer */
328 if ((mc->mc_data = malloc(sizeof(union mly_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
329 mly_release_command(mc);
833
834 /* set up the data buffer */
835 if ((mc->mc_data = malloc(sizeof(union mly_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
836 mly_release_command(mc);
330 return; /* we'll get retried the next time a command completes */
837 return;
331 }
332 mc->mc_flags |= MLY_CMD_DATAIN;
333 mc->mc_complete = mly_complete_rescan;
334
838 }
839 mc->mc_flags |= MLY_CMD_DATAIN;
840 mc->mc_complete = mly_complete_rescan;
841
335 sc->mly_btl[bus][target].mb_flags &= ~MLY_BTL_RESCAN;
336
337 /*
338 * Build the ioctl.
842 /*
843 * Build the ioctl.
339 *
340 * At this point we are committed to sending this request, as it
341 * will be the only one constructed for this particular update.
342 */
343 mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
344 mci->opcode = MDACMD_IOCTL;
345 mci->addr.phys.controller = 0;
346 mci->timeout.value = 30;
347 mci->timeout.scale = MLY_TIMEOUT_SECONDS;
844 */
845 mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
846 mci->opcode = MDACMD_IOCTL;
847 mci->addr.phys.controller = 0;
848 mci->timeout.value = 30;
849 mci->timeout.scale = MLY_TIMEOUT_SECONDS;
348 if (bus >= sc->mly_controllerinfo->physical_channels_present) {
850 if (bus < sc->mly_controllerinfo->virtual_channels_present) {
349 mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getlogdevinfovalid);
350 mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
851 mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getlogdevinfovalid);
852 mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
351 mci->addr.log.logdev = ((bus - sc->mly_controllerinfo->physical_channels_present) * MLY_MAX_TARGETS)
352 + target;
353 debug(2, "logical device %d", mci->addr.log.logdev);
853 mci->addr.log.logdev = MLY_LOGDEV_ID(sc, bus, target);
854 debug(1, "logical device %d", mci->addr.log.logdev);
354 } else {
355 mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getphysdevinfovalid);
356 mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
357 mci->addr.phys.lun = 0;
358 mci->addr.phys.target = target;
359 mci->addr.phys.channel = bus;
855 } else {
856 mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getphysdevinfovalid);
857 mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
858 mci->addr.phys.lun = 0;
859 mci->addr.phys.target = target;
860 mci->addr.phys.channel = bus;
360 debug(2, "physical device %d:%d", mci->addr.phys.channel, mci->addr.phys.target);
861 debug(1, "physical device %d:%d", mci->addr.phys.channel, mci->addr.phys.target);
361 }
362
363 /*
862 }
863
864 /*
364 * Use the ready queue to get this command dispatched.
865 * Dispatch the command. If we successfully send the command, clear the rescan
866 * bit.
365 */
867 */
366 mly_enqueue_ready(mc);
367 mly_startio(sc);
868 if (mly_start(mc) != 0) {
869 mly_release_command(mc);
870 } else {
871 sc->mly_btl[bus][target].mb_flags &= ~MLY_BTL_RESCAN; /* success */
872 }
368}
369
370/********************************************************************************
371 * Handle the completion of a rescan operation
372 */
373static void
374mly_complete_rescan(struct mly_command *mc)
375{
376 struct mly_softc *sc = mc->mc_sc;
377 struct mly_ioctl_getlogdevinfovalid *ldi;
378 struct mly_ioctl_getphysdevinfovalid *pdi;
873}
874
875/********************************************************************************
876 * Handle the completion of a rescan operation
877 */
878static void
879mly_complete_rescan(struct mly_command *mc)
880{
881 struct mly_softc *sc = mc->mc_sc;
882 struct mly_ioctl_getlogdevinfovalid *ldi;
883 struct mly_ioctl_getphysdevinfovalid *pdi;
379 int bus, target;
884 struct mly_command_ioctl *mci;
885 struct mly_btl btl, *btlp;
886 int bus, target, rescan;
380
887
381 debug_called(2);
888 debug_called(1);
382
889
383 /* iff the command completed OK, we should use the result to update our data */
890 /*
891 * Recover the bus and target from the command. We need these even in
892 * the case where we don't have a useful response.
893 */
894 mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
895 if (mci->sub_ioctl == MDACIOCTL_GETLOGDEVINFOVALID) {
896 bus = MLY_LOGDEV_BUS(sc, mci->addr.log.logdev);
897 target = MLY_LOGDEV_TARGET(sc, mci->addr.log.logdev);
898 } else {
899 bus = mci->addr.phys.channel;
900 target = mci->addr.phys.target;
901 }
902 /* XXX validate bus/target? */
903
904 /* the default result is 'no device' */
905 bzero(&btl, sizeof(btl));
906
907 /* if the rescan completed OK, we have possibly-new BTL data */
384 if (mc->mc_status == 0) {
385 if (mc->mc_length == sizeof(*ldi)) {
386 ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
908 if (mc->mc_status == 0) {
909 if (mc->mc_length == sizeof(*ldi)) {
910 ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
387 bus = MLY_LOGDEV_BUS(sc, ldi->logical_device_number);
388 target = MLY_LOGDEV_TARGET(ldi->logical_device_number);
389 sc->mly_btl[bus][target].mb_flags = MLY_BTL_LOGICAL; /* clears all other flags */
390 sc->mly_btl[bus][target].mb_type = ldi->raid_level;
391 sc->mly_btl[bus][target].mb_state = ldi->state;
392 debug(2, "BTL rescan for %d returns %s, %s", ldi->logical_device_number,
911 if ((MLY_LOGDEV_BUS(sc, ldi->logical_device_number) != bus) ||
912 (MLY_LOGDEV_TARGET(sc, ldi->logical_device_number) != target)) {
913 mly_printf(sc, "WARNING: BTL rescan for %d:%d returned data for %d:%d instead\n",
914 bus, target, MLY_LOGDEV_BUS(sc, ldi->logical_device_number),
915 MLY_LOGDEV_TARGET(sc, ldi->logical_device_number));
916 /* XXX what can we do about this? */
917 }
918 btl.mb_flags = MLY_BTL_LOGICAL;
919 btl.mb_type = ldi->raid_level;
920 btl.mb_state = ldi->state;
921 debug(1, "BTL rescan for %d returns %s, %s", ldi->logical_device_number,
393 mly_describe_code(mly_table_device_type, ldi->raid_level),
394 mly_describe_code(mly_table_device_state, ldi->state));
395 } else if (mc->mc_length == sizeof(*pdi)) {
396 pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;
922 mly_describe_code(mly_table_device_type, ldi->raid_level),
923 mly_describe_code(mly_table_device_state, ldi->state));
924 } else if (mc->mc_length == sizeof(*pdi)) {
925 pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;
397 bus = pdi->channel;
398 target = pdi->target;
399 sc->mly_btl[bus][target].mb_flags = MLY_BTL_PHYSICAL; /* clears all other flags */
400 sc->mly_btl[bus][target].mb_type = MLY_DEVICE_TYPE_PHYSICAL;
401 sc->mly_btl[bus][target].mb_state = pdi->state;
402 sc->mly_btl[bus][target].mb_speed = pdi->speed;
403 sc->mly_btl[bus][target].mb_width = pdi->width;
926 if ((pdi->channel != bus) || (pdi->target != target)) {
927 mly_printf(sc, "WARNING: BTL rescan for %d:%d returned data for %d:%d instead\n",
928 bus, target, pdi->channel, pdi->target);
929 /* XXX what can we do about this? */
930 }
931 btl.mb_flags = MLY_BTL_PHYSICAL;
932 btl.mb_type = MLY_DEVICE_TYPE_PHYSICAL;
933 btl.mb_state = pdi->state;
934 btl.mb_speed = pdi->speed;
935 btl.mb_width = pdi->width;
404 if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
405 sc->mly_btl[bus][target].mb_flags |= MLY_BTL_PROTECTED;
936 if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
937 sc->mly_btl[bus][target].mb_flags |= MLY_BTL_PROTECTED;
406 debug(2, "BTL rescan for %d:%d returns %s", bus, target,
938 debug(1, "BTL rescan for %d:%d returns %s", bus, target,
407 mly_describe_code(mly_table_device_state, pdi->state));
408 } else {
939 mly_describe_code(mly_table_device_state, pdi->state));
940 } else {
409 mly_printf(sc, "BTL rescan result corrupted\n");
941 mly_printf(sc, "BTL rescan result invalid\n");
410 }
942 }
411 } else {
412 /*
413 * A request sent for a device beyond the last device present will fail.
414 * We don't care about this, so we do nothing about it.
415 */
416 }
943 }
944
417 free(mc->mc_data, M_DEVBUF);
418 mly_release_command(mc);
945 free(mc->mc_data, M_DEVBUF);
946 mly_release_command(mc);
947
948 /*
949 * Decide whether we need to rescan the device.
950 */
951 rescan = 0;
952
953 /* device type changes (usually between 'nothing' and 'something') */
954 btlp = &sc->mly_btl[bus][target];
955 if (btl.mb_flags != btlp->mb_flags) {
956 debug(1, "flags changed, rescanning");
957 rescan = 1;
958 }
959
960 /* XXX other reasons? */
961
962 /*
963 * Update BTL information.
964 */
965 *btlp = btl;
966
967 /*
968 * Perform CAM rescan if required.
969 */
970 if (rescan)
971 mly_cam_rescan_btl(sc, bus, target);
419}
420
421/********************************************************************************
422 * Get the current health status and set the 'next event' counter to suit.
423 */
424static int
425mly_get_eventstatus(struct mly_softc *sc)
426{

--- 173 unchanged lines hidden ---

600 if (error && (mc->mc_data != NULL) && (*data == NULL))
601 free(mc->mc_data, M_DEVBUF);
602 mly_release_command(mc);
603 }
604 return(error);
605}
606
607/********************************************************************************
972}
973
974/********************************************************************************
975 * Get the current health status and set the 'next event' counter to suit.
976 */
977static int
978mly_get_eventstatus(struct mly_softc *sc)
979{

--- 173 unchanged lines hidden (view full) ---

1153 if (error && (mc->mc_data != NULL) && (*data == NULL))
1154 free(mc->mc_data, M_DEVBUF);
1155 mly_release_command(mc);
1156 }
1157 return(error);
1158}
1159
1160/********************************************************************************
1161 * Check for event(s) outstanding in the controller.
1162 */
1163static void
1164mly_check_event(struct mly_softc *sc)
1165{
1166
1167 /*
1168 * The controller may have updated the health status information,
1169 * so check for it here. Note that the counters are all in host memory,
1170 * so this check is very cheap. Also note that we depend on checking on
1171 * completion
1172 */
1173 if (sc->mly_mmbox->mmm_health.status.change_counter != sc->mly_event_change) {
1174 sc->mly_event_change = sc->mly_mmbox->mmm_health.status.change_counter;
1175 debug(1, "event change %d, event status update, %d -> %d", sc->mly_event_change,
1176 sc->mly_event_waiting, sc->mly_mmbox->mmm_health.status.next_event);
1177 sc->mly_event_waiting = sc->mly_mmbox->mmm_health.status.next_event;
1178
1179 /* wake up anyone that might be interested in this */
1180 wakeup(&sc->mly_event_change);
1181 }
1182 if (sc->mly_event_counter != sc->mly_event_waiting)
1183 mly_fetch_event(sc);
1184}
1185
1186/********************************************************************************
608 * Fetch one event from the controller.
1187 * Fetch one event from the controller.
1188 *
1189 * If we fail due to resource starvation, we'll be retried the next time a
1190 * command completes.
609 */
610static void
611mly_fetch_event(struct mly_softc *sc)
612{
613 struct mly_command *mc;
614 struct mly_command_ioctl *mci;
615 int s;
616 u_int32_t event;
617
1191 */
1192static void
1193mly_fetch_event(struct mly_softc *sc)
1194{
1195 struct mly_command *mc;
1196 struct mly_command_ioctl *mci;
1197 int s;
1198 u_int32_t event;
1199
618 debug_called(2);
1200 debug_called(1);
619
620 /* get a command */
1201
1202 /* get a command */
621 mc = NULL;
622 if (mly_alloc_command(sc, &mc))
1203 if (mly_alloc_command(sc, &mc))
623 return; /* we'll get retried the next time a command completes */
1204 return;
624
625 /* set up the data buffer */
626 if ((mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
627 mly_release_command(mc);
1205
1206 /* set up the data buffer */
1207 if ((mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
1208 mly_release_command(mc);
628 return; /* we'll get retried the next time a command completes */
1209 return;
629 }
630 mc->mc_length = sizeof(struct mly_event);
631 mc->mc_flags |= MLY_CMD_DATAIN;
632 mc->mc_complete = mly_complete_event;
633
634 /*
635 * Get an event number to fetch. It's possible that we've raced with another
636 * context for the last event, in which case there will be no more events.

--- 20 unchanged lines hidden ---

657 mci->addr.phys.target = (event >> 24) & 0xff;
658 mci->addr.phys.channel = 0;
659 mci->addr.phys.controller = 0;
660 mci->timeout.value = 30;
661 mci->timeout.scale = MLY_TIMEOUT_SECONDS;
662 mci->sub_ioctl = MDACIOCTL_GETEVENT;
663 mci->param.getevent.sequence_number_low = event & 0xffff;
664
1210 }
1211 mc->mc_length = sizeof(struct mly_event);
1212 mc->mc_flags |= MLY_CMD_DATAIN;
1213 mc->mc_complete = mly_complete_event;
1214
1215 /*
1216 * Get an event number to fetch. It's possible that we've raced with another
1217 * context for the last event, in which case there will be no more events.

--- 20 unchanged lines hidden ---

1238 mci->addr.phys.target = (event >> 24) & 0xff;
1239 mci->addr.phys.channel = 0;
1240 mci->addr.phys.controller = 0;
1241 mci->timeout.value = 30;
1242 mci->timeout.scale = MLY_TIMEOUT_SECONDS;
1243 mci->sub_ioctl = MDACIOCTL_GETEVENT;
1244 mci->param.getevent.sequence_number_low = event & 0xffff;
1245
665 debug(2, "fetch event %u", event);
1246 debug(1, "fetch event %u", event);
666
667 /*
1247
1248 /*
668 * Use the ready queue to get this command dispatched.
1249 * Submit the command.
1250 *
1251 * Note that failure of mly_start() will result in this event never being
1252 * fetched.
669 */
1253 */
670 mly_enqueue_ready(mc);
671 mly_startio(sc);
1254 if (mly_start(mc) != 0) {
1255 mly_printf(sc, "couldn't fetch event %u\n", event);
1256 mly_release_command(mc);
1257 }
672}
673
674/********************************************************************************
675 * Handle the completion of an event poll.
1258}
1259
1260/********************************************************************************
1261 * Handle the completion of an event poll.
676 *
677 * Note that we don't actually have to instigate another poll; the completion of
678 * this command will trigger that if there are any more events to poll for.
679 */
680static void
681mly_complete_event(struct mly_command *mc)
682{
683 struct mly_softc *sc = mc->mc_sc;
684 struct mly_event *me = (struct mly_event *)mc->mc_data;
685
1262 */
1263static void
1264mly_complete_event(struct mly_command *mc)
1265{
1266 struct mly_softc *sc = mc->mc_sc;
1267 struct mly_event *me = (struct mly_event *)mc->mc_data;
1268
686 debug_called(2);
1269 debug_called(1);
687
688 /*
689 * If the event was successfully fetched, process it.
690 */
691 if (mc->mc_status == SCSI_STATUS_OK) {
692 mly_process_event(sc, me);
693 free(me, M_DEVBUF);
694 }
695 mly_release_command(mc);
1270
1271 /*
1272 * If the event was successfully fetched, process it.
1273 */
1274 if (mc->mc_status == SCSI_STATUS_OK) {
1275 mly_process_event(sc, me);
1276 free(me, M_DEVBUF);
1277 }
1278 mly_release_command(mc);
1279
1280 /*
1281 * Check for another event.
1282 */
1283 mly_check_event(sc);
696}
697
698/********************************************************************************
699 * Process a controller event.
700 */
701static void
702mly_process_event(struct mly_softc *sc, struct mly_event *me)
703{

--- 14 unchanged lines hidden ---

718 event = ((int)(ssd->add_sense_code & ~0x80) << 8) + ssd->add_sense_code_qual;
719 } else {
720 event = me->code;
721 }
722
723 /* look up event, get codes */
724 fp = mly_describe_code(mly_table_event, event);
725
1284}
1285
1286/********************************************************************************
1287 * Process a controller event.
1288 */
1289static void
1290mly_process_event(struct mly_softc *sc, struct mly_event *me)
1291{

--- 14 unchanged lines hidden ---

1306 event = ((int)(ssd->add_sense_code & ~0x80) << 8) + ssd->add_sense_code_qual;
1307 } else {
1308 event = me->code;
1309 }
1310
1311 /* look up event, get codes */
1312 fp = mly_describe_code(mly_table_event, event);
1313
726 debug(2, "Event %d code 0x%x", me->sequence_number, me->code);
1314 debug(1, "Event %d code 0x%x", me->sequence_number, me->code);
727
728 /* quiet event? */
729 class = fp[0];
730 if (isupper(class) && bootverbose)
731 class = tolower(class);
732
733 /* get action code, text string */
734 action = fp[1];

--- 9 unchanged lines hidden ---

744 case 'p': /* error on physical device */
745 mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
746 if (action == 'r')
747 sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
748 break;
749 case 'l': /* error on logical unit */
750 case 'm': /* message about logical unit */
751 bus = MLY_LOGDEV_BUS(sc, me->lun);
1315
1316 /* quiet event? */
1317 class = fp[0];
1318 if (isupper(class) && bootverbose)
1319 class = tolower(class);
1320
1321 /* get action code, text string */
1322 action = fp[1];

--- 9 unchanged lines hidden ---

1332 case 'p': /* error on physical device */
1333 mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
1334 if (action == 'r')
1335 sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
1336 break;
1337 case 'l': /* error on logical unit */
1338 case 'm': /* message about logical unit */
1339 bus = MLY_LOGDEV_BUS(sc, me->lun);
752 target = MLY_LOGDEV_TARGET(me->lun);
1340 target = MLY_LOGDEV_TARGET(sc, me->lun);
753 mly_name_device(sc, bus, target);
754 mly_printf(sc, "logical device %d (%s) %s\n", me->lun, sc->mly_btl[bus][target].mb_name, tp);
755 if (action == 'r')
756 sc->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
757 break;
758 break;
759 case 's': /* report of sense data */
760 if (((ssd->flags & SSD_KEY) == SSD_KEY_NO_SENSE) ||

--- 25 unchanged lines hidden ---

786
787/********************************************************************************
788 * Perform periodic activities.
789 */
790static void
791mly_periodic(void *data)
792{
793 struct mly_softc *sc = (struct mly_softc *)data;
1341 mly_name_device(sc, bus, target);
1342 mly_printf(sc, "logical device %d (%s) %s\n", me->lun, sc->mly_btl[bus][target].mb_name, tp);
1343 if (action == 'r')
1344 sc->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
1345 break;
1346 break;
1347 case 's': /* report of sense data */
1348 if (((ssd->flags & SSD_KEY) == SSD_KEY_NO_SENSE) ||

--- 25 unchanged lines hidden ---

1374
1375/********************************************************************************
1376 * Perform periodic activities.
1377 */
1378static void
1379mly_periodic(void *data)
1380{
1381 struct mly_softc *sc = (struct mly_softc *)data;
794 int nchn, bus, target;
1382 int bus, target;
795
796 debug_called(2);
797
798 /*
799 * Scan devices.
800 */
1383
1384 debug_called(2);
1385
1386 /*
1387 * Scan devices.
1388 */
801 nchn = sc->mly_controllerinfo->physical_channels_present +
802 sc->mly_controllerinfo->virtual_channels_present;
803 for (bus = 0; bus < nchn; bus++) {
804 for (target = 0; target < MLY_MAX_TARGETS; target++) {
1389 for (bus = 0; bus < sc->mly_cam_channels; bus++) {
1390 if (MLY_BUS_IS_VALID(sc, bus)) {
1391 for (target = 0; target < MLY_MAX_TARGETS; target++) {
805
1392
806 /* ignore the controller in this scan */
807 if (target == sc->mly_controllerparam->initiator_id)
808 continue;
1393 /* ignore the controller in this scan */
1394 if (target == sc->mly_controllerparam->initiator_id)
1395 continue;
809
1396
810 /* perform device rescan? */
811 if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_RESCAN)
812 mly_rescan_btl(sc, bus, target);
1397 /* perform device rescan? */
1398 if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_RESCAN)
1399 mly_rescan_btl(sc, bus, target);
1400 }
813 }
814 }
1401 }
1402 }
1403
1404 /* check for controller events */
1405 mly_check_event(sc);
815
1406
816 sc->mly_periodic = timeout(mly_periodic, sc, hz);
1407 /* reschedule ourselves */
1408 sc->mly_periodic = timeout(mly_periodic, sc, MLY_PERIODIC_INTERVAL * hz);
817}
818
819/********************************************************************************
820 ********************************************************************************
821 Command Processing
822 ********************************************************************************
823 ********************************************************************************/
824
825/********************************************************************************
826 * Run a command and wait for it to complete.
827 *
828 */
829static int
830mly_immediate_command(struct mly_command *mc)
831{
832 struct mly_softc *sc = mc->mc_sc;
833 int error, s;
834
1409}
1410
1411/********************************************************************************
1412 ********************************************************************************
1413 Command Processing
1414 ********************************************************************************
1415 ********************************************************************************/
1416
1417/********************************************************************************
1418 * Run a command and wait for it to complete.
1419 *
1420 */
1421static int
1422mly_immediate_command(struct mly_command *mc)
1423{
1424 struct mly_softc *sc = mc->mc_sc;
1425 int error, s;
1426
835 debug_called(2);
1427 debug_called(1);
836
837 /* spinning at splcam is ugly, but we're only used during controller init */
838 s = splcam();
1428
1429 /* spinning at splcam is ugly, but we're only used during controller init */
1430 s = splcam();
839 if ((error = mly_start(mc)))
1431 if ((error = mly_start(mc))) {
1432 splx(s);
840 return(error);
1433 return(error);
1434 }
841
842 if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) {
843 /* sleep on the command */
844 while(!(mc->mc_flags & MLY_CMD_COMPLETE)) {
845 tsleep(mc, PRIBIO, "mlywait", 0);
846 }
847 } else {
848 /* spin and collect status while we do */
849 while(!(mc->mc_flags & MLY_CMD_COMPLETE)) {
850 mly_done(mc->mc_sc);
851 }
852 }
853 splx(s);
854 return(0);
855}
856
857/********************************************************************************
1435
1436 if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) {
1437 /* sleep on the command */
1438 while(!(mc->mc_flags & MLY_CMD_COMPLETE)) {
1439 tsleep(mc, PRIBIO, "mlywait", 0);
1440 }
1441 } else {
1442 /* spin and collect status while we do */
1443 while(!(mc->mc_flags & MLY_CMD_COMPLETE)) {
1444 mly_done(mc->mc_sc);
1445 }
1446 }
1447 splx(s);
1448 return(0);
1449}
1450
1451/********************************************************************************
858 * Start as much queued I/O as possible on the controller
1452 * Deliver a command to the controller.
1453 *
1454 * XXX it would be good to just queue commands that we can't submit immediately
1455 * and send them later, but we probably want a wrapper for that so that
1456 * we don't hang on a failed submission for an immediate command.
859 */
1457 */
860void
861mly_startio(struct mly_softc *sc)
862{
863 struct mly_command *mc;
864
865 debug_called(2);
866
867 for (;;) {
868
869 /* try for a ready command */
870 mc = mly_dequeue_ready(sc);
871
872 /* try to build a command from a queued ccb */
873 if (!mc)
874 mly_cam_command(sc, &mc);
875
876 /* no command == nothing to do */
877 if (!mc)
878 break;
879
880 /* try to post the command */
881 if (mly_start(mc)) {
882 /* controller busy, or no resources - defer for later */
883 mly_requeue_ready(mc);
884 break;
885 }
886 }
887}
888
889/********************************************************************************
890 * Deliver a command to the controller; allocate controller resources at the
891 * last moment.
892 */
893static int
894mly_start(struct mly_command *mc)
895{
896 struct mly_softc *sc = mc->mc_sc;
897 union mly_command_packet *pkt;
898 int s;
899
900 debug_called(2);

--- 169 unchanged lines hidden (view full) ---

1070 * Call completion handler or wake up sleeping consumer.
1071 */
1072 if (mc_complete != NULL) {
1073 mc_complete(mc);
1074 } else {
1075 wakeup(mc);
1076 }
1077 }
1458static int
1459mly_start(struct mly_command *mc)
1460{
1461 struct mly_softc *sc = mc->mc_sc;
1462 union mly_command_packet *pkt;
1463 int s;
1464
1465 debug_called(2);

--- 169 unchanged lines hidden (view full) ---

1635 * Call completion handler or wake up sleeping consumer.
1636 */
1637 if (mc_complete != NULL) {
1638 mc_complete(mc);
1639 } else {
1640 wakeup(mc);
1641 }
1642 }
1078
1643
1079 /*
1644 /*
1080 * We may have freed up controller resources which would allow us
1081 * to push more commands onto the controller, so we check here.
1645 * XXX if we are deferring commands due to controller-busy status, we should
1646 * retry submitting them here.
1082 */
1647 */
1083 mly_startio(sc);
1084
1085 /*
1086 * The controller may have updated the health status information,
1087 * so check for it here.
1088 *
1089 * Note that we only check for health status after a completed command. It
1090 * might be wise to ping the controller occasionally if it's been idle for
1090 * a while just to check up on it. While a filesystem is mounted or I/O is
1091 * active, this isn't really an issue.
1093 */
1094 if (sc->mly_mmbox->mmm_health.status.change_counter != sc->mly_event_change) {
1095 sc->mly_event_change = sc->mly_mmbox->mmm_health.status.change_counter;
1096 debug(1, "event change %d, event status update, %d -> %d", sc->mly_event_change,
1097 sc->mly_event_waiting, sc->mly_mmbox->mmm_health.status.next_event);
1098 sc->mly_event_waiting = sc->mly_mmbox->mmm_health.status.next_event;
1099
1100 /* wake up anyone that might be interested in this */
1101 wakeup(&sc->mly_event_change);
1102 }
1103 if (sc->mly_event_counter != sc->mly_event_waiting)
1104 mly_fetch_event(sc);
1105}
1106
1107/********************************************************************************
1108 ********************************************************************************
1109 Command Buffer Management
1110 ********************************************************************************
1111 ********************************************************************************/
1112

--- 42 unchanged lines hidden (view full) ---

1155}
1156
1157/********************************************************************************
1158 * Map helper for command allocation.
1159 */
1160static void
1161mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1162{
1648}
1649
1650/********************************************************************************
1651 ********************************************************************************
1652 Command Buffer Management
1653 ********************************************************************************
1654 ********************************************************************************/
1655

--- 42 unchanged lines hidden (view full) ---

1698}
1699
1700/********************************************************************************
1701 * Map helper for command allocation.
1702 */
1703static void
1704mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1705{
1163 struct mly_softc *sc = (struct mly_softc *)arg;
1706 struct mly_softc *sc = (struct mly_softc *)arg;
1164
1707
1165 debug_called(2);
1708 debug_called(1);
1166
1167 sc->mly_packetphys = segs[0].ds_addr;
1168}
1169
1170/********************************************************************************
1171 * Allocate and initialise command and packet structures.
1709
1710 sc->mly_packetphys = segs[0].ds_addr;
1711}
1712
1713/********************************************************************************
1714 * Allocate and initialise command and packet structures.
1715 *
1716 * If the controller supports fewer than MLY_MAX_COMMANDS commands, limit our
1717 * allocation to that number. If we don't yet know how many commands the
1718 * controller supports, allocate a very small set (suitable for initialisation
1719 * purposes only).
1172 */
1173static int
1174mly_alloc_commands(struct mly_softc *sc)
1175{
1176 struct mly_command *mc;
1720 */
1721static int
1722mly_alloc_commands(struct mly_softc *sc)
1723{
1724 struct mly_command *mc;
1177 int i;
1725 int i, ncmd;
1178
1726
1727 if (sc->mly_controllerinfo == NULL) {
1728 ncmd = 4;
1729 } else {
1730 ncmd = min(MLY_MAX_COMMANDS, sc->mly_controllerinfo->maximum_parallel_commands);
1731 }
1732
1179 /*
1180 * Allocate enough space for all the command packets in one chunk and
1181 * map them permanently into controller-visible space.
1182 */
1183 if (bus_dmamem_alloc(sc->mly_packet_dmat, (void **)&sc->mly_packet,
1184 BUS_DMA_NOWAIT, &sc->mly_packetmap)) {
1185 return(ENOMEM);
1186 }
1187 bus_dmamap_load(sc->mly_packet_dmat, sc->mly_packetmap, sc->mly_packet,
1733 /*
1734 * Allocate enough space for all the command packets in one chunk and
1735 * map them permanently into controller-visible space.
1736 */
1737 if (bus_dmamem_alloc(sc->mly_packet_dmat, (void **)&sc->mly_packet,
1738 BUS_DMA_NOWAIT, &sc->mly_packetmap)) {
1739 return(ENOMEM);
1740 }
1741 bus_dmamap_load(sc->mly_packet_dmat, sc->mly_packetmap, sc->mly_packet,
1188 MLY_MAXCOMMANDS * sizeof(union mly_command_packet),
1742 ncmd * sizeof(union mly_command_packet),
1189 mly_alloc_commands_map, sc, 0);
1190
1743 mly_alloc_commands_map, sc, 0);
1744
1191 for (i = 0; i < MLY_MAXCOMMANDS; i++) {
1745 for (i = 0; i < ncmd; i++) {
1192 mc = &sc->mly_command[i];
1193 bzero(mc, sizeof(*mc));
1194 mc->mc_sc = sc;
1195 mc->mc_slot = MLY_SLOT_START + i;
1196 mc->mc_packet = sc->mly_packet + i;
1197 mc->mc_packetphys = sc->mly_packetphys + (i * sizeof(union mly_command_packet));
1198 if (!bus_dmamap_create(sc->mly_buffer_dmat, 0, &mc->mc_datamap))
1199 mly_release_command(mc);
1200 }
1201 return(0);
1202}
1203
1204/********************************************************************************
1746 mc = &sc->mly_command[i];
1747 bzero(mc, sizeof(*mc));
1748 mc->mc_sc = sc;
1749 mc->mc_slot = MLY_SLOT_START + i;
1750 mc->mc_packet = sc->mly_packet + i;
1751 mc->mc_packetphys = sc->mly_packetphys + (i * sizeof(union mly_command_packet));
1752 if (!bus_dmamap_create(sc->mly_buffer_dmat, 0, &mc->mc_datamap))
1753 mly_release_command(mc);
1754 }
1755 return(0);
1756}
1757
1758/********************************************************************************
1759 * Free all the storage held by commands.
1760 *
1761 * Must be called with all commands on the free list.
1762 */
1763static void
1764mly_release_commands(struct mly_softc *sc)
1765{
1766 struct mly_command *mc;
1767
1768 /* throw away command buffer DMA maps */
1769 while (mly_alloc_command(sc, &mc) == 0)
1770 bus_dmamap_destroy(sc->mly_buffer_dmat, mc->mc_datamap);
1771
1772 /* release the packet storage */
1773 if (sc->mly_packet != NULL) {
1774 bus_dmamap_unload(sc->mly_packet_dmat, sc->mly_packetmap);
1775 bus_dmamem_free(sc->mly_packet_dmat, sc->mly_packet, sc->mly_packetmap);
1776 sc->mly_packet = NULL;
1777 }
1778}
1779
1780
1781/********************************************************************************
1205 * Command-mapping helper function - populate this command's s/g table
1206 * with the s/g entries for its data.
1207 */
1208static void
1209mly_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1210{
1211 struct mly_command *mc = (struct mly_command *)arg;
1212 struct mly_softc *sc = mc->mc_sc;
1213 struct mly_command_generic *gen = &(mc->mc_packet->generic);
1214 struct mly_sg_entry *sg;
1215 int i, tabofs;
1216
1782 * Command-mapping helper function - populate this command's s/g table
1783 * with the s/g entries for its data.
1784 */
1785static void
1786mly_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1787{
1788 struct mly_command *mc = (struct mly_command *)arg;
1789 struct mly_softc *sc = mc->mc_sc;
1790 struct mly_command_generic *gen = &(mc->mc_packet->generic);
1791 struct mly_sg_entry *sg;
1792 int i, tabofs;
1793
1217 debug_called(3);
1794 debug_called(2);
1218
1219 /* can we use the transfer structure directly? */
1220 if (nseg <= 2) {
1221 sg = &gen->transfer.direct.sg[0];
1222 gen->command_control.extended_sg_table = 0;
1223 } else {
1795
1796 /* can we use the transfer structure directly? */
1797 if (nseg <= 2) {
1798 sg = &gen->transfer.direct.sg[0];
1799 gen->command_control.extended_sg_table = 0;
1800 } else {
1224 tabofs = ((mc->mc_slot - MLY_SLOT_START) * MLY_MAXSGENTRIES);
1801 tabofs = ((mc->mc_slot - MLY_SLOT_START) * MLY_MAX_SGENTRIES);
1225 sg = sc->mly_sg_table + tabofs;
1226 gen->transfer.indirect.entries[0] = nseg;
1227 gen->transfer.indirect.table_physaddr[0] = sc->mly_sg_busaddr + (tabofs * sizeof(struct mly_sg_entry));
1228 gen->command_control.extended_sg_table = 1;
1229 }
1230
1231 /* copy the s/g table */
1232 for (i = 0; i < nseg; i++) {

--- 9 unchanged lines hidden (view full) ---

1242 *
1243 * We don't support 'large' SCSI commands at this time, so this is unused.
1244 */
1245static void
1246mly_map_command_cdb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1247{
1248 struct mly_command *mc = (struct mly_command *)arg;
1249
1802 sg = sc->mly_sg_table + tabofs;
1803 gen->transfer.indirect.entries[0] = nseg;
1804 gen->transfer.indirect.table_physaddr[0] = sc->mly_sg_busaddr + (tabofs * sizeof(struct mly_sg_entry));
1805 gen->command_control.extended_sg_table = 1;
1806 }
1807
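	/*
	 * The offset arithmetic above gives each command slot its own fixed-size
	 * window in the shared table: slot (MLY_SLOT_START + n) uses the
	 * MLY_MAX_SGENTRIES entries starting at mly_sg_table[n * MLY_MAX_SGENTRIES],
	 * which the controller addresses at
	 * mly_sg_busaddr + tabofs * sizeof(struct mly_sg_entry).
	 */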
1808 /* copy the s/g table */
1809 for (i = 0; i < nseg; i++) {

--- 9 unchanged lines hidden (view full) ---

1819 *
1820 * We don't support 'large' SCSI commands at this time, so this is unused.
1821 */
1822static void
1823mly_map_command_cdb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1824{
1825 struct mly_command *mc = (struct mly_command *)arg;
1826
1250 debug_called(3);
1827 debug_called(2);
1251
1252 /* XXX can we safely assume that a CDB will never cross a page boundary? */
1253 if ((segs[0].ds_addr % PAGE_SIZE) >
1254 ((segs[0].ds_addr + mc->mc_packet->scsi_large.cdb_length) % PAGE_SIZE))
1255 panic("cdb crosses page boundary");
1256
1257 /* fix up fields in the command packet */
1258 mc->mc_packet->scsi_large.cdb_physaddr = segs[0].ds_addr;

--- 10 unchanged lines hidden (view full) ---

1269
1270 debug_called(2);
1271
1272 /* don't map more than once */
1273 if (mc->mc_flags & MLY_CMD_MAPPED)
1274 return;
1275
1276 /* does the command have a data buffer? */
1828
1829 /* XXX can we safely assume that a CDB will never cross a page boundary? */
1830 if ((segs[0].ds_addr % PAGE_SIZE) >
1831 ((segs[0].ds_addr + mc->mc_packet->scsi_large.cdb_length) % PAGE_SIZE))
1832 panic("cdb crosses page boundary");
1833
1834 /* fix up fields in the command packet */
1835 mc->mc_packet->scsi_large.cdb_physaddr = segs[0].ds_addr;

--- 10 unchanged lines hidden (view full) ---

1846
1847 debug_called(2);
1848
1849 /* don't map more than once */
1850 if (mc->mc_flags & MLY_CMD_MAPPED)
1851 return;
1852
1853 /* does the command have a data buffer? */
1277 if (mc->mc_data != NULL)
1854 if (mc->mc_data != NULL) {
1278 bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap, mc->mc_data, mc->mc_length,
1279 mly_map_command_sg, mc, 0);
1280
1855 bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap, mc->mc_data, mc->mc_length,
1856 mly_map_command_sg, mc, 0);
1857
1281 if (mc->mc_flags & MLY_CMD_DATAIN)
1282 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREREAD);
1283 if (mc->mc_flags & MLY_CMD_DATAOUT)
1284 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREWRITE);
1285
1858 if (mc->mc_flags & MLY_CMD_DATAIN)
1859 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREREAD);
1860 if (mc->mc_flags & MLY_CMD_DATAOUT)
1861 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREWRITE);
1862 }
1286 mc->mc_flags |= MLY_CMD_MAPPED;
1287}
1288
1289/********************************************************************************
1290 * Unmap a command from controller-visible space
1291 */
1292static void
1293mly_unmap_command(struct mly_command *mc)
1294{
1295 struct mly_softc *sc = mc->mc_sc;
1296
1297 debug_called(2);
1298
1299 if (!(mc->mc_flags & MLY_CMD_MAPPED))
1300 return;
1301
1863 mc->mc_flags |= MLY_CMD_MAPPED;
1864}
1865
1866/********************************************************************************
1867 * Unmap a command from controller-visible space
1868 */
1869static void
1870mly_unmap_command(struct mly_command *mc)
1871{
1872 struct mly_softc *sc = mc->mc_sc;
1873
1874 debug_called(2);
1875
1876 if (!(mc->mc_flags & MLY_CMD_MAPPED))
1877 return;
1878
1302 if (mc->mc_flags & MLY_CMD_DATAIN)
1303 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTREAD);
1304 if (mc->mc_flags & MLY_CMD_DATAOUT)
1305 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTWRITE);
1306
1307 /* does the command have a data buffer? */
1879 /* does the command have a data buffer? */
1308 if (mc->mc_data != NULL)
1309 bus_dmamap_unload(sc->mly_buffer_dmat, mc->mc_datamap);
1880 if (mc->mc_data != NULL) {
1881 if (mc->mc_flags & MLY_CMD_DATAIN)
1882 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTREAD);
1883 if (mc->mc_flags & MLY_CMD_DATAOUT)
1884 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTWRITE);
1310
1885
1886 bus_dmamap_unload(sc->mly_buffer_dmat, mc->mc_datamap);
1887 }
1311 mc->mc_flags &= ~MLY_CMD_MAPPED;
1312}
1313
1888 mc->mc_flags &= ~MLY_CMD_MAPPED;
1889}
1890
1891
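/*
 * mly_map_command() and mly_unmap_command() are a pair: the former loads the
 * data buffer and issues the BUS_DMASYNC_PREREAD/PREWRITE operations before the
 * command is handed to the controller, while the latter performs the matching
 * POSTREAD/POSTWRITE synchronisation and unloads the map once the controller
 * has finished with the buffer.
 */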
1314/********************************************************************************
1315 ********************************************************************************
1892/********************************************************************************
1893 ********************************************************************************
1894 CAM interface
1895 ********************************************************************************
1896 ********************************************************************************/
1897
1898/********************************************************************************
1899 * Attach the physical and virtual SCSI busses to CAM.
1900 *
1901 * Physical bus numbering starts from 0, virtual bus numbering from one greater
1902 * than the highest physical bus. Physical busses are only registered if
1903 * the kernel environment variable "hw.mly.register_physical_channels" is set.
1904 *
1905 * When we refer to a "bus", we are referring to the bus number registered with
1906 * the SIM, whereas a "channel" is a channel number given to the adapter. In order
1907 * to keep things simple, we map these 1:1, so "bus" and "channel" may be used
1908 * interchangeably.
1909 */
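/*
 * For example, assuming the standard FreeBSD loader tunable mechanism, the
 * physical channels can be exposed by setting the variable before the driver
 * attaches, either at the loader prompt:
 *
 *	set hw.mly.register_physical_channels=1
 *
 * or with hw.mly.register_physical_channels="1" in /boot/loader.conf.  Any
 * value will do; the getenv() check below only tests that the variable exists.
 */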
1910int
1911mly_cam_attach(struct mly_softc *sc)
1912{
1913 struct cam_devq *devq;
1914 int chn, i;
1915
1916 debug_called(1);
1917
1918 /*
1919 * Allocate a devq for all our channels combined.
1920 */
1921 if ((devq = cam_simq_alloc(sc->mly_controllerinfo->maximum_parallel_commands)) == NULL) {
1922 mly_printf(sc, "can't allocate CAM SIM queue\n");
1923 return(ENOMEM);
1924 }
1925
1926 /*
1927 * If physical channel registration has been requested, register these first.
1928 * Note that we enable tagged command queueing for physical channels.
1929 */
1930 if (getenv("hw.mly.register_physical_channels") != NULL) {
1931 chn = 0;
1932 for (i = 0; i < sc->mly_controllerinfo->physical_channels_present; i++, chn++) {
1933
1934 if ((sc->mly_cam_sim[chn] = cam_sim_alloc(mly_cam_action, mly_cam_poll, "mly", sc,
1935 device_get_unit(sc->mly_dev),
1936 sc->mly_controllerinfo->maximum_parallel_commands,
1937 1, devq)) == NULL) {
1938 return(ENOMEM);
1939 }
1940 if (xpt_bus_register(sc->mly_cam_sim[chn], chn)) {
1941 mly_printf(sc, "CAM XPT physical channel registration failed\n");
1942 return(ENXIO);
1943 }
1944 debug(1, "registered physical channel %d", chn);
1945 }
1946 }
1947
1948 /*
1949 * Register our virtual channels, with bus numbers matching channel numbers.
1950 */
1951 chn = sc->mly_controllerinfo->physical_channels_present;
1952 for (i = 0; i < sc->mly_controllerinfo->virtual_channels_present; i++, chn++) {
1953 if ((sc->mly_cam_sim[chn] = cam_sim_alloc(mly_cam_action, mly_cam_poll, "mly", sc,
1954 device_get_unit(sc->mly_dev),
1955 sc->mly_controllerinfo->maximum_parallel_commands,
1956 0, devq)) == NULL) {
1957 return(ENOMEM);
1958 }
1959 if (xpt_bus_register(sc->mly_cam_sim[chn], chn)) {
1960 mly_printf(sc, "CAM XPT virtual channel registration failed\n");
1961 return(ENXIO);
1962 }
1963 debug(1, "registered virtual channel %d", chn);
1964 }
1965
1966 /*
1967 * This is the total number of channels that (might have been) registered with
1968 * CAM. Some may not have been; check the mly_cam_sim array to be certain.
1969 */
1970 sc->mly_cam_channels = sc->mly_controllerinfo->physical_channels_present +
1971 sc->mly_controllerinfo->virtual_channels_present;
1972
1973 return(0);
1974}
1975
1976/********************************************************************************
1977 * Detach from CAM
1978 */
1979void
1980mly_cam_detach(struct mly_softc *sc)
1981{
1982 int i;
1983
1984 debug_called(1);
1985
1986 for (i = 0; i < sc->mly_cam_channels; i++) {
1987 if (sc->mly_cam_sim[i] != NULL) {
1988 xpt_bus_deregister(cam_sim_path(sc->mly_cam_sim[i]));
1989 cam_sim_free(sc->mly_cam_sim[i], 0);
1990 }
1991 }
1992 if (sc->mly_cam_devq != NULL)
1993 cam_simq_free(sc->mly_cam_devq);
1994}
1995
1996/************************************************************************
1997 * Rescan a device.
1998 */
1999static void
2000mly_cam_rescan_btl(struct mly_softc *sc, int bus, int target)
2001{
2002 union ccb *ccb;
2003
2004 debug_called(1);
2005
2006 if ((ccb = malloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO)) == NULL) {
2007 mly_printf(sc, "rescan failed (can't allocate CCB)\n");
2008 return;
2009 }
2010
2011 if (xpt_create_path(&sc->mly_cam_path, xpt_periph,
2012 cam_sim_path(sc->mly_cam_sim[bus]), target, 0) != CAM_REQ_CMP) {
2013 mly_printf(sc, "rescan failed (can't create path)\n");
	free(ccb, M_TEMP);
2014 return;
2015 }
2016 xpt_setup_ccb(&ccb->ccb_h, sc->mly_cam_path, 5/*priority (low)*/);
2017 ccb->ccb_h.func_code = XPT_SCAN_LUN;
2018 ccb->ccb_h.cbfcnp = mly_cam_rescan_callback;
2019 ccb->crcn.flags = CAM_FLAG_NONE;
2020 debug(1, "rescan target %d:%d", bus, target);
2021 xpt_action(ccb);
2022}
2023
2024static void
2025mly_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
2026{
2027 free(ccb, M_TEMP);
2028}
2029
2030/********************************************************************************
2031 * Handle an action requested by CAM
2032 */
2033static void
2034mly_cam_action(struct cam_sim *sim, union ccb *ccb)
2035{
2036 struct mly_softc *sc = cam_sim_softc(sim);
2037
2038 debug_called(2);
2039
2040 switch (ccb->ccb_h.func_code) {
2041
2042 /* perform SCSI I/O */
2043 case XPT_SCSI_IO:
2044 if (!mly_cam_action_io(sim, (struct ccb_scsiio *)&ccb->csio))
2045 return;
2046 break;
2047
2048 /* perform geometry calculations */
2049 case XPT_CALC_GEOMETRY:
2050 {
2051 struct ccb_calc_geometry *ccg = &ccb->ccg;
2052 u_int32_t secs_per_cylinder;
2053
2054 debug(2, "XPT_CALC_GEOMETRY %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2055
2056 if (sc->mly_controllerparam->bios_geometry == MLY_BIOSGEOM_8G) {
2057 ccg->heads = 255;
2058 ccg->secs_per_track = 63;
2059 } else { /* MLY_BIOSGEOM_2G */
2060 ccg->heads = 128;
2061 ccg->secs_per_track = 32;
2062 }
2063 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2064 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2065 ccb->ccb_h.status = CAM_REQ_CMP;
2066 break;
2067 }
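	/*
	 * Worked example for the geometry above: in MLY_BIOSGEOM_2G mode,
	 * secs_per_cylinder = 128 * 32 = 4096, so a volume_size of 2097152 sectors
	 * reports 512 cylinders; in MLY_BIOSGEOM_8G mode the same volume reports
	 * 2097152 / (255 * 63) = 130 cylinders.
	 */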
2068
2069 /* handle path attribute inquiry */
2070 case XPT_PATH_INQ:
2071 {
2072 struct ccb_pathinq *cpi = &ccb->cpi;
2073
2074 debug(2, "XPT_PATH_INQ %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2075
2076 cpi->version_num = 1;
2077 cpi->hba_inquiry = PI_TAG_ABLE; /* XXX extra flags for physical channels? */
2078 cpi->target_sprt = 0;
2079 cpi->hba_misc = 0;
2080 cpi->max_target = MLY_MAX_TARGETS - 1;
2081 cpi->max_lun = MLY_MAX_LUNS - 1;
2082 cpi->initiator_id = sc->mly_controllerparam->initiator_id;
2083 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2084 strncpy(cpi->hba_vid, "FreeBSD", HBA_IDLEN);
2085 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2086 cpi->unit_number = cam_sim_unit(sim);
2087 cpi->bus_id = cam_sim_bus(sim);
2088 cpi->base_transfer_speed = 132 * 1024; /* XXX what to set this to? */
2089 ccb->ccb_h.status = CAM_REQ_CMP;
2090 break;
2091 }
2092
2093 case XPT_GET_TRAN_SETTINGS:
2094 {
2095 struct ccb_trans_settings *cts = &ccb->cts;
2096 int bus, target;
2097
2098 bus = cam_sim_bus(sim);
2099 target = cts->ccb_h.target_id;
2100 /* XXX validate bus/target? */
2101
2102 debug(2, "XPT_GET_TRAN_SETTINGS %d:%d", bus, target);
2103 cts->valid = 0;
2104
2105 /* logical device? */
2106 if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_LOGICAL) {
2107 /* nothing special for these */
2108
2109 /* physical device? */
2110 } else if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_PHYSICAL) {
2111 /* allow CAM to try tagged transactions */
2112 cts->flags |= CCB_TRANS_TAG_ENB;
2113 cts->valid |= CCB_TRANS_TQ_VALID;
2114
2115 /* convert speed (MHz) to usec */
2116 if (sc->mly_btl[bus][target].mb_speed == 0) {
2117 cts->sync_period = 1000000 / 5;
2118 } else {
2119 cts->sync_period = 1000000 / sc->mly_btl[bus][target].mb_speed;
2120 }
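			/* e.g. mb_speed == 20 (a 20MHz device) gives sync_period = 1000000 / 20 = 50000,
			   while the unknown-speed fallback above gives 1000000 / 5 = 200000 */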
2121
2122 /* convert bus width to CAM internal encoding */
2123 switch (sc->mly_btl[bus][target].mb_width) {
2124 case 32:
2125 cts->bus_width = MSG_EXT_WDTR_BUS_32_BIT;
2126 break;
2127 case 16:
2128 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2129 break;
2130 case 8:
2131 default:
2132 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2133 break;
2134 }
2135 cts->valid |= CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_BUS_WIDTH_VALID;
2136
2137 /* not a device, bail out */
2138 } else {
2139 cts->ccb_h.status = CAM_REQ_CMP_ERR;
2140 break;
2141 }
2142
2143 /* disconnect always OK */
2144 cts->flags |= CCB_TRANS_DISC_ENB;
2145 cts->valid |= CCB_TRANS_DISC_VALID;
2146
2147 cts->ccb_h.status = CAM_REQ_CMP;
2148 break;
2149 }
2150
2151 default: /* we can't do this */
2152 debug(2, "unsupported func_code = 0x%x", ccb->ccb_h.func_code);
2153 ccb->ccb_h.status = CAM_REQ_INVALID;
2154 break;
2155 }
2156
2157 xpt_done(ccb);
2158}
2159
2160/********************************************************************************
2161 * Handle an I/O operation requested by CAM
2162 */
2163static int
2164mly_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio)
2165{
2166 struct mly_softc *sc = cam_sim_softc(sim);
2167 struct mly_command *mc;
2168 struct mly_command_scsi_small *ss;
2169 int bus, target;
2170 int error;
2171
2172 bus = cam_sim_bus(sim);
2173 target = csio->ccb_h.target_id;
2174
2175 debug(2, "XPT_SCSI_IO %d:%d:%d", bus, target, csio->ccb_h.target_lun);
2176
2177 /* validate bus number */
2178 if (!MLY_BUS_IS_VALID(sc, bus)) {
2179 debug(0, " invalid bus %d", bus);
2180 csio->ccb_h.status = CAM_REQ_CMP_ERR;
2181 }
2182
2183 /* check for I/O attempt to a protected device */
2184 if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_PROTECTED) {
2185 debug(2, " device protected");
2186 csio->ccb_h.status = CAM_REQ_CMP_ERR;
2187 }
2188
2189 /* check for I/O attempt to nonexistent device */
2190 if (!(sc->mly_btl[bus][target].mb_flags & (MLY_BTL_LOGICAL | MLY_BTL_PHYSICAL))) {
2191 debug(2, " device %d:%d does not exist", bus, target);
2192 csio->ccb_h.status = CAM_REQ_CMP_ERR;
2193 }
2194
2195 /* XXX increase if/when we support large SCSI commands */
2196 if (csio->cdb_len > MLY_CMD_SCSI_SMALL_CDB) {
2197 debug(0, " command too large (%d > %d)", csio->cdb_len, MLY_CMD_SCSI_SMALL_CDB);
2198 csio->ccb_h.status = CAM_REQ_CMP_ERR;
2199 }
2200
2201 /* check that the CDB pointer is not to a physical address */
2202 if ((csio->ccb_h.flags & CAM_CDB_POINTER) && (csio->ccb_h.flags & CAM_CDB_PHYS)) {
2203 debug(0, " CDB pointer is to physical address");
2204 csio->ccb_h.status = CAM_REQ_CMP_ERR;
2205 }
2206
2207 /* if there is data transfer, it must be to/from a virtual address */
2208 if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2209 if (csio->ccb_h.flags & CAM_DATA_PHYS) { /* we can't map it */
2210 debug(0, " data pointer is to physical address");
2211 csio->ccb_h.status = CAM_REQ_CMP_ERR;
2212 }
2213 if (csio->ccb_h.flags & CAM_SCATTER_VALID) { /* we want to do the s/g setup */
2214 debug(0, " data has premature s/g setup");
2215 csio->ccb_h.status = CAM_REQ_CMP_ERR;
2216 }
2217 }
2218
2219 /* abandon aborted ccbs or those that have failed validation */
2220 if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2221 debug(2, "abandoning CCB due to abort/validation failure");
2222 return(EINVAL);
2223 }
2224
2225 /*
2226 * Get a command, or push the ccb back to CAM and freeze the queue.
2227 */
2228 if ((error = mly_alloc_command(sc, &mc))) {
2229 xpt_freeze_simq(sim, 1);
2230 csio->ccb_h.status |= CAM_REQUEUE_REQ;
2231 return(error);
2232 }
2233
2234 /* build the command */
2235 mc->mc_data = csio->data_ptr;
2236 mc->mc_length = csio->dxfer_len;
2237 mc->mc_complete = mly_cam_complete;
2238 mc->mc_private = csio;
2239
2241 /* save the bus number in the ccb for later recovery XXX there should be a better way */
2241 csio->ccb_h.sim_priv.entries[0].field = bus;
2242
2243 /* build the packet for the controller */
2244 ss = &mc->mc_packet->scsi_small;
2245 ss->opcode = MDACMD_SCSI;
2246 if (csio->ccb_h.flags & CAM_DIS_DISCONNECT)
2247 ss->command_control.disable_disconnect = 1;
2248 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2249 ss->command_control.data_direction = MLY_CCB_WRITE;
2250 ss->data_size = csio->dxfer_len;
2251 ss->addr.phys.lun = csio->ccb_h.target_lun;
2252 ss->addr.phys.target = csio->ccb_h.target_id;
2253 ss->addr.phys.channel = bus;
2254 if (csio->ccb_h.timeout < (60 * 1000)) {
2255 ss->timeout.value = csio->ccb_h.timeout / 1000;
2256 ss->timeout.scale = MLY_TIMEOUT_SECONDS;
2257 } else if (csio->ccb_h.timeout < (60 * 60 * 1000)) {
2258 ss->timeout.value = csio->ccb_h.timeout / (60 * 1000);
2259 ss->timeout.scale = MLY_TIMEOUT_MINUTES;
2260 } else {
2261 ss->timeout.value = csio->ccb_h.timeout / (60 * 60 * 1000); /* overflow? */
2262 ss->timeout.scale = MLY_TIMEOUT_HOURS;
2263 }
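	/* e.g. a 30000ms CAM timeout becomes {value 30, scale MLY_TIMEOUT_SECONDS},
	   120000ms {2, MLY_TIMEOUT_MINUTES} and 7200000ms {2, MLY_TIMEOUT_HOURS} */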
2264 ss->maximum_sense_size = csio->sense_len;
2265 ss->cdb_length = csio->cdb_len;
2266 if (csio->ccb_h.flags & CAM_CDB_POINTER) {
2267 bcopy(csio->cdb_io.cdb_ptr, ss->cdb, csio->cdb_len);
2268 } else {
2269 bcopy(csio->cdb_io.cdb_bytes, ss->cdb, csio->cdb_len);
2270 }
2271
2272 /* give the command to the controller */
2273 if ((error = mly_start(mc))) {
2274 xpt_freeze_simq(sim, 1);
2275 csio->ccb_h.status |= CAM_REQUEUE_REQ;
2276 return(error);
2277 }
2278
2279 return(0);
2280}
2281
2282/********************************************************************************
2283 * Check for possibly-completed commands.
2284 */
2285static void
2286mly_cam_poll(struct cam_sim *sim)
2287{
2288 struct mly_softc *sc = cam_sim_softc(sim);
2289
2290 debug_called(2);
2291
2292 mly_done(sc);
2293}
2294
2295/********************************************************************************
2296 * Handle completion of a command - pass results back through the CCB
2297 */
2298static void
2299mly_cam_complete(struct mly_command *mc)
2300{
2301 struct mly_softc *sc = mc->mc_sc;
2302 struct ccb_scsiio *csio = (struct ccb_scsiio *)mc->mc_private;
2303 struct scsi_inquiry_data *inq = (struct scsi_inquiry_data *)csio->data_ptr;
2304 struct mly_btl *btl;
2305 u_int8_t cmd;
2306 int bus, target;
2307
2308 debug_called(2);
2309
2310 csio->scsi_status = mc->mc_status;
2311 switch(mc->mc_status) {
2312 case SCSI_STATUS_OK:
2313 /*
2314 * In order to report logical device type and status, we overwrite
2315 * the result of the INQUIRY command to logical devices.
2316 */
2317 bus = csio->ccb_h.sim_priv.entries[0].field;
2318 target = csio->ccb_h.target_id;
2319 /* XXX validate bus/target? */
2320 if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_LOGICAL) {
2321 if (csio->ccb_h.flags & CAM_CDB_POINTER) {
2322 cmd = *csio->cdb_io.cdb_ptr;
2323 } else {
2324 cmd = csio->cdb_io.cdb_bytes[0];
2325 }
2326 if (cmd == INQUIRY) {
2327 btl = &sc->mly_btl[bus][target];
2328 padstr(inq->vendor, mly_describe_code(mly_table_device_type, btl->mb_type), 8);
2329 padstr(inq->product, mly_describe_code(mly_table_device_state, btl->mb_state), 16);
2330 padstr(inq->revision, "", 4);
2331 }
2332 }
2333
2334 debug(2, "SCSI_STATUS_OK");
2335 csio->ccb_h.status = CAM_REQ_CMP;
2336 break;
2337
2338 case SCSI_STATUS_CHECK_COND:
2339 debug(1, "SCSI_STATUS_CHECK_COND sense %d resid %d", mc->mc_sense, mc->mc_resid);
2340 csio->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2341 bzero(&csio->sense_data, SSD_FULL_SIZE);
2342 bcopy(mc->mc_packet, &csio->sense_data, mc->mc_sense);
2343 csio->sense_len = mc->mc_sense;
2344 csio->ccb_h.status |= CAM_AUTOSNS_VALID;
2345 csio->resid = mc->mc_resid; /* XXX this is a signed value... */
2346 break;
2347
2348 case SCSI_STATUS_BUSY:
2349 debug(1, "SCSI_STATUS_BUSY");
2350 csio->ccb_h.status = CAM_SCSI_BUSY;
2351 break;
2352
2353 default:
2354 debug(1, "unknown status 0x%x", csio->scsi_status);
2355 csio->ccb_h.status = CAM_REQ_CMP_ERR;
2356 break;
2357 }
2358 xpt_done((union ccb *)csio);
2359 mly_release_command(mc);
2360}
2361
2362/********************************************************************************
2363 * Find a peripheral attached at (bus),(target)
2364 */
2365static struct cam_periph *
2366mly_find_periph(struct mly_softc *sc, int bus, int target)
2367{
2368 struct cam_periph *periph;
2369 struct cam_path *path;
2370 int status;
2371
2372 status = xpt_create_path(&path, NULL, cam_sim_path(sc->mly_cam_sim[bus]), target, 0);
2373 if (status == CAM_REQ_CMP) {
2374 periph = cam_periph_find(path, NULL);
2375 xpt_free_path(path);
2376 } else {
2377 periph = NULL;
2378 }
2379 return(periph);
2380}
2381
2382/********************************************************************************
2383 * Name the device at (bus)(target)
2384 */
2385int
2386mly_name_device(struct mly_softc *sc, int bus, int target)
2387{
2388 struct cam_periph *periph;
2389
2390 if ((periph = mly_find_periph(sc, bus, target)) != NULL) {
2391 sprintf(sc->mly_btl[bus][target].mb_name, "%s%d", periph->periph_name, periph->unit_number);
2392 return(0);
2393 }
2394 sc->mly_btl[bus][target].mb_name[0] = 0;
2395 return(ENOENT);
2396}
2397
2398/********************************************************************************
2399 ********************************************************************************
1316 Hardware Control
1317 ********************************************************************************
1318 ********************************************************************************/
1319
1320/********************************************************************************
1321 * Handshake with the firmware while the card is being initialised.
1322 */
1323static int

--- 343 unchanged lines hidden (view full) ---

1667 * Panic in a slightly informative fashion
1668 */
1669static void
1670mly_panic(struct mly_softc *sc, char *reason)
1671{
1672 mly_printstate(sc);
1673 panic(reason);
1674}
2400 Hardware Control
2401 ********************************************************************************
2402 ********************************************************************************/
2403
2404/********************************************************************************
2405 * Handshake with the firmware while the card is being initialised.
2406 */
2407static int

--- 343 unchanged lines hidden (view full) ---

2751 * Panic in a slightly informative fashion
2752 */
2753static void
2754mly_panic(struct mly_softc *sc, char *reason)
2755{
2756 mly_printstate(sc);
2757 panic(reason);
2758}
1675#endif
1676
1677/********************************************************************************
1678 * Print queue statistics, callable from DDB.
1679 */
1680void
1681mly_print_controller(int controller)
1682{
1683 struct mly_softc *sc;
1684
1685 if ((sc = devclass_get_softc(devclass_find("mly"), controller)) == NULL) {
1686 printf("mly: controller %d invalid\n", controller);
1687 } else {
1688 device_printf(sc->mly_dev, "queue curr max\n");
1689 device_printf(sc->mly_dev, "free %04d/%04d\n",
1690 sc->mly_qstat[MLYQ_FREE].q_length, sc->mly_qstat[MLYQ_FREE].q_max);
2759
2760/********************************************************************************
2761 * Print queue statistics, callable from DDB.
2762 */
2763void
2764mly_print_controller(int controller)
2765{
2766 struct mly_softc *sc;
2767
2768 if ((sc = devclass_get_softc(devclass_find("mly"), controller)) == NULL) {
2769 printf("mly: controller %d invalid\n", controller);
2770 } else {
2771 device_printf(sc->mly_dev, "queue curr max\n");
2772 device_printf(sc->mly_dev, "free %04d/%04d\n",
2773 sc->mly_qstat[MLYQ_FREE].q_length, sc->mly_qstat[MLYQ_FREE].q_max);
1691 device_printf(sc->mly_dev, "ready %04d/%04d\n",
1692 sc->mly_qstat[MLYQ_READY].q_length, sc->mly_qstat[MLYQ_READY].q_max);
1693 device_printf(sc->mly_dev, "busy %04d/%04d\n",
1694 sc->mly_qstat[MLYQ_BUSY].q_length, sc->mly_qstat[MLYQ_BUSY].q_max);
1695 device_printf(sc->mly_dev, "complete %04d/%04d\n",
1696 sc->mly_qstat[MLYQ_COMPLETE].q_length, sc->mly_qstat[MLYQ_COMPLETE].q_max);
1697 }
1698}
2774 device_printf(sc->mly_dev, "busy %04d/%04d\n",
2775 sc->mly_qstat[MLYQ_BUSY].q_length, sc->mly_qstat[MLYQ_BUSY].q_max);
2776 device_printf(sc->mly_dev, "complete %04d/%04d\n",
2777 sc->mly_qstat[MLYQ_COMPLETE].q_length, sc->mly_qstat[MLYQ_COMPLETE].q_max);
2778 }
2779}
2780#endif
1699
1700
1701/********************************************************************************
1702 ********************************************************************************
1703 Control device interface
1704 ********************************************************************************
1705 ********************************************************************************/
1706

--- 50 unchanged lines hidden (view full) ---

1757 * as the user-space data pointer and data size, and an optional sense buffer
1758 * size/pointer. On completion, the data size is adjusted to the command
1759 * residual, and the sense buffer size to the size of the returned sense data.
1760 *
1761 */
1762static int
1763mly_user_command(struct mly_softc *sc, struct mly_user_command *uc)
1764{
2781
2782
2783/********************************************************************************
2784 ********************************************************************************
2785 Control device interface
2786 ********************************************************************************
2787 ********************************************************************************/
2788

--- 50 unchanged lines hidden (view full) ---

2839 * as the user-space data pointer and data size, and an optional sense buffer
2840 * size/pointer. On completion, the data size is adjusted to the command
2841 * residual, and the sense buffer size to the size of the returned sense data.
2842 *
2843 */
2844static int
2845mly_user_command(struct mly_softc *sc, struct mly_user_command *uc)
2846{
1765 struct mly_command *mc;
1766 int error, s;
2847 struct mly_command *mc;
2848 int error, s;
1767
1768 /* allocate a command */
1769 if (mly_alloc_command(sc, &mc)) {
1770 error = ENOMEM;
1771 goto out; /* XXX Linux version will wait for a command */
1772 }
1773
1774 /* handle data size/direction */

--- 16 unchanged lines hidden (view full) ---

1791
1792 /* copy the controller command */
1793 bcopy(&uc->CommandMailbox, mc->mc_packet, sizeof(uc->CommandMailbox));
1794
1795 /* clear command completion handler so that we get woken up */
1796 mc->mc_complete = NULL;
1797
1798 /* execute the command */
2849
2850 /* allocate a command */
2851 if (mly_alloc_command(sc, &mc)) {
2852 error = ENOMEM;
2853 goto out; /* XXX Linux version will wait for a command */
2854 }
2855
2856 /* handle data size/direction */

--- 16 unchanged lines hidden (view full) ---

2873
2874 /* copy the controller command */
2875 bcopy(&uc->CommandMailbox, mc->mc_packet, sizeof(uc->CommandMailbox));
2876
2877 /* clear command completion handler so that we get woken up */
2878 mc->mc_complete = NULL;
2879
2880 /* execute the command */
2881 if ((error = mly_start(mc)) != 0)
2882 goto out;
1799 s = splcam();
2883 s = splcam();
1800 mly_requeue_ready(mc);
1801 mly_startio(sc);
1802 while (!(mc->mc_flags & MLY_CMD_COMPLETE))
1803 tsleep(mc, PRIBIO, "mlyioctl", 0);
1804 splx(s);
1805
1806 /* return the data to userspace */
1807 if (uc->DataTransferLength > 0)
1808 if ((error = copyout(mc->mc_data, uc->DataTransferBuffer, mc->mc_length)) != 0)
1809 goto out;

--- 50 unchanged lines hidden ---
2884 while (!(mc->mc_flags & MLY_CMD_COMPLETE))
2885 tsleep(mc, PRIBIO, "mlyioctl", 0);
2886 splx(s);
2887
2888 /* return the data to userspace */
2889 if (uc->DataTransferLength > 0)
2890 if ((error = copyout(mc->mc_data, uc->DataTransferBuffer, mc->mc_length)) != 0)
2891 goto out;

--- 50 unchanged lines hidden ---