/* $NetBSD: dwc2_hcdddma.c,v 1.10 2021/12/21 09:51:22 skrll Exp $ */

/*
 * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the Descriptor DMA implementation for Host mode
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dwc2_hcdddma.c,v 1.10 2021/12/21 09:51:22 skrll Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <linux/kernel.h>
#include <linux/list.h>

#include <dwc2/dwc2.h>
#include <dwc2/dwc2var.h>

#include "dwc2_core.h"
#include "dwc2_hcd.h"

static u16 dwc2_frame_list_idx(u16 frame)
{
	return frame & (FRLISTEN_64_SIZE - 1);
}

static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
{
	return (idx + inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
{
	return (idx - inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
{
	return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
		qh->dev_speed == USB_SPEED_HIGH) ?
		MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
}

static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
	return qh->dev_speed == USB_SPEED_HIGH ?
	       (qh->interval + 8 - 1) / 8 : qh->interval;
}
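/*
 * Illustration of the index helpers above, assuming (as in the dwc2
 * headers) FRLISTEN_64_SIZE == 64 and MAX_DMA_DESC_NUM_HS_ISOC == 256.
 * All of the rings are power-of-two sized, so "& (size - 1)" is a cheap
 * modulo:
 *
 *	dwc2_frame_list_idx(0x47) == 0x47 & 63 == 7
 *	dwc2_desclist_idx_inc(250, 8, USB_SPEED_HIGH) == (250 + 8) & 255 == 2
 *
 * dwc2_frame_incr_val() converts a high-speed interval given in
 * microframes into whole frames, rounding up: an interval of 12
 * microframes yields (12 + 7) / 8 == 2 frames.
 */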
static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				gfp_t flags)
{
	int err;

	KASSERT(!cpu_intr_p() && !cpu_softintr_p());

	qh->desc_list = NULL;
	qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) *
						dwc2_max_desc_num(qh);

	err = usb_allocmem(hsotg->hsotg_sc->sc_bus.ub_dmatag,
	    qh->desc_list_sz, 0, USBMALLOC_COHERENT, &qh->desc_list_usbdma);

	if (err)
		return -ENOMEM;

	qh->desc_list = KERNADDR(&qh->desc_list_usbdma, 0);
	qh->desc_list_dma = DMAADDR(&qh->desc_list_usbdma, 0);

	qh->n_bytes = kmem_zalloc(sizeof(u32) * dwc2_max_desc_num(qh), KM_SLEEP);
	if (!qh->n_bytes) {
		usb_freemem(&qh->desc_list_usbdma);
		qh->desc_list = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{

	if (qh->desc_list) {
		usb_freemem(&qh->desc_list_usbdma);
		qh->desc_list = NULL;
	}

	kmem_free(qh->n_bytes, sizeof(u32) * dwc2_max_desc_num(qh));
	qh->n_bytes = NULL;
}

static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
{
	int err;

	if (hsotg->frame_list)
		return 0;

	/* XXXNH - pool_cache_t */
	hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
	hsotg->frame_list = NULL;
	err = usb_allocmem(hsotg->hsotg_sc->sc_bus.ub_dmatag,
	    hsotg->frame_list_sz, 0, USBMALLOC_COHERENT,
	    &hsotg->frame_list_usbdma);

	if (!err) {
		hsotg->frame_list = KERNADDR(&hsotg->frame_list_usbdma, 0);
		hsotg->frame_list_dma = DMAADDR(&hsotg->frame_list_usbdma, 0);
	}

	if (!hsotg->frame_list)
		return -ENOMEM;

	return 0;
}

static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{
	usb_dma_t frame_list_usbdma;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	if (!hsotg->frame_list) {
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	frame_list_usbdma = hsotg->frame_list_usbdma;
	hsotg->frame_list = NULL;

	spin_unlock_irqrestore(&hsotg->lock, flags);

	usb_freemem(&frame_list_usbdma);
}

static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = DWC2_READ_4(hsotg, HCFG);
	if (hcfg & HCFG_PERSCHEDENA) {
		/* already enabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	DWC2_WRITE_4(hsotg, HFLBADDR, hsotg->frame_list_dma);

	hcfg &= ~HCFG_FRLISTEN_MASK;
	hcfg |= fr_list_en | HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
	DWC2_WRITE_4(hsotg, HCFG, hcfg);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = DWC2_READ_4(hsotg, HCFG);
	if (!(hcfg & HCFG_PERSCHEDENA)) {
		/* already disabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	hcfg &= ~HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
	DWC2_WRITE_4(hsotg, HCFG, hcfg);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

/*
 * Activates/Deactivates FrameList entries for the channel based on endpoint
 * servicing period
 */
static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				   int enable)
{
	struct dwc2_host_chan *chan;
	u16 i, j, inc;

	if (!hsotg) {
		printf("hsotg = %p\n", hsotg);
		return;
	}

	if (!qh->channel) {
		dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
		return;
	}

	if (!hsotg->frame_list) {
		dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
			hsotg->frame_list);
		return;
	}

	chan = qh->channel;
	inc = dwc2_frame_incr_val(qh);
	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
		i = dwc2_frame_list_idx(qh->sched_frame);
	else
		i = 0;

	j = i;
	do {
		if (enable)
			hsotg->frame_list[j] |= 1 << chan->hc_num;
		else
			hsotg->frame_list[j] &= ~(1 << chan->hc_num);
		j = (j + inc) & (FRLISTEN_64_SIZE - 1);
	} while (j != i);

	/*
	 * Sync frame list since controller will access it if periodic
	 * channel is currently enabled.
	 */
	usb_syncmem(&hsotg->frame_list_usbdma, 0, hsotg->frame_list_sz,
	    BUS_DMASYNC_PREWRITE);

	if (!enable)
		return;

	chan->schinfo = 0;
	if (chan->speed == USB_SPEED_HIGH && qh->interval) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->interval - 1) / qh->interval;
		for (i = 0; i < inc; i++) {
			chan->schinfo |= j;
			j = j << qh->interval;
		}
	} else {
		chan->schinfo = 0xff;
	}
}
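/*
 * Worked example for dwc2_update_frame_list() (values chosen for
 * illustration): a full-speed isochronous endpoint with qh->interval == 8
 * whose sched_frame maps to frame list index 3 sets bit chan->hc_num in
 * entries 3, 11, 19, ..., 59 of the 64-entry frame list, so the channel
 * is serviced every 8th frame. For a high-speed channel with
 * qh->interval == 2 microframes, inc == (8 + 2 - 1) / 2 == 4, so schinfo
 * gets bits 0, 2, 4 and 6 set: every other microframe of each frame.
 */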
static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh)
{
	struct dwc2_host_chan *chan = qh->channel;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->core_params->uframe_sched > 0)
			hsotg->available_host_channels++;
		else
			hsotg->non_periodic_channels--;
	} else {
		dwc2_update_frame_list(hsotg, qh, 0);
		hsotg->available_host_channels++;
	}

	/*
	 * The condition is added to prevent a double cleanup attempt in case
	 * of device disconnect. See channel cleanup in dwc2_hcd_disconnect().
	 */
	if (chan->qh) {
		if (!list_empty(&chan->hc_list_entry))
			list_del(&chan->hc_list_entry);
		dwc2_hc_cleanup(hsotg, chan);
		list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
		chan->qh = NULL;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list)
		memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
		       dwc2_max_desc_num(qh));
}

/**
 * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
 * related members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Allocates memory for the descriptor list. For the first periodic QH,
 * allocates memory for the FrameList and enables periodic scheduling.
 */
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			  gfp_t mem_flags)
{
	int retval;

	if (qh->do_split) {
		dev_err(hsotg->dev,
			"SPLIT Transfers are not supported in Descriptor DMA mode.\n");
		retval = -EINVAL;
		goto err0;
	}

	retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
	if (retval)
		goto err0;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	    qh->ep_type == USB_ENDPOINT_XFER_INT) {
		if (!hsotg->frame_list) {
			retval = dwc2_frame_list_alloc(hsotg, mem_flags);
			if (retval)
				goto err1;
			/* Enable periodic schedule on first periodic QH */
			dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
		}
	}

	qh->ntd = 0;
	return 0;

err1:
	dwc2_desc_list_free(hsotg, qh);
err0:
	return retval;
}

/**
 * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
 * members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to free
 *
 * Frees descriptor list memory associated with the QH. If the QH is periodic
 * and is the last one, frees FrameList memory and disables periodic
 * scheduling.
 */
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned long flags;

	dwc2_desc_list_free(hsotg, qh);

	/*
	 * The channel may still be assigned here. This has been seen on an
	 * Isoc URB dequeue: the channel was halted but no subsequent ChHalted
	 * interrupt arrived to release it, so when we get here from the
	 * endpoint disable routine the channel remains assigned.
	 */
	spin_lock_irqsave(&hsotg->lock, flags);
	if (qh->channel)
		dwc2_release_channel_ddma(hsotg, qh);
	spin_unlock_irqrestore(&hsotg->lock, flags);

	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
	    (hsotg->core_params->uframe_sched > 0 ||
	     !hsotg->periodic_channels) && hsotg->frame_list) {
		dwc2_per_sched_disable(hsotg);
		dwc2_frame_list_free(hsotg);
	}
}

static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
{
	if (qh->dev_speed == USB_SPEED_HIGH)
		/* Descriptor set (8 descriptors) index which is 8-aligned */
		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	else
		return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}
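/*
 * Example for dwc2_frame_to_desc_idx(), assuming MAX_DMA_DESC_NUM_HS_ISOC
 * == 256 and MAX_DMA_DESC_NUM_GENERIC == 64 as in the dwc2 headers: a
 * high-speed QH has 256 / 8 == 32 descriptor sets of 8 descriptors each
 * (one set per frame), so frame_idx 35 maps to (35 & 31) * 8 == 24, the
 * first descriptor of set 3. A full-speed QH simply maps frame_idx 35 to
 * 35 & 63 == 35.
 */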
/*
 * Determine starting frame for Isochronous transfer.
 * A few frames are skipped to prevent a race condition with the HC.
 */
static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 *skip_frames)
{
	u16 frame;

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	/* sched_frame is always frame number (not uFrame) both in FS and HS! */

	/*
	 * skip_frames is used to limit the number of activated descriptors,
	 * to avoid the situation where the HC services the last activated
	 * descriptor first.
	 * Example for FS:
	 * The current frame is 1 and the scheduled frame is 3. Since the HC
	 * always fetches the descriptor corresponding to curr_frame+1, the
	 * descriptor corresponding to frame 2 will be fetched. If the number
	 * of descriptors is max=64 (or greater), the list will be fully
	 * programmed with Active descriptors, and it is possible (though
	 * rare) that the latest descriptor (considering rollback)
	 * corresponding to frame 2 will be serviced first. The HS case is
	 * more probable because, in fact, up to 11 uframes (16 in the code)
	 * may be skipped.
	 */
	if (qh->dev_speed == USB_SPEED_HIGH) {
		/*
		 * Consider the uframe counter as well, to start the transfer
		 * asap. If half of the frame has elapsed, skip 2 frames,
		 * otherwise just 1 frame. The starting descriptor index must
		 * be 8-aligned, so if the current frame is nearly complete,
		 * the next one is skipped as well.
		 */
		if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		}

		frame = dwc2_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current and the next.
		 * But for descriptor programming, 1 frame (descriptor) is
		 * enough, see example above.
		 */
		*skip_frames = 1;
		frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
	}

	return frame;
}
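/*
 * Worked example for the HS branch above, assuming (as in the dwc2
 * headers) that dwc2_micro_frame_num() returns the low three bits of the
 * frame number and dwc2_full_frame_num() clears them: frame_number ==
 * 0x1ED means frame 0x3D, microframe 5. Since 5 >= 5, skip_frames == 16
 * and frame == 0x1ED + 16 == 0x1FD, which is then aligned down to 0x1F8,
 * i.e. the transfer is scheduled for the start of frame 0x3F, two full
 * frames ahead of the current one.
 */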
/*
 * Calculate initial descriptor index for isochronous transfer based on
 * scheduled frame
 */
static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	u16 frame, fr_idx, fr_idx_tmp, skip_frames;

	/*
	 * With the current ISOC processing algorithm the channel is released
	 * when no more QTDs are in the list (qh->ntd == 0). Thus this
	 * function is called only when qh->ntd == 0 and qh->channel == NULL.
	 *
	 * So the qh->channel != NULL branch is not used; it has deliberately
	 * not been removed from the source file. It is required for another
	 * possible approach: do not disable and release the channel when an
	 * ISOC session completes, just move the QH to the inactive schedule
	 * until a new QTD arrives. On a new QTD, the QH is moved back to the
	 * 'ready' schedule, and the starting frame, and therefore the
	 * starting desc_index, are recalculated. In that case the channel is
	 * released only on ep_disable.
	 */

	/*
	 * Calculate starting descriptor index. For INTERRUPT endpoints it is
	 * always 0.
	 */
	if (qh->channel) {
		frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
		/*
		 * Calculate initial descriptor index based on FrameList
		 * current bitmap and servicing period
		 */
		fr_idx_tmp = dwc2_frame_list_idx(frame);
		fr_idx = (FRLISTEN_64_SIZE +
			  dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp)
			 % dwc2_frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
	} else {
		qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh,
							   &skip_frames);
		fr_idx = dwc2_frame_list_idx(qh->sched_frame);
	}

	qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}
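/*
 * Worked example for the qh->channel branch above (values chosen for
 * illustration): qh->sched_frame maps to frame list index 10, the
 * servicing period dwc2_frame_incr_val(qh) is 4, and the newly calculated
 * starting frame maps to index 17. Then fr_idx_tmp == 17,
 * fr_idx == (64 + 10 - 17) % 4 == 1, and fr_idx == (1 + 17) % 64 == 18:
 * the first index at or after 17 that preserves the original schedule
 * phase (10, 14, 18, ...).
 */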
#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS	1023
#define MAX_ISOC_XFER_SIZE_HS	3072
#define DESCNUM_THRESHOLD	4

static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					 struct dwc2_qtd *qtd,
					 struct dwc2_qh *qh, u32 max_xfer_size,
					 u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	memset(dma_desc, 0, sizeof(*dma_desc));
	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

	if (frame_desc->length > max_xfer_size)
		qh->n_bytes[idx] = max_xfer_size;
	else
		qh->n_bytes[idx] = frame_desc->length;

	dma_desc->buf = (u32)(DMAADDR(qtd->urb->usbdma, frame_desc->offset));
	dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
			   HOST_DMA_ISOC_NBYTES_MASK;

	/* Set active bit */
	dma_desc->status |= HOST_DMA_A;

	qh->ntd++;
	qtd->isoc_frame_index_last++;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for each descriptor corresponding to last frame of URB */
	if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
		dma_desc->status |= HOST_DMA_IOC;
#endif

	usb_syncmem(&qh->desc_list_usbdma,
	    (idx * sizeof(struct dwc2_hcd_dma_desc)),
	    sizeof(struct dwc2_hcd_dma_desc),
	    BUS_DMASYNC_PREWRITE);
}

static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 skip_frames)
{
	struct dwc2_qtd *qtd;
	u32 max_xfer_size;
	u16 idx, inc, n_desc = 0, ntd_max = 0;
	u16 cur_idx;
	u16 next_idx;

	idx = qh->td_last;
	inc = qh->interval;
	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
	cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
	next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);

	/*
	 * Ensure that the current frame number hasn't overstepped the last
	 * scheduled descriptor. If it has, the only way to recover is to
	 * move qh->td_last to the current frame number + 1, so that the next
	 * isoc descriptor will be scheduled on frame number + 1 and not on a
	 * past frame.
	 */
	if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
		if (inc < 32) {
			dev_vdbg(hsotg->dev,
				 "current frame number overstep last descriptor\n");
			qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
							    qh->dev_speed);
			idx = qh->td_last;
		}
	}

	if (qh->interval) {
		ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) /
				qh->interval;
		if (skip_frames && !qh->channel)
			ntd_max -= skip_frames / qh->interval;
	}

	max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
			MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		if (qtd->in_process &&
		    qtd->isoc_frame_index_last ==
		    qtd->urb->packet_count)
			continue;

		qtd->isoc_td_first = idx;
		while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
				qtd->urb->packet_count) {
			dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
						     max_xfer_size, idx);
			idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->isoc_td_last = idx;
		qtd->in_process = 1;
	}

	qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for last descriptor if descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status |= HOST_DMA_IOC;

		usb_syncmem(&qh->desc_list_usbdma,
		    (idx * sizeof(struct dwc2_hcd_dma_desc)),
		    sizeof(struct dwc2_hcd_dma_desc),
		    BUS_DMASYNC_PREWRITE);
	}
#else
	/*
	 * Set the IOC bit on only one descriptor. Always try to stay ahead
	 * of HW processing, i.e. on IOC generation the driver activates the
	 * next descriptor, while the core continues to process the
	 * descriptors following the one with IOC set.
	 */

	if (n_desc > DESCNUM_THRESHOLD)
		/*
		 * Move IOC "up". Required even if there is only one QTD
		 * in the list, because QTDs might continue to be queued,
		 * but only one was queued during activation.
		 * Actually more than one QTD might be in the list if this
		 * function is called from XferCompletion - QTDs were queued
		 * during HW processing of the previous descriptor chunk.
		 */
		idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
					    qh->dev_speed);
	else
		/*
		 * Set the IOC for the latest descriptor if either the number
		 * of descriptors is not greater than the threshold or no
		 * more new descriptors were activated
		 */
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);

	qh->desc_list[idx].status |= HOST_DMA_IOC;
	usb_syncmem(&qh->desc_list_usbdma,
	    (idx * sizeof(struct dwc2_hcd_dma_desc)),
	    sizeof(struct dwc2_hcd_dma_desc),
	    BUS_DMASYNC_PREWRITE);
#endif
}
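/*
 * Illustration of the IOC placement in the non-ASAP (#else) branch above:
 * with inc == qh->interval == 1 and qh->ntd == 10 descriptors activated,
 * idx is moved back by inc * ((10 + 1) / 2) == 5 entries, so the IOC
 * interrupt fires roughly halfway through the chunk and the driver can
 * refill the list while the core keeps working on the remaining
 * descriptors.
 */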
static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
				    int n_desc)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
	int len = chan->xfer_len;

	if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
		len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);

	if (chan->ep_is_in) {
		int num_packets;

		if (len > 0 && chan->max_packet)
			num_packets = (len + chan->max_packet - 1)
					/ chan->max_packet;
		else
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;

		/* Always program an integral # of packets for IN transfers */
		len = num_packets * chan->max_packet;
	}

	dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
	qh->n_bytes[n_desc] = len;

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
	    qtd->control_phase == DWC2_CONTROL_SETUP)
		dma_desc->status |= HOST_DMA_SUP;

	dma_desc->buf = (u32)chan->xfer_dma;

	usb_syncmem(&qh->desc_list_usbdma,
	    (n_desc * sizeof(struct dwc2_hcd_dma_desc)),
	    sizeof(struct dwc2_hcd_dma_desc),
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Last (or only) descriptor of IN transfer with actual size less
	 * than MaxPacket
	 */
	if (len > chan->xfer_len) {
		chan->xfer_len = 0;
	} else {
		chan->xfer_dma += len;		/* XXXNH safe */
		chan->xfer_len -= len;
	}
}
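/*
 * Example of the length math above, assuming MAX_DMA_DESC_SIZE == 131071
 * as in the dwc2 headers: for a bulk IN endpoint with chan->max_packet ==
 * 512, len is first clamped to 131071 - 511 == 130560, an exact multiple
 * of 512. An IN transfer of 700 bytes is then rounded up to num_packets
 * == 2, len == 1024, since the controller always transfers whole packets
 * on IN endpoints; the "len > chan->xfer_len" case at the end accounts
 * for such a rounded-up final descriptor.
 */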
static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan = qh->channel;
	int n_desc = 0;

	dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
		 (unsigned long)chan->xfer_dma, chan->xfer_len);

	/*
	 * Start with chan->xfer_dma initialized in assign_and_init_hc(); then,
	 * if an SG transfer consists of multiple URBs, this pointer is
	 * re-assigned to the buffer of the currently processed QTD. For a
	 * non-SG request there is always one QTD active.
	 */

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);

		if (n_desc) {
			/* SG request - more than 1 QTD */
			chan->xfer_dma = DMAADDR(qtd->urb->usbdma,
					qtd->urb->actual_length);
			chan->xfer_len = qtd->urb->length -
					qtd->urb->actual_length;
			dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
				 (unsigned long)chan->xfer_dma,
				 chan->xfer_len);
		}

		qtd->n_desc = 0;
		do {
			if (n_desc > 1) {
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
				dev_vdbg(hsotg->dev,
					 "set A bit in desc %d (%p)\n",
					 n_desc - 1,
					 &qh->desc_list[n_desc - 1]);
				usb_syncmem(&qh->desc_list_usbdma,
				    ((n_desc - 1) *
				    sizeof(struct dwc2_hcd_dma_desc)),
				    sizeof(struct dwc2_hcd_dma_desc),
				    BUS_DMASYNC_PREWRITE);
			}
			dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
			dev_vdbg(hsotg->dev,
				 "desc %d (%p) buf=%08x status=%08x\n",
				 n_desc, &qh->desc_list[n_desc],
				 qh->desc_list[n_desc].buf,
				 qh->desc_list[n_desc].status);
			qtd->n_desc++;
			n_desc++;
		} while (chan->xfer_len > 0 &&
			 n_desc != MAX_DMA_DESC_NUM_GENERIC);

		dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
		qtd->in_process = 1;
		if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
			break;
		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
			break;
	}

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |=
				HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
		dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
			 n_desc - 1, &qh->desc_list[n_desc - 1]);
		usb_syncmem(&qh->desc_list_usbdma,
		    ((n_desc - 1) * sizeof(struct dwc2_hcd_dma_desc)),
		    sizeof(struct dwc2_hcd_dma_desc),
		    BUS_DMASYNC_PREWRITE);
		if (n_desc > 1) {
			qh->desc_list[0].status |= HOST_DMA_A;
			dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
				 &qh->desc_list[0]);
			usb_syncmem(&qh->desc_list_usbdma, 0,
			    sizeof(struct dwc2_hcd_dma_desc),
			    BUS_DMASYNC_PREWRITE);
		}
		chan->ntd = n_desc;
	}
}

/**
 * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * For Control and Bulk endpoints, initializes the descriptor list and starts
 * the transfer. For Interrupt and Isochronous endpoints, initializes the
 * descriptor list, then updates the FrameList, marking the appropriate
 * entries as active.
 *
 * For Isochronous endpoints the starting descriptor index is calculated based
 * on the scheduled frame, but only on the first transfer descriptor within a
 * session. Then the transfer is started by enabling the channel.
 *
 * For Isochronous endpoints the channel is not halted on the XferComplete
 * interrupt, so it remains assigned to the endpoint (QH) until the session
 * is done.
 */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Channel is already assigned */
	struct dwc2_host_chan *chan = qh->channel;
	u16 skip_frames = 0;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_INT:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_update_frame_list(hsotg, qh, 1);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!qh->ntd)
			skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
		dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);

		if (!chan->xfer_started) {
			dwc2_update_frame_list(hsotg, qh, 1);

			/*
			 * Always set ntd to the max, instead of the actual
			 * size. Otherwise ntd would be changed while the
			 * channel is being enabled, which is not recommended.
			 */
			chan->ntd = dwc2_max_desc_num(qh);

			/* Enable channel only once for ISOC */
			dwc2_hc_start_transfer_ddma(hsotg, chan);
		}

		break;
	default:
		break;
	}
}

#define DWC2_CMPL_DONE		1
#define DWC2_CMPL_STOP		2

static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan,
					struct dwc2_qtd *qtd,
					struct dwc2_qh *qh, u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc;
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u16 remain = 0;
	int rc = 0;

	if (!qtd->urb)
		return -EINVAL;

	usb_syncmem(&qh->desc_list_usbdma,
	    (idx * sizeof(struct dwc2_hcd_dma_desc)),
	    sizeof(struct dwc2_hcd_dma_desc),
	    BUS_DMASYNC_POSTREAD);

	dma_desc = &qh->desc_list[idx];

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
	dma_desc->buf = (u32)(DMAADDR(qtd->urb->usbdma, frame_desc->offset));
	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
			 HOST_DMA_ISOC_NBYTES_SHIFT;

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		/*
		 * XactError, or unable to complete all the transactions
		 * in the scheduled micro-frame/frame, both indicated by
		 * HOST_DMA_STS_PKTERR
		 */
		qtd->urb->error_count++;
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = -EPROTO;
	} else {
		/* Success */
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = 0;
	}

	if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers here. The
		 * individual frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

		/*
		 * This check is necessary because urb_dequeue can be called
		 * from the urb complete callback (the sound driver, for
		 * example). All pending URBs are dequeued there, so no
		 * further processing is needed.
		 */
		if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
			return -1;
		rc = DWC2_CMPL_DONE;
	}

	qh->ntd--;

	/* Stop if IOC requested descriptor reached */
	if (dma_desc->status & HOST_DMA_IOC)
		rc = DWC2_CMPL_STOP;

	return rc;
}
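/*
 * Example of the completion math above (values chosen for illustration):
 * an IN descriptor programmed with qh->n_bytes[idx] == 1024 for which the
 * device delivered 800 bytes has remain == 224 written back in its NBYTES
 * field, so frame_desc->actual_length == 1024 - 224 == 800. For OUT
 * descriptors remain stays 0 and actual_length reports the programmed
 * size.
 */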
static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					 struct dwc2_host_chan *chan,
					 enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh;
	u16 idx;
	int rc;

	qh = chan->qh;
	idx = qh->td_first;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	if (halt_status == DWC2_HC_XFER_AHB_ERR ||
	    halt_status == DWC2_HC_XFER_BABBLE_ERR) {
		/*
		 * The channel is halted in these error cases, which are
		 * considered serious issues.
		 * Complete all URBs, marking all frames as failed,
		 * irrespective of whether some of the descriptors (frames)
		 * succeeded or not.
		 * Pass the error code to the completion routine as well, to
		 * update urb->status; some class drivers might use it to
		 * stop queuing transfer requests.
		 */
		int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
			  -EIO : -EOVERFLOW;

		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
					 qtd_list_entry) {
			if (qtd->urb) {
				for (idx = 0; idx < qtd->urb->packet_count;
				     idx++) {
					frame_desc = &qtd->urb->iso_descs[idx];
					frame_desc->status = err;
				}

				dwc2_host_complete(hsotg, qtd, err);
			}

			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		}

		return;
	}

	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
		if (!qtd->in_process)
			break;

		/*
		 * Ensure idx corresponds to the descriptor where the first
		 * urb of this qtd was added. In fact, during isoc desc init,
		 * dwc2 may skip an index if the current frame number is
		 * already over this index.
		 */
		if (idx != qtd->isoc_td_first) {
			dev_vdbg(hsotg->dev,
				 "try to complete %d instead of %d\n",
				 idx, qtd->isoc_td_first);
			idx = qtd->isoc_td_first;
		}

		do {
			struct dwc2_qtd *qtd_next;
			u16 cur_idx;

			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
							  idx);
			if (rc < 0)
				return;
			idx = dwc2_desclist_idx_inc(idx, qh->interval,
						    chan->speed);
			if (!rc)
				continue;

			if (rc == DWC2_CMPL_DONE)
				break;

			/* rc == DWC2_CMPL_STOP */

			if (qh->interval >= 32)
				goto stop_scan;

			qh->td_first = idx;
			cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
			qtd_next = list_first_entry(&qh->qtd_list,
						    struct dwc2_qtd,
						    qtd_list_entry);
			if (dwc2_frame_idx_num_gt(cur_idx,
						  qtd_next->isoc_td_last))
				break;

			goto stop_scan;

		} while (idx != qh->td_first);
	}

stop_scan:
	qh->td_first = idx;
}

static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
					       struct dwc2_host_chan *chan,
					       struct dwc2_qtd *qtd,
					       struct dwc2_hcd_dma_desc *dma_desc,
					       enum dwc2_halt_status halt_status,
					       u32 n_bytes, int *xfer_done)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	u16 remain = 0;

	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
			 HOST_DMA_NBYTES_SHIFT;

	dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);

	if (halt_status == DWC2_HC_XFER_AHB_ERR) {
		dev_err(hsotg->dev, "EIO\n");
		urb->status = -EIO;
		return 1;
	}

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		switch (halt_status) {
		case DWC2_HC_XFER_STALL:
			dev_vdbg(hsotg->dev, "Stall\n");
			urb->status = -EPIPE;
			break;
		case DWC2_HC_XFER_BABBLE_ERR:
			dev_err(hsotg->dev, "Babble\n");
			urb->status = -EOVERFLOW;
			break;
		case DWC2_HC_XFER_XACT_ERR:
			dev_err(hsotg->dev, "XactErr\n");
			urb->status = -EPROTO;
			break;
		default:
			dev_err(hsotg->dev,
				"%s: Unhandled descriptor error status (%d)\n",
				__func__, halt_status);
			break;
		}
		return 1;
	}

	if (dma_desc->status & HOST_DMA_A) {
		dev_vdbg(hsotg->dev,
			 "Active descriptor encountered on channel %d\n",
			 chan->hc_num);
		return 0;
	}

	if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		if (qtd->control_phase == DWC2_CONTROL_DATA) {
			urb->actual_length += n_bytes - remain;
			if (remain || urb->actual_length >= urb->length) {
				/*
				 * For the Control Data stage do not set
				 * urb->status to 0, to prevent the URB
				 * callback. Set it when the Status phase is
				 * done. See below.
				 */
				*xfer_done = 1;
			}
		} else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
			urb->status = 0;
			*xfer_done = 1;
		}
		/* No handling for SETUP stage */
	} else {
		/* BULK and INTR */
		urb->actual_length += n_bytes - remain;
		dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
			 urb->actual_length);
		if (remain || urb->actual_length >= urb->length) {
			urb->status = 0;
			*xfer_done = 1;
		}
	}

	return 0;
}
static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan,
				      int chnum, struct dwc2_qtd *qtd,
				      int desc_num,
				      enum dwc2_halt_status halt_status,
				      int *xfer_done)
{
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_hcd_urb *urb = qtd->urb;
	struct dwc2_hcd_dma_desc *dma_desc;
	u32 n_bytes;
	int failed;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!urb)
		return -EINVAL;

	usb_syncmem(&qh->desc_list_usbdma,
	    (desc_num * sizeof(struct dwc2_hcd_dma_desc)),
	    sizeof(struct dwc2_hcd_dma_desc),
	    BUS_DMASYNC_POSTREAD);

	dma_desc = &qh->desc_list[desc_num];
	n_bytes = qh->n_bytes[desc_num];
	dev_vdbg(hsotg->dev,
		 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
		 qtd, urb, desc_num, dma_desc, n_bytes);
	failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
						     halt_status, n_bytes,
						     xfer_done);
	if (*xfer_done && urb->status != -EINPROGRESS)
		failed = 1;

	if (failed) {
		dwc2_host_complete(hsotg, qtd, urb->status);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
			 failed, *xfer_done, urb->status);
		return failed;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 "  Control setup transaction done\n");
			break;
		case DWC2_CONTROL_DATA:
			if (*xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 "  Control data transfer done\n");
			} else if (desc_num + 1 == qtd->n_desc) {
				/*
				 * Last descriptor for Control data stage which
				 * is not completed yet
				 */
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			break;
		default:
			break;
		}
	}

	return 0;
}
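/*
 * Control transfer phase walk-through (illustrative): a GET_DESCRIPTOR
 * request starts in DWC2_CONTROL_SETUP. Since urb->length > 0, the QTD
 * advances to DWC2_CONTROL_DATA once the SETUP descriptor completes, then
 * to DWC2_CONTROL_STATUS when the data stage is done, and the URB is only
 * given back after the Status phase sets urb->status to 0 in
 * dwc2_update_non_isoc_urb_state_ddma().
 */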
static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					     struct dwc2_host_chan *chan,
					     int chnum,
					     enum dwc2_halt_status halt_status)
{
	struct list_head *qtd_item, *qtd_tmp;
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_qtd *qtd = NULL;
	int xfer_done;
	int desc_num = 0;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
		int i;

		qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
		xfer_done = 0;

		for (i = 0; i < qtd->n_desc; i++) {
			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
						       desc_num, halt_status,
						       &xfer_done)) {
				qtd = NULL;
				break;
			}
			desc_num++;
		}
	}

	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * Reset the data toggle for bulk and interrupt endpoints
		 * in case of stall. See handle_hc_stall_intr().
		 */
		if (halt_status == DWC2_HC_XFER_STALL)
			qh->data_toggle = DWC2_HC_PID_DATA0;
		else if (qtd)
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
	}

	if (halt_status == DWC2_HC_XFER_COMPLETE) {
		if (chan->hcint & HCINTMSK_NYET) {
			/*
			 * Got a NYET on the last transaction of the transfer.
			 * It means that the endpoint should be in the PING
			 * state at the beginning of the next transfer.
			 */
			qh->ping_state = 1;
		}
	}
}

/**
 * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates the
 * URB's status and calls the completion routine for the URB if it's done.
 * Called from interrupt handlers.
 *
 * @hsotg:       The HCD state structure for the DWC OTG controller
 * @chan:        Host channel the transfer is completed on
 * @chnum:       Index of Host channel registers
 * @halt_status: Reason the channel is being halted or just XferComplete
 *               for isochronous transfers
 *
 * Releases the channel to be used by other transfers.
 * In the case of an Isochronous endpoint the channel is not halted until
 * the end of the session, i.e. until the QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 * Calls transaction selection routines to activate pending transfers.
 */
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 enum dwc2_halt_status halt_status)
{
	struct dwc2_qh *qh = chan->qh;
	int continue_isoc_xfer = 0;
	enum dwc2_transaction_type tr_type;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);

		/* Release the channel if halted or session completed */
		if (halt_status != DWC2_HC_XFER_COMPLETE ||
		    list_empty(&qh->qtd_list)) {
			struct dwc2_qtd *qtd, *qtd_tmp;

			/*
			 * Kill all remaining QTDs since the channel has been
			 * halted.
			 */
			list_for_each_entry_safe(qtd, qtd_tmp,
						 &qh->qtd_list,
						 qtd_list_entry) {
				dwc2_host_complete(hsotg, qtd,
						   -ECONNRESET);
				dwc2_hcd_qtd_unlink_and_free(hsotg,
							     qtd, qh);
			}

			/* Halt the channel if session completed */
			if (halt_status == DWC2_HC_XFER_COMPLETE)
				dwc2_hc_halt(hsotg, chan, halt_status);
			dwc2_release_channel_ddma(hsotg, qh);
			dwc2_hcd_qh_unlink(hsotg, qh);
		} else {
			/* Keep in assigned schedule to continue transfer */
			list_move(&qh->qh_list_entry,
				  &hsotg->periodic_sched_assigned);
			/*
			 * If the channel has been halted during giveback of
			 * the urb, then prevent any new scheduling.
			 */
			if (!chan->halt_status)
				continue_isoc_xfer = 1;
		}
		/*
		 * Todo: Consider the case when the period exceeds the
		 * FrameList size. The Frame Rollover interrupt should be
		 * used.
		 */
	} else {
		/*
		 * Scan descriptor list to complete the URB(s), then release
		 * the channel
		 */
		dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
						 halt_status);
		dwc2_release_channel_ddma(hsotg, qh);
		dwc2_hcd_qh_unlink(hsotg, qh);

		if (!list_empty(&qh->qtd_list)) {
			/*
			 * Add back to inactive non-periodic schedule on normal
			 * completion
			 */
			dwc2_hcd_qh_add(hsotg, qh);
		}
	}

	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
		if (continue_isoc_xfer) {
			if (tr_type == DWC2_TRANSACTION_NONE)
				tr_type = DWC2_TRANSACTION_PERIODIC;
			else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
				tr_type = DWC2_TRANSACTION_ALL;
		}
		dwc2_hcd_queue_transactions(hsotg, tr_type);
	}
}