Lines matching refs:hs_req (drivers/usb/dwc2/gadget.c, the dwc2 USB gadget driver)

425  * @hs_req: The request being processed.
432 struct dwc2_hsotg_req *hs_req)
434 struct usb_request *req = &hs_req->req;
491 * @hs_req: The request to write data for.
505 struct dwc2_hsotg_req *hs_req)
509 int buf_pos = hs_req->req.actual;
641 to_write, hs_req->req.length, can_write, buf_pos);
646 hs_req->req.actual = buf_pos + to_write;
653 data = hs_req->req.buf + buf_pos;
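
Note: the write-FIFO hits at lines 505-653 show the IN-direction bookkeeping: the next chunk resumes at the request's current actual offset, is limited by what the TxFIFO can accept (can_write), and actual is advanced past the bytes handed to hardware. A simplified userspace-style sketch of that accounting, using stand-in fake_* types and a plain memory buffer in place of the endpoint FIFO; the real routine additionally deals with packet-size rounding and FIFO-space queries.

#include <stdio.h>
#include <string.h>

struct fake_req {
	const char *buf;	/* stands in for hs_req->req.buf */
	size_t      length;
	size_t      actual;
};

/* Push one chunk into the (fake) FIFO; returns the number of bytes taken. */
static size_t write_fifo(struct fake_req *req, char *fifo, size_t can_write)
{
	size_t buf_pos  = req->actual;			/* resume where we left off */
	size_t to_write = req->length - buf_pos;	/* bytes still owed */

	if (to_write > can_write)
		to_write = can_write;			/* limited by FIFO space */

	memcpy(fifo, req->buf + buf_pos, to_write);
	req->actual = buf_pos + to_write;		/* mirrors line 646 */
	return to_write;
}

int main(void)
{
	char fifo[8];
	struct fake_req req = { .buf = "0123456789", .length = 10 };

	while (req.actual < req.length)
		printf("pushed %zu bytes\n", write_fifo(&req, fifo, sizeof(fifo)));
	return 0;
}
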
983 struct dwc2_hsotg_req *hs_req, *treq;
1007 list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
1008 dma_addr_t dma_addr = hs_req->req.dma;
1010 if (hs_req->req.num_sgs) {
1011 WARN_ON(hs_req->req.num_sgs > 1);
1012 dma_addr = sg_dma_address(hs_req->req.sg);
1015 hs_req->req.length);
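
Note: lines 1007-1015 (and again at 1487-1494) show how a request's DMA address is chosen: normally req.dma as mapped by the gadget core, but if a scatterlist was mapped (num_sgs non-zero) the driver takes sg_dma_address() of the single entry it supports and warns on anything longer. A minimal sketch of that selection; fake_sg and fake_usb_request are illustrative stand-ins, not the kernel structures.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;		/* stand-in for the kernel type */

struct fake_sg { dma_addr_t dma_address; };

struct fake_usb_request {
	dma_addr_t      dma;		/* set when the request buffer itself was mapped */
	unsigned int    num_sgs;	/* non-zero when a scatterlist was mapped */
	struct fake_sg *sg;
	unsigned int    length;
};

/* Pick the address the controller should be programmed with. */
static dma_addr_t req_dma_address(const struct fake_usb_request *req)
{
	dma_addr_t dma_addr = req->dma;

	if (req->num_sgs) {
		/* the driver WARN_ON()s more than one scatterlist entry */
		assert(req->num_sgs == 1);
		dma_addr = req->sg[0].dma_address;	/* sg_dma_address() */
	}
	return dma_addr;
}

int main(void)
{
	struct fake_sg sg = { .dma_address = 0x2000 };
	struct fake_usb_request flat = { .dma = 0x1000, .length = 64 };
	struct fake_usb_request sgl  = { .num_sgs = 1, .sg = &sg, .length = 64 };

	printf("flat: %#llx  sg: %#llx\n",
	       (unsigned long long)req_dma_address(&flat),
	       (unsigned long long)req_dma_address(&sgl));
	return 0;
}
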
1035 struct dwc2_hsotg_req *hs_req,
1042 * @hs_req: The request to start.
1050 struct dwc2_hsotg_req *hs_req,
1053 struct usb_request *ureq = &hs_req->req;
1070 } else if (hs_ep->req != hs_req && continuing) {
1147 hs_ep->req = hs_req;
1200 hs_req->req.frame_number = hs_ep->target_frame;
1201 hs_req->req.actual = 0;
1202 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
1230 dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
1285 struct dwc2_hsotg_req *hs_req)
1287 void *req_buf = hs_req->req.buf;
1293 WARN_ON(hs_req->saved_req_buf);
1296 hs_ep->ep.name, req_buf, hs_req->req.length);
1298 hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
1299 if (!hs_req->req.buf) {
1300 hs_req->req.buf = req_buf;
1308 hs_req->saved_req_buf = req_buf;
1311 memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
1318 struct dwc2_hsotg_req *hs_req)
1321 if (!using_dma(hsotg) || !hs_req->saved_req_buf)
1325 hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);
1328 if (!hs_ep->dir_in && !hs_req->req.status)
1329 memcpy(hs_req->saved_req_buf, hs_req->req.buf,
1330 hs_req->req.actual);
1333 kfree(hs_req->req.buf);
1335 hs_req->req.buf = hs_req->saved_req_buf;
1336 hs_req->saved_req_buf = NULL;
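
Note: lines 1285-1336 are the bounce-buffer pair called at 1459 and 2142 (dwc2_hsotg_handle_unaligned_buf_start/_complete): the original buffer pointer is parked in saved_req_buf and a freshly allocated buffer substituted before DMA (the payload is copied into it for the IN direction), and on completion the received data is copied back for a successful OUT transfer before the temporary buffer is freed and req.buf restored. A userspace-style sketch of that sequence, assuming a simplified fake_req and malloc()/free() in place of kmalloc(GFP_ATOMIC)/kfree().

#include <stdlib.h>
#include <string.h>

struct fake_req {
	void   *buf;
	size_t  length;
	size_t  actual;
	int     status;
	void   *saved_buf;	/* mirrors hs_req->saved_req_buf */
};

/* Before DMA: swap in a freshly allocated buffer, keep the original aside. */
static int bounce_start(struct fake_req *req, int dir_in)
{
	void *orig = req->buf;

	req->buf = malloc(req->length);	/* kmalloc(..., GFP_ATOMIC) in the driver */
	if (!req->buf) {
		req->buf = orig;	/* restore and fail the submit (-ENOMEM) */
		return -1;
	}
	req->saved_buf = orig;
	if (dir_in)			/* IN: payload must travel with the bounce buffer */
		memcpy(req->buf, orig, req->length);
	return 0;
}

/* After the transfer: copy OUT data back on success, restore the pointer. */
static void bounce_complete(struct fake_req *req, int dir_in)
{
	if (!req->saved_buf)
		return;
	if (!dir_in && !req->status)	/* OUT and successful: copy received bytes back */
		memcpy(req->saved_buf, req->buf, req->actual);
	free(req->buf);			/* kfree() in the driver */
	req->buf = req->saved_buf;
	req->saved_buf = NULL;
}

int main(void)
{
	char payload[8] = "payload";
	struct fake_req req = { .buf = payload, .length = sizeof(payload) };

	if (bounce_start(&req, 1) == 0) {	/* IN transfer */
		req.actual = req.length;	/* pretend the hardware sent it all */
		bounce_complete(&req, 1);
	}
	return 0;
}
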
1405 struct dwc2_hsotg_req *hs_req = our_req(req);
1430 INIT_LIST_HEAD(&hs_req->queue);
1459 ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
1477 list_add_tail(&hs_req->queue, &hs_ep->queue);
1487 dma_addr_t dma_addr = hs_req->req.dma;
1489 if (hs_req->req.num_sgs) {
1490 WARN_ON(hs_req->req.num_sgs > 1);
1491 dma_addr = sg_dma_address(hs_req->req.sg);
1494 hs_req->req.length);
1506 dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
1521 dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
1544 struct dwc2_hsotg_req *hs_req = our_req(req);
1546 kfree(hs_req);
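
Note: the our_req() calls at lines 1405, 1544, 2041 and 4332 turn the struct usb_request handed back by the gadget core into the driver's wrapper struct dwc2_hsotg_req, conventionally a container_of() over the embedded req member. A small self-contained illustration of that idiom; the fake_* types are stand-ins.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_usb_request { unsigned int length; };

struct fake_hsotg_req {
	struct fake_usb_request req;	/* embedded, not a pointer */
	void *saved_buf;
};

/* Recover the wrapper from the embedded request the core hands back. */
static struct fake_hsotg_req *our_req(struct fake_usb_request *req)
{
	return container_of(req, struct fake_hsotg_req, req);
}

int main(void)
{
	struct fake_hsotg_req hs_req = { .req = { .length = 512 } };
	struct fake_usb_request *core_view = &hs_req.req;

	printf("recovered %p, original %p\n",
	       (void *)our_req(core_view), (void *)&hs_req);
	return 0;
}
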
1750 struct dwc2_hsotg_req *hs_req;
1753 hs_req = get_ep_head(hs_ep);
1754 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
1778 struct dwc2_hsotg_req *hs_req;
1857 hs_req = ep->req;
1859 list_del_init(&hs_req->queue);
1860 if (hs_req->req.complete) {
1863 &ep->ep, &hs_req->req);
2041 struct dwc2_hsotg_req *hs_req = our_req(req);
2051 if (!list_empty(&hs_req->queue)) {
2109 * @hs_req: The request to complete.
2120 struct dwc2_hsotg_req *hs_req,
2123 if (!hs_req) {
2129 hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
2136 if (hs_req->req.status == -EINPROGRESS)
2137 hs_req->req.status = result;
2140 dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
2142 dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req);
2145 list_del_init(&hs_req->queue);
2152 if (hs_req->req.complete) {
2154 usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
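
Note: lines 2120-2154 show the shape of dwc2_hsotg_complete_request(): the result only replaces req.status if it is still -EINPROGRESS (line 2136), so a status already recorded on the request is not overwritten; the DMA mapping and any bounce buffer are undone, the request is unlinked with list_del_init() (line 2145), and only then is the completion callback invoked through usb_gadget_giveback_request() (line 2154). A reduced sketch of that ordering; the types, sentinel value and callback below are stand-ins.

#include <stdio.h>

#define IN_PROGRESS (-115)	/* stands in for -EINPROGRESS */

struct fake_req {
	int status;
	void (*complete)(struct fake_req *req);
};

static void complete_request(struct fake_req *req, int result)
{
	if (!req)
		return;

	/* keep a status that was already recorded on the request */
	if (req->status == IN_PROGRESS)
		req->status = result;

	/* the driver also unmaps DMA, undoes the bounce buffer and
	 * list_del_init()s the request from the endpoint queue here */

	if (req->complete)
		req->complete(req);	/* usb_gadget_giveback_request() in the driver */
}

static void done(struct fake_req *req)
{
	printf("completed, status %d\n", req->status);
}

int main(void)
{
	struct fake_req ok     = { .status = IN_PROGRESS, .complete = done };
	struct fake_req preset = { .status = -104 /* e.g. -ECONNRESET */, .complete = done };

	complete_request(&ok, 0);	/* reports 0 */
	complete_request(&preset, 0);	/* keeps -104 */
	return 0;
}
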
2184 struct dwc2_hsotg_req *hs_req;
2195 hs_req = get_ep_head(hs_ep);
2196 if (!hs_req) {
2200 ureq = &hs_req->req;
2222 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2266 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2271 if (!hs_req) {
2287 read_ptr = hs_req->req.actual;
2288 max_req = hs_req->req.length - read_ptr;
2291 __func__, to_read, max_req, read_ptr, hs_req->req.length);
2304 hs_req->req.actual += to_read;
2312 hs_req->req.buf + read_ptr, to_read);
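
Note: lines 2287-2312 are the OUT/RX counterpart of the write path: the read resumes at req.actual, is clamped to the space left in the buffer (length - actual), lands at buf + read_ptr, and actual advances. A minimal sketch with the FIFO modelled as an ordinary memory buffer; fake_req is a stand-in, and the driver reads the endpoint FIFO rather than memcpy()ing.

#include <stdio.h>
#include <string.h>

struct fake_req {
	char   buf[16];
	size_t length;
	size_t actual;
};

/* Accept up to 'to_read' bytes from the (fake) FIFO into the request. */
static void rx_data(struct fake_req *req, const void *fifo, size_t to_read)
{
	size_t read_ptr = req->actual;			/* resume offset, line 2287 */
	size_t max_req  = req->length - read_ptr;	/* space left, line 2288 */

	if (to_read > max_req)
		to_read = max_req;			/* the driver truncates (and warns) */

	memcpy(req->buf + read_ptr, fifo, to_read);
	req->actual += to_read;
}

int main(void)
{
	struct fake_req req = { .length = 10 };

	rx_data(&req, "abcdef", 6);
	rx_data(&req, "ghijkl", 6);	/* only 4 bytes still fit */
	printf("actual=%zu buf=%.10s\n", req.actual, req.buf);
	return 0;
}
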
2394 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2395 struct usb_request *req = &hs_req->req;
2399 if (!hs_req) {
2406 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2434 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
2462 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
2655 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2657 if (!hs_ep->dir_in || !hs_req) {
2668 if (hs_req->req.actual < hs_req->req.length) {
2671 return dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
2688 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2692 if (!hs_req) {
2707 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2744 if (hs_req->req.actual != size_done)
2746 __func__, hs_req->req.actual, size_done);
2748 hs_req->req.actual = size_done;
2750 hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
2752 if (!size_left && hs_req->req.actual < hs_req->req.length) {
2754 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
2776 hs_req->req.frame_number = hs_ep->target_frame;
2780 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2827 struct dwc2_hsotg_req *hs_req;
2864 hs_req = get_ep_head(hs_ep);
2865 if (hs_req) {
2866 hs_req->req.frame_number = hs_ep->target_frame;
2867 hs_req->req.actual = 0;
2868 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
2891 struct dwc2_hsotg_req *hs_req;
2922 hs_req = get_ep_head(ep);
2923 if (hs_req) {
2924 hs_req->req.frame_number = ep->target_frame;
2925 hs_req->req.actual = 0;
2926 dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
2959 struct dwc2_hsotg_req *hs_req;
3015 hs_req = get_ep_head(hs_ep);
3016 if (hs_req) {
3017 hs_req->req.frame_number = hs_ep->target_frame;
3018 hs_req->req.actual = 0;
3019 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
4332 struct dwc2_hsotg_req *hs_req = our_req(req);
4341 if (!on_list(hs_ep, hs_req)) {
4350 dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);