Lines Matching refs:rtl

102 struct ssh_rtl *rtl = ssh_request_rtl(rqst);
104 spin_lock(&rtl->queue.lock);
107 spin_unlock(&rtl->queue.lock);
113 spin_unlock(&rtl->queue.lock);
117 static bool ssh_rtl_queue_empty(struct ssh_rtl *rtl)
121 spin_lock(&rtl->queue.lock);
122 empty = list_empty(&rtl->queue.head);
123 spin_unlock(&rtl->queue.lock);
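
The fragments above (lines 102-113 and 117-123) show the queue's locking discipline: the list behind queue.head is only ever touched under queue.lock, and even a plain emptiness test takes the lock. A minimal userspace analogue of ssh_rtl_queue_empty(), with a pthread mutex standing in for the kernel spinlock and a hand-rolled sentinel list standing in for <linux/list.h> (all names below are illustrative, not from the driver):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct list_node { struct list_node *prev, *next; };

    struct queue {
        pthread_mutex_t  lock;
        struct list_node head;   /* sentinel: empty iff head.next == &head */
    };

    static void queue_init(struct queue *q)
    {
        pthread_mutex_init(&q->lock, NULL);
        q->head.prev = q->head.next = &q->head;
    }

    /* Mirror of ssh_rtl_queue_empty(): the result is only a snapshot and
     * may be stale the moment the lock is dropped, so callers (such as
     * the TX scheduler) must treat it as a hint, not a guarantee. */
    static bool queue_empty(struct queue *q)
    {
        bool empty;

        pthread_mutex_lock(&q->lock);
        empty = (q->head.next == &q->head);
        pthread_mutex_unlock(&q->lock);

        return empty;
    }

    int main(void)
    {
        struct queue q;

        queue_init(&q);
        printf("empty: %d\n", queue_empty(&q));   /* prints "empty: 1" */
        return 0;
    }
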
130 struct ssh_rtl *rtl = ssh_request_rtl(rqst);
132 spin_lock(&rtl->pending.lock);
135 spin_unlock(&rtl->pending.lock);
139 atomic_dec(&rtl->pending.count);
142 spin_unlock(&rtl->pending.lock);
149 struct ssh_rtl *rtl = ssh_request_rtl(rqst);
151 spin_lock(&rtl->pending.lock);
154 spin_unlock(&rtl->pending.lock);
159 spin_unlock(&rtl->pending.lock);
163 atomic_inc(&rtl->pending.count);
164 list_add_tail(&ssh_request_get(rqst)->node, &rtl->pending.head);
166 spin_unlock(&rtl->pending.lock);
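
Lines 130-166 pair the pending list with an atomic counter: insertions and removals happen under pending.lock, the counter shadows the list length so hot flow-control checks can read it without the lock, and the list holds its own reference on each request (the ssh_request_get() at line 164). A hedged userspace sketch of that pairing; pending_push/pending_remove are made-up names:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct req {
        struct req *prev, *next;
        atomic_int  refcnt;
        bool        pending;
    };

    struct pending_set {
        pthread_mutex_t lock;
        struct req      head;    /* sentinel node */
        atomic_int      count;   /* shadows the list length for lock-free reads */
    };

    /* Analogue of ssh_rtl_pending_push(): mark as pending, bump the
     * counter, and insert at the tail while the list takes its own
     * reference -- all under pending.lock so list and counter never
     * disagree by more than a transient. */
    bool pending_push(struct pending_set *s, struct req *r)
    {
        pthread_mutex_lock(&s->lock);

        if (r->pending) {                     /* already on the list */
            pthread_mutex_unlock(&s->lock);
            return false;
        }

        r->pending = true;
        atomic_fetch_add(&s->count, 1);
        atomic_fetch_add(&r->refcnt, 1);      /* the list owns one reference */

        r->prev = s->head.prev;               /* tail insertion */
        r->next = &s->head;
        s->head.prev->next = r;
        s->head.prev = r;

        pthread_mutex_unlock(&s->lock);
        return true;
    }

    /* Analogue of ssh_rtl_pending_remove(): unlink and decrement under
     * the lock; the list's reference is dropped afterwards, outside it. */
    bool pending_remove(struct pending_set *s, struct req *r)
    {
        pthread_mutex_lock(&s->lock);

        if (!r->pending) {
            pthread_mutex_unlock(&s->lock);
            return false;
        }

        r->prev->next = r->next;
        r->next->prev = r->prev;
        r->pending = false;
        atomic_fetch_sub(&s->count, 1);

        pthread_mutex_unlock(&s->lock);

        atomic_fetch_sub(&r->refcnt, 1);      /* drop the list's reference */
        return true;
    }
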
172 struct ssh_rtl *rtl = ssh_request_rtl(rqst);
176 /* rtl/ptl may not be set if we're canceling before submitting. */
177 rtl_dbg_cond(rtl, "rtl: completing request (rqid: %#06x, status: %d)\n",
187 struct ssh_rtl *rtl = ssh_request_rtl(rqst);
191 rtl_dbg(rtl, "rtl: completing request with response (rqid: %#06x)\n",
199 struct ssh_rtl *rtl = ssh_request_rtl(rqst);
202 return !atomic_read(&rtl->pending.count);
204 return atomic_read(&rtl->pending.count) < SSH_RTL_MAX_PENDING;
207 static struct ssh_request *ssh_rtl_tx_next(struct ssh_rtl *rtl)
212 spin_lock(&rtl->queue.lock);
215 list_for_each_entry_safe(p, n, &rtl->queue.head, node) {
236 spin_unlock(&rtl->queue.lock);
240 static int ssh_rtl_tx_try_process_one(struct ssh_rtl *rtl)
246 rqst = ssh_rtl_tx_next(rtl);
258 status = ssh_ptl_submit(&rtl->ptl, &rqst->packet);
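
Lines 199-258 carry the TX-side flow control: a flush request acts as a barrier and may only go out once pending.count has drained to zero (line 202), any other request merely has to fit under SSH_RTL_MAX_PENDING (line 204), and ssh_rtl_tx_next() pops the first queued request that passes this check while skipping locked (i.e. canceled) entries. A userspace sketch of that selection logic; the limit of 3 and all identifiers are assumptions for illustration:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define MAX_PENDING 3   /* stand-in for SSH_RTL_MAX_PENDING */

    struct req {
        struct req *prev, *next;
        bool        locked;   /* canceled before transmission */
        bool        flush;    /* barrier request */
    };

    struct queue {
        pthread_mutex_t lock;
        struct req      head;      /* sentinel */
        atomic_int      pending;   /* in-flight request count */
    };

    /* Analogue of ssh_rtl_tx_can_process(): a flush request waits for
     * the pending set to drain completely; everything else just has to
     * fit under the in-flight limit. */
    static bool can_process(struct queue *q, struct req *r)
    {
        if (r->flush)
            return atomic_load(&q->pending) == 0;

        return atomic_load(&q->pending) < MAX_PENDING;
    }

    /* Analogue of ssh_rtl_tx_next(): under queue.lock, skip locked
     * entries (their cancellation path cleans them up) and stop at the
     * first entry that cannot be processed yet, preserving order. */
    struct req *tx_next(struct queue *q)
    {
        struct req *r, *found = NULL;

        pthread_mutex_lock(&q->lock);

        for (r = q->head.next; r != &q->head; r = r->next) {
            if (r->locked)
                continue;
            if (!can_process(q, r))
                break;

            r->prev->next = r->next;   /* unlink; caller moves it to pending */
            r->next->prev = r->prev;
            found = r;
            break;
        }

        pthread_mutex_unlock(&q->lock);
        return found;
    }
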
305 static bool ssh_rtl_tx_schedule(struct ssh_rtl *rtl)
307 if (atomic_read(&rtl->pending.count) >= SSH_RTL_MAX_PENDING)
310 if (ssh_rtl_queue_empty(rtl))
313 return schedule_work(&rtl->tx.work);
318 struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work);
328 status = ssh_rtl_tx_try_process_one(rtl);
345 ssh_rtl_tx_schedule(rtl);
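
Lines 305-345 gate the TX worker: ssh_rtl_tx_schedule() only queues the work item when it could make progress (in-flight limit not reached, queue non-empty), and the work function re-runs these checks per transmission attempt and re-schedules itself via line 345 when more work remains. A sketch of the gate, with a stand-in for the kernel's schedule_work() that likewise reports whether the item was newly queued; the snapshot fields are simplifications:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define MAX_PENDING 3   /* stand-in for SSH_RTL_MAX_PENDING, as above */

    struct layer {
        atomic_int  pending_count;
        atomic_bool queue_nonempty;   /* snapshot of the submission queue */
        atomic_bool tx_work_queued;
    };

    /* Stand-in for schedule_work(): true only if the work item was not
     * already queued, matching the kernel's return-value semantics. */
    static bool schedule_tx_work(struct layer *l)
    {
        return !atomic_exchange(&l->tx_work_queued, true);
    }

    /* Analogue of ssh_rtl_tx_schedule(): kick the TX worker only when
     * it could actually make progress. Both checks are racy snapshots;
     * that is fine, because the worker re-validates them before sending
     * and every completion path calls this function again. */
    bool tx_schedule(struct layer *l)
    {
        if (atomic_load(&l->pending_count) >= MAX_PENDING)
            return false;

        if (!atomic_load(&l->queue_nonempty))
            return false;

        return schedule_tx_work(l);
    }
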
350 * @rtl: The request transport layer.
361 int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst)
374 spin_lock(&rtl->queue.lock);
391 if (cmpxchg(&rqst->packet.ptl, NULL, &rtl->ptl)) {
392 spin_unlock(&rtl->queue.lock);
407 if (test_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state)) {
408 spin_unlock(&rtl->queue.lock);
413 spin_unlock(&rtl->queue.lock);
418 list_add_tail(&ssh_request_get(rqst)->node, &rtl->queue.head);
420 spin_unlock(&rtl->queue.lock);
422 ssh_rtl_tx_schedule(rtl);
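
The cmpxchg() at line 391 is the interesting part of ssh_rtl_submit(): atomically binding the request's packet to this layer's packet transport is what marks the request as submitted, so double submission (even to another layer) fails cleanly without touching any other state. A C11 analogue of that claim-once idiom; the error code and names are illustrative:

    #include <stdatomic.h>
    #include <stddef.h>
    #include <errno.h>

    struct layer;   /* opaque owner */

    struct request {
        _Atomic(struct layer *) owner;   /* NULL until first submission */
    };

    /* Analogue of the cmpxchg() in ssh_rtl_submit(): bind the request
     * to this layer iff it has no owner yet. A failed exchange means
     * the request was already submitted somewhere, and the second
     * submission is rejected. */
    int submit_once(struct request *r, struct layer *l)
    {
        struct layer *expected = NULL;

        if (!atomic_compare_exchange_strong(&r->owner, &expected, l))
            return -EALREADY;   /* illustrative; see the driver for the real code */

        /* ... still under queue.lock: check the shutdown/locked bits,
         * then take a reference and append to the submission queue ... */
        return 0;
    }
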
426 static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now,
432 spin_lock(&rtl->rtx_timeout.lock);
435 if (ktime_before(aexp, rtl->rtx_timeout.expires)) {
436 rtl->rtx_timeout.expires = expires;
437 mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta);
440 spin_unlock(&rtl->rtx_timeout.lock);
445 struct ssh_rtl *rtl = ssh_request_rtl(rqst);
447 ktime_t timeout = rtl->rtx_timeout.timeout;
464 ssh_rtl_timeout_reaper_mod(rtl, timestamp, timestamp + timeout);
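
Lines 426-464 arm the timeout reaper: each transmitted request calls the reaper-mod helper with its own expiration, and the reaper is only reprogrammed when the new deadline undercuts the stored one by more than a resolution step, so a burst of requests does not re-arm the timer over and over. A sketch of that only-move-earlier rule, using millisecond integers instead of ktime_t; the 50 ms resolution is an assumption:

    #include <pthread.h>
    #include <stdint.h>

    #define EXPIRES_NEVER UINT64_MAX   /* analogue of KTIME_MAX */
    #define RESOLUTION_MS 50           /* illustrative reprogramming slack */

    struct reaper {
        pthread_mutex_t lock;
        uint64_t        expires_ms;    /* next programmed reap, EXPIRES_NEVER if none */
        uint64_t        armed_delay_ms;
    };

    /* Stand-in for mod_delayed_work(): just record the new delay. */
    static void rearm_timer(struct reaper *rp, uint64_t delay_ms)
    {
        rp->armed_delay_ms = delay_ms;
    }

    /* Analogue of ssh_rtl_timeout_reaper_mod(): reprogram the reaper
     * only if the new expiration beats the stored one by more than the
     * resolution slack; otherwise the already-armed timer is close
     * enough and is left alone. */
    void reaper_mod(struct reaper *rp, uint64_t now_ms, uint64_t expires_ms)
    {
        pthread_mutex_lock(&rp->lock);

        if (expires_ms + RESOLUTION_MS < rp->expires_ms) {
            rp->expires_ms = expires_ms;
            rearm_timer(rp, expires_ms - now_ms);
        }

        pthread_mutex_unlock(&rp->lock);
    }
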
467 static void ssh_rtl_complete(struct ssh_rtl *rtl,
481 spin_lock(&rtl->pending.lock);
482 list_for_each_entry_safe(p, n, &rtl->pending.head, node) {
489 spin_unlock(&rtl->pending.lock);
492 rtl_info(rtl, "request error injection: dropping response for request %p\n",
507 atomic_dec(&rtl->pending.count);
513 spin_unlock(&rtl->pending.lock);
516 rtl_warn(rtl, "rtl: dropping unexpected command message (rqid = %#06x)\n",
524 ssh_rtl_tx_schedule(rtl);
549 rtl_err(rtl, "rtl: received response before ACK for request (rqid = %#06x)\n",
563 ssh_rtl_tx_schedule(rtl);
578 ssh_rtl_tx_schedule(rtl);
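
ssh_rtl_complete() (lines 467-578) matches an incoming response to its pending request: scan the pending list under pending.lock for the entry with the received request ID, unlink it and drop the counter there, but run the actual completion only after the lock has been released. A sketch of that lookup-and-claim step; types and names are illustrative:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stddef.h>

    struct req {
        struct req *prev, *next;
        uint16_t    rqid;      /* ID the response carries */
    };

    struct pending_set {
        pthread_mutex_t lock;
        struct req      head;  /* sentinel */
        atomic_int      count;
    };

    /* Analogue of the lookup loop in ssh_rtl_complete(): find the
     * pending request matching the received request ID and claim it
     * under the lock; completion callbacks then run without the lock
     * held, so they may themselves submit new requests. Returns NULL
     * for an unexpected response (the driver logs and drops those,
     * see line 516). */
    struct req *pending_take_by_rqid(struct pending_set *s, uint16_t rqid)
    {
        struct req *r, *found = NULL;

        pthread_mutex_lock(&s->lock);

        for (r = s->head.next; r != &s->head; r = r->next) {
            if (r->rqid != rqid)
                continue;

            r->prev->next = r->next;
            r->next->prev = r->prev;
            atomic_fetch_sub(&s->count, 1);
            found = r;
            break;
        }

        pthread_mutex_unlock(&s->lock);
        return found;
    }
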
583 struct ssh_rtl *rtl;
627 rtl = ssh_request_rtl(r);
628 spin_lock(&rtl->queue.lock);
639 spin_unlock(&rtl->queue.lock);
646 spin_unlock(&rtl->queue.lock);
730 struct ssh_rtl *rtl;
743 /* Note: rtl may be NULL if request has not been submitted yet. */
744 rtl = ssh_request_rtl(rqst);
745 if (canceled && rtl)
746 ssh_rtl_tx_schedule(rtl);
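
The comment at line 743 captures a subtlety of cancellation: a request may be canceled before it was ever submitted, in which case its back-pointer to the transport layer is still NULL, and the TX worker is only poked when the request actually belongs to a layer. A minimal sketch with stubbed types; cancel_tail is a made-up name:

    #include <stdbool.h>
    #include <stddef.h>

    struct layer { int dummy; };
    struct request { struct layer *rtl; /* NULL until submitted */ };

    static void tx_schedule(struct layer *l) { (void)l; /* kick TX worker */ }

    /* Analogue of the tail of ssh_rtl_cancel(): canceling is legal
     * before submission; only reschedule transmission when the request
     * is attached to a layer (a canceled entry frees up a flow-control
     * slot, hence the kick). */
    void cancel_tail(struct request *rqst, bool canceled)
    {
        if (canceled && rqst->rtl)
            tx_schedule(rqst->rtl);
    }
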
822 struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work);
826 ktime_t timeout = rtl->rtx_timeout.timeout;
829 trace_ssam_rtl_timeout_reap(atomic_read(&rtl->pending.count));
835 spin_lock(&rtl->rtx_timeout.lock);
836 rtl->rtx_timeout.expires = KTIME_MAX;
837 spin_unlock(&rtl->rtx_timeout.lock);
839 spin_lock(&rtl->pending.lock);
840 list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
865 atomic_dec(&rtl->pending.count);
868 spin_unlock(&rtl->pending.lock);
893 ssh_rtl_timeout_reaper_mod(rtl, now, next);
895 ssh_rtl_tx_schedule(rtl);
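
The reaper itself (lines 822-895) makes a single pass over the pending list: reset the stored expiration to KTIME_MAX first (lines 835-837), claim every request whose deadline has passed onto a private list, remember the earliest deadline that has not, then complete the claimed requests outside the lock and re-arm for that next deadline. A sketch of the pass, with millisecond integers and stub hooks; all names are illustrative:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>

    #define EXPIRES_NEVER UINT64_MAX

    struct req {
        struct req *prev, *next;
        uint64_t    timestamp_ms;   /* when the request was transmitted */
    };

    struct layer {
        pthread_mutex_t lock;       /* pending.lock */
        struct req      head;       /* pending sentinel */
        atomic_int      count;
        uint64_t        timeout_ms;
    };

    static void complete_timed_out(struct req *r) { (void)r; /* report timeout */ }
    static void reaper_rearm(struct layer *l, uint64_t now, uint64_t exp)
    {
        (void)l; (void)now; (void)exp;   /* see the reaper_mod sketch above */
    }

    /* Analogue of ssh_rtl_timeout_reap(): claim all expired requests in
     * one locked pass, track the earliest surviving deadline, complete
     * the claimed ones unlocked, and re-arm for the next deadline. */
    void timeout_reap(struct layer *l, uint64_t now_ms)
    {
        struct req claimed = { .prev = &claimed, .next = &claimed };
        uint64_t next_ms = EXPIRES_NEVER;
        struct req *r, *n;

        pthread_mutex_lock(&l->lock);

        for (r = l->head.next; r != &l->head; r = n) {
            uint64_t expires_ms = r->timestamp_ms + l->timeout_ms;

            n = r->next;
            if (expires_ms > now_ms) {          /* still in time */
                if (expires_ms < next_ms)
                    next_ms = expires_ms;
                continue;
            }

            r->prev->next = r->next;            /* move onto the claimed list */
            r->next->prev = r->prev;
            r->prev = claimed.prev;
            r->next = &claimed;
            claimed.prev->next = r;
            claimed.prev = r;
            atomic_fetch_sub(&l->count, 1);
        }

        pthread_mutex_unlock(&l->lock);

        for (r = claimed.next; r != &claimed; r = n) {
            n = r->next;
            complete_timed_out(r);
        }

        if (next_ms != EXPIRES_NEVER)
            reaper_rearm(l, now_ms, next_ms);
    }
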
898 static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd,
903 rtl_dbg(rtl, "rtl: handling event (rqid: %#06x)\n",
906 rtl->ops.handle_event(rtl, cmd, data);
911 struct ssh_rtl *rtl = to_ssh_rtl(p, ptl);
929 rtl_warn(rtl, "rtl: dropping message not intended for us (tid = %#04x)\n",
935 ssh_rtl_rx_event(rtl, command, &command_data);
937 ssh_rtl_complete(rtl, command, &command_data);
943 ptl_err(p, "rtl: rx: no data frame payload\n");
953 ptl_err(p, "rtl: rx: unknown frame payload type (type: %#04x)\n",
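
On the RX side (lines 898-953), the layer sanity-checks each frame (a data frame must carry a payload, the target ID must match, the payload type must be known) and then looks at the request ID of the received command to decide whether it is an unsolicited event or the response to a pending request. A sketch of the dispatch; the event-ID range below is an assumption, not the protocol's real bound:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define NUM_EVENT_RQIDS 32   /* illustrative; cf. ssh_rqid_is_event() */

    /* In the protocol, a reserved low range of request IDs marks
     * unsolicited events; everything else answers a pending request. */
    static bool rqid_is_event(uint16_t rqid)
    {
        return rqid != 0 && rqid <= NUM_EVENT_RQIDS;
    }

    static void handle_event(uint16_t rqid, const void *data, size_t len)
    {
        (void)rqid; (void)data; (void)len;   /* rtl->ops.handle_event(...) */
    }

    static void complete_request(uint16_t rqid, const void *data, size_t len)
    {
        (void)rqid; (void)data; (void)len;   /* ssh_rtl_complete(...) */
    }

    /* Analogue of the dispatch in the RX path: route a received command
     * either to the event handler or to completion of the matching
     * pending request. */
    void rx_dispatch(uint16_t rqid, const void *data, size_t len)
    {
        if (rqid_is_event(rqid))
            handle_event(rqid, data, len);
        else
            complete_request(rqid, data, len);
    }
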
1014 * @rtl: The request transport layer to initialize.
1025 int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
1033 status = ssh_ptl_init(&rtl->ptl, serdev, &ptl_ops);
1037 spin_lock_init(&rtl->queue.lock);
1038 INIT_LIST_HEAD(&rtl->queue.head);
1040 spin_lock_init(&rtl->pending.lock);
1041 INIT_LIST_HEAD(&rtl->pending.head);
1042 atomic_set_release(&rtl->pending.count, 0);
1044 INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn);
1046 spin_lock_init(&rtl->rtx_timeout.lock);
1047 rtl->rtx_timeout.timeout = SSH_RTL_REQUEST_TIMEOUT;
1048 rtl->rtx_timeout.expires = KTIME_MAX;
1049 INIT_DELAYED_WORK(&rtl->rtx_timeout.reaper, ssh_rtl_timeout_reap);
1051 rtl->ops = *ops;
1058 * @rtl: The request transport layer to deinitialize.
1065 void ssh_rtl_destroy(struct ssh_rtl *rtl)
1067 ssh_ptl_destroy(&rtl->ptl);
1072 * @rtl: The request transport layer.
1076 int ssh_rtl_start(struct ssh_rtl *rtl)
1080 status = ssh_ptl_tx_start(&rtl->ptl);
1084 ssh_rtl_tx_schedule(rtl);
1086 status = ssh_ptl_rx_start(&rtl->ptl);
1088 ssh_rtl_flush(rtl, msecs_to_jiffies(5000));
1089 ssh_ptl_tx_stop(&rtl->ptl);
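
ssh_rtl_start() (lines 1076-1089) brings the layer up in a deliberate order: TX first, so that anything the RX path triggers can be sent, then RX; if RX fails to start, the layer is flushed and TX stopped again before the error is returned. A sketch of that unwind with stub helpers; the 5-second flush timeout mirrors line 1088:

    #include <stdint.h>

    struct layer { int dummy; };

    static int  tx_start(struct layer *l)    { (void)l; return 0; }
    static int  rx_start(struct layer *l)    { (void)l; return 0; }
    static void tx_stop(struct layer *l)     { (void)l; }
    static void tx_schedule(struct layer *l) { (void)l; }
    static void layer_flush(struct layer *l, uint64_t timeout_ms)
    {
        (void)l; (void)timeout_ms;
    }

    /* Analogue of ssh_rtl_start(): start TX before RX and, on RX
     * failure, unwind in reverse -- flush requests the TX side may
     * already have accepted, then stop TX -- so the caller gets back a
     * quiescent layer. */
    int layer_start(struct layer *l)
    {
        int status;

        status = tx_start(l);
        if (status)
            return status;

        tx_schedule(l);   /* requests may have been queued before start */

        status = rx_start(l);
        if (status) {
            layer_flush(l, 5000);   /* ms, as at line 1088 */
            tx_stop(l);
            return status;
        }

        return 0;
    }
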
1128 * @rtl: request transport layer
1154 int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout)
1167 status = ssh_rtl_submit(rtl, &rqst.base);
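
ssh_rtl_flush() (lines 1128-1167) works by submitting a special flush request through the normal path: since a flush request may only be transmitted once the pending set is empty (line 202), its completion proves that everything submitted before it has been processed. The waiting side is an ordinary completion with a timeout; on timeout the driver cancels the flush request and waits for it to finish. A pthread sketch of that wait; flush_waiter and both function names are made up:

    #include <pthread.h>
    #include <errno.h>
    #include <time.h>

    struct flush_waiter {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             done;
        int             status;
    };

    /* Called from the flush request's completion callback. */
    void flush_complete(struct flush_waiter *w, int status)
    {
        pthread_mutex_lock(&w->lock);
        w->status = status;
        w->done = 1;
        pthread_cond_signal(&w->cond);
        pthread_mutex_unlock(&w->lock);
    }

    /* Analogue of the wait in ssh_rtl_flush(): block until the flush
     * request completes or the absolute deadline passes; on timeout
     * the caller cancels the stuck flush request. */
    int flush_wait(struct flush_waiter *w, const struct timespec *deadline)
    {
        int err = 0, status;

        pthread_mutex_lock(&w->lock);
        while (!w->done && err != ETIMEDOUT)
            err = pthread_cond_timedwait(&w->cond, &w->lock, deadline);
        status = w->done ? w->status : -ETIMEDOUT;
        pthread_mutex_unlock(&w->lock);

        return status;
    }
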
1186 * @rtl: The request transport layer.
1197 void ssh_rtl_shutdown(struct ssh_rtl *rtl)
1203 set_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state);
1213 spin_lock(&rtl->queue.lock);
1214 list_for_each_entry_safe(r, n, &rtl->queue.head, node) {
1222 spin_unlock(&rtl->queue.lock);
1234 cancel_work_sync(&rtl->tx.work);
1235 ssh_ptl_shutdown(&rtl->ptl);
1236 cancel_delayed_work_sync(&rtl->rtx_timeout.reaper);
1244 pending = atomic_read(&rtl->pending.count);
1246 spin_lock(&rtl->pending.lock);
1247 list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
1255 spin_unlock(&rtl->pending.lock);
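
Shutdown (lines 1197-1255) is strictly ordered: first the shutdown bit is set so that ssh_rtl_submit() rejects new requests (the check at line 407), then requests still sitting in the queue are failed, then the TX worker, the packet layer, and the reaper are silenced, and only then are the requests already on the wire failed too. A condensed sketch of that sequence with stubbed helpers; the drains stand for the two unlink-then-complete loops in the listing:

    #include <stdatomic.h>
    #include <errno.h>

    struct layer { atomic_bool shutdown; };

    /* Stubs for the drain loops and cancellation calls of
     * ssh_rtl_shutdown(); each drain unlinks everything under the
     * respective lock and completes it with the given status after. */
    static void drain_queue(struct layer *l, int status)   { (void)l; (void)status; }
    static void drain_pending(struct layer *l, int status) { (void)l; (void)status; }
    static void cancel_tx_work(struct layer *l)            { (void)l; }
    static void packet_layer_shutdown(struct layer *l)     { (void)l; }
    static void cancel_reaper(struct layer *l)             { (void)l; }

    /* Analogue of the ordering in ssh_rtl_shutdown(): make the shutdown
     * flag visible first (submission checks it under queue.lock, so no
     * new request can slip in behind the drain), fail what never left
     * the queue, silence every worker and the lower layer, and only
     * then fail what was transmitted but never answered. */
    void layer_shutdown(struct layer *l)
    {
        atomic_store(&l->shutdown, true);

        drain_queue(l, -ESHUTDOWN);      /* queued, never transmitted */

        cancel_tx_work(l);               /* no further transmissions */
        packet_layer_shutdown(l);        /* lower layer drops our packets */
        cancel_reaper(l);                /* timeouts are moot now */

        drain_pending(l, -ESHUTDOWN);    /* transmitted, never answered */
    }
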