Lines Matching refs:xfer

315  * scmi_xfer_token_set - Reserve and set a new token for the xfer at hand
318 * @xfer: The xfer to act upon
321 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
323 * of incorrect association of a late and expired xfer with a live in-flight
374 struct scmi_xfer *xfer)
386 next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
412 xfer->hdr.seq = (u16)xfer_id;
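The fragments above can be pieced together into a condensed sketch of the token allocation: derive a candidate slot from the ever-increasing transfer_id, look for a free bit at or above it in the allocation bitmap, and wrap around once before giving up. This is an illustrative reconstruction, not the verbatim kernel function; error reporting and the exact wrap-around details are simplified.

static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
			       struct scmi_xfer *xfer)
{
	unsigned long xfer_id, next_token;

	/* Candidate token derived from the monotonic transfer_id */
	next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));

	/* Look for a free slot at or above the candidate ... */
	xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
				     MSG_TOKEN_MAX, next_token);
	if (xfer_id == MSG_TOKEN_MAX) {
		/* ... and wrap around once if the upper range is full */
		xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
					     next_token, 0);
		if (xfer_id == next_token)
			return -ENOMEM;
	}

	set_bit(xfer_id, minfo->xfer_alloc_table);
	xfer->hdr.seq = (u16)xfer_id;

	return 0;
}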
421 * @xfer: The xfer to act upon
424 struct scmi_xfer *xfer)
426 clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
430 * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
432 * @xfer: The xfer to register
435 * Note that this helper assumes that the xfer to be registered as in-flight
436 * had been built using an xfer sequence number which still corresponds to a
442 scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
446 set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
447 hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
448 xfer->pending = true;
452 * scmi_xfer_inflight_register - Try to register an xfer as in-flight
454 * @xfer: The xfer to register
458 * that was baked into the provided xfer, so it first checks if it can
459 * be mapped to a free slot and fails with an error if another xfer with the
462 * Return: 0 on Success or -EBUSY if the sequence number embedded in the xfer
465 static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
472 if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
473 scmi_xfer_inflight_register_unlocked(xfer, minfo);
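Putting the registration fragments in context, a sketch of the locked variant follows; the spinlock field name (xfer_lock) is assumed here, while the -EBUSY return matches the contract documented above.

static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
				       struct scmi_xfers_info *minfo)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	/* Register only if the embedded sequence number is still free */
	if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
		scmi_xfer_inflight_register_unlocked(xfer, minfo);
	else
		ret = -EBUSY;
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return ret;
}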
482 * scmi_xfer_raw_inflight_register - A helper to register the given xfer as in
486 * @xfer: The xfer to register
491 struct scmi_xfer *xfer)
495 return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
499 * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
502 * @xfer: The xfer to act upon
507 static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
514 /* Set a new monotonic token as the xfer sequence number */
515 ret = scmi_xfer_token_set(minfo, xfer);
517 scmi_xfer_inflight_register_unlocked(xfer, minfo);
532 * Picks an xfer from the free list @free_xfers (if any available) and performs
536 * allocated xfer, nor is it registered as a pending transaction.
538 * The successfully initialized xfer is refcounted.
542 * Return: An initialized xfer if all went fine, else an error pointer.
548 struct scmi_xfer *xfer;
556 /* grab an xfer from the free_list */
557 xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
558 hlist_del_init(&xfer->node);
564 xfer->transfer_id = atomic_inc_return(&transfer_last_id);
566 refcount_set(&xfer->users, 1);
567 atomic_set(&xfer->busy, SCMI_XFER_FREE);
570 return xfer;
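A sketch of how the scmi_xfer_get() fragments fit together, assuming the pool is protected by an xfer_lock spinlock and that an empty free list is reported with an error pointer:

static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	unsigned long flags;
	struct scmi_xfer *xfer;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (hlist_empty(&minfo->free_xfers)) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}

	/* grab an xfer from the free_list */
	xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
	hlist_del_init(&xfer->node);

	/* Distinct, ever-increasing id used mainly for tracing */
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	refcount_set(&xfer->users, 1);
	atomic_set(&xfer->busy, SCMI_XFER_FREE);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return xfer;
}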
574 * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
578 * Note that xfer is taken from the TX channel structures.
580 * Return: A valid xfer on Success, or an error-pointer otherwise
584 struct scmi_xfer *xfer;
587 xfer = scmi_xfer_get(handle, &info->tx_minfo);
588 if (!IS_ERR(xfer))
589 xfer->flags |= SCMI_XFER_FLAG_IS_RAW;
591 return xfer;
634 * @xfer: message that was reserved by scmi_xfer_get
636 * After the refcount check, possibly release an xfer, clearing the token slot,
637 * removing xfer from @pending_xfers and putting it back into free_xfers.
642 __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
647 if (refcount_dec_and_test(&xfer->users)) {
648 if (xfer->pending) {
649 scmi_xfer_token_clear(minfo, xfer);
650 hash_del(&xfer->node);
651 xfer->pending = false;
653 hlist_add_head(&xfer->node, &minfo->free_xfers);
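The release side mirrors the get: a sketch assuming the same xfer_lock, where the last user clears the token, unhashes a pending xfer and returns it to the free pool.

static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (refcount_dec_and_test(&xfer->users)) {
		if (xfer->pending) {
			scmi_xfer_token_clear(minfo, xfer);
			hash_del(&xfer->node);
			xfer->pending = false;
		}
		hlist_add_head(&xfer->node, &minfo->free_xfers);
	}
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}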
659 * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
662 * @xfer: A reference to the xfer to put
664 * Note that, as with other xfer_put() handlers, the xfer is effectively
667 void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
671 xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
672 xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
673 return __scmi_xfer_put(&info->tx_minfo, xfer);
686 * Return: A valid xfer on Success or error otherwise
691 struct scmi_xfer *xfer = NULL;
694 xfer = XFER_FIND(minfo->pending_xfers, xfer_id);
696 return xfer ?: ERR_PTR(-EINVAL);
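The XFER_FIND lookup referenced above presumably boils down to a walk of the hash bucket keyed by the sequence number; an illustrative expansion:

static struct scmi_xfer *
scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
{
	struct scmi_xfer *xfer = NULL, *iter;

	/* Short bucket walk: pending xfers are hashed by hdr.seq */
	hash_for_each_possible(minfo->pending_xfers, iter, node, xfer_id)
		if (iter->hdr.seq == xfer_id) {
			xfer = iter;
			break;
		}

	return xfer ?: ERR_PTR(-EINVAL);
}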
701 * xfer
705 * @xfer: A reference to the xfer to validate against @msg_type
708 * a pending @xfer; if an asynchronous delayed response is received before the
714 * Context: Expects to be called with xfer->lock already acquired.
720 struct scmi_xfer *xfer)
728 if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
731 xfer->hdr.seq);
735 switch (xfer->state) {
742 xfer->hdr.status = SCMI_SUCCESS;
743 xfer->state = SCMI_XFER_RESP_OK;
744 complete(&xfer->done);
747 xfer->hdr.seq);
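Those fragments describe the out-of-order case: a delayed response arriving while the xfer is still SCMI_XFER_SENT_OK means the synchronous response was lost or overtaken, so the synchronous half is flagged complete before the delayed one is processed. A trimmed sketch of just that branch (the remaining state checks and exact messages are omitted):

	/* A delayed response makes sense only for an async command */
	if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
		dev_err(cinfo->dev, "Delayed Response for %d not expected!\n",
			xfer->hdr.seq);
		return -EINVAL;
	}

	switch (xfer->state) {
	case SCMI_XFER_SENT_OK:
		if (msg_type == MSG_TYPE_DELAYED_RESP) {
			/* DRESP overtook RESP: fake a successful response */
			xfer->hdr.status = SCMI_SUCCESS;
			xfer->state = SCMI_XFER_RESP_OK;
			complete(&xfer->done);
		}
		break;
	/* ... other states validate RESP/DRESP ordering ... */
	}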
763 * scmi_xfer_state_update - Update xfer state
765 * @xfer: A reference to the xfer to update
771 * Context: Expects to be called on an xfer exclusively acquired using the
774 static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
776 xfer->hdr.type = msg_type;
779 if (xfer->hdr.type == MSG_TYPE_COMMAND)
780 xfer->state = SCMI_XFER_RESP_OK;
782 xfer->state = SCMI_XFER_DRESP_OK;
785 static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
789 ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);
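A plausible completion of that tiny helper, assuming acquisition succeeds exactly when the previous value of ->busy was SCMI_XFER_FREE:

static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
{
	int ret;

	/* Only one context can flip ->busy from FREE to BUSY */
	ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);

	return ret == SCMI_XFER_FREE;
}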
795 * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
800 * When a valid xfer is found for the sequence number embedded in the provided
802 * xfer is granted till released with @scmi_xfer_command_release.
804 * Return: A valid @xfer on Success or error otherwise.
811 struct scmi_xfer *xfer;
819 xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
820 if (IS_ERR(xfer)) {
825 return xfer;
827 refcount_inc(&xfer->users);
830 spin_lock_irqsave(&xfer->lock, flags);
831 ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
833 * If a pending xfer was found which was also in a congruent state with
837 * RESP and DRESP for the same xfer.
840 spin_until_cond(scmi_xfer_acquired(xfer));
841 scmi_xfer_state_update(xfer, msg_type);
843 spin_unlock_irqrestore(&xfer->lock, flags);
848 msg_type, xfer_id, msg_hdr, xfer->state);
850 __scmi_xfer_put(minfo, xfer);
851 xfer = ERR_PTR(-EINVAL);
854 return xfer;
858 struct scmi_xfer *xfer)
860 atomic_set(&xfer->busy, SCMI_XFER_FREE);
861 __scmi_xfer_put(&info->tx_minfo, xfer);
874 struct scmi_xfer *xfer;
881 xfer = scmi_xfer_get(cinfo->handle, minfo);
882 if (IS_ERR(xfer)) {
884 PTR_ERR(xfer));
889 unpack_scmi_header(msg_hdr, &xfer->hdr);
891 /* Ensure order between xfer->priv store and following ops */
892 smp_store_mb(xfer->priv, priv);
894 xfer);
896 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
897 xfer->hdr.id, "NOTI", xfer->hdr.seq,
898 xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
900 scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
901 xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
903 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
904 xfer->hdr.protocol_id, xfer->hdr.seq,
908 xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
909 scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
913 __scmi_xfer_put(minfo, xfer);
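A condensed sketch of the notification path those fragments trace; it assumes the notification pool lives in an rx_minfo member, that handle_to_scmi_info() is the usual container_of() helper, and that the transport hook is .fetch_notification(cinfo, max_len, xfer). Tracing, the raw-mode report and error handling are trimmed.

static void scmi_handle_notification(struct scmi_chan_info *cinfo,
				     u32 msg_hdr, void *priv)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts = ktime_get_boottime();
	struct scmi_xfer *xfer;

	xfer = scmi_xfer_get(cinfo->handle, minfo);
	if (IS_ERR(xfer))
		return;

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	if (priv)
		/* Ensure order between xfer->priv store and following ops */
		smp_store_mb(xfer->priv, priv);

	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);

	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	__scmi_xfer_put(minfo, xfer);
}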
921 struct scmi_xfer *xfer;
924 xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
925 if (IS_ERR(xfer)) {
935 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
936 xfer->rx.len = info->desc->max_msg_size;
939 /* Ensure order between xfer->priv store and following ops */
940 smp_store_mb(xfer->priv, priv);
941 info->desc->ops->fetch_response(cinfo, xfer);
943 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
944 xfer->hdr.id,
945 xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
946 (!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
947 (!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
948 xfer->hdr.seq, xfer->hdr.status,
949 xfer->rx.buf, xfer->rx.len);
951 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
952 xfer->hdr.protocol_id, xfer->hdr.seq,
953 xfer->hdr.type);
955 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
957 complete(xfer->async_done);
959 complete(&xfer->done);
964 * When in polling mode, avoid queueing the Raw xfer on the IRQ
968 if (!xfer->hdr.poll_completion)
969 scmi_raw_message_report(info->raw, xfer,
974 scmi_xfer_command_release(info, xfer);
1012 * @xfer: message that was reserved by xfer_get_init
1015 struct scmi_xfer *xfer)
1020 __scmi_xfer_put(&info->tx_minfo, xfer);
1024 struct scmi_xfer *xfer, ktime_t stop)
1029 * Poll also on xfer->done so that polling can be forcibly terminated
1032 return info->desc->ops->poll_done(cinfo, xfer) ||
1033 try_wait_for_completion(&xfer->done) ||
1039 struct scmi_xfer *xfer, unsigned int timeout_ms)
1043 if (xfer->hdr.poll_completion) {
1050 * Poll on xfer using the transport-provided .poll_done();
1056 xfer, stop));
1074 spin_lock_irqsave(&xfer->lock, flags);
1075 if (xfer->state == SCMI_XFER_SENT_OK) {
1076 desc->ops->fetch_response(cinfo, xfer);
1077 xfer->state = SCMI_XFER_RESP_OK;
1079 spin_unlock_irqrestore(&xfer->lock, flags);
1083 xfer->hdr.protocol_id, xfer->hdr.id,
1084 !SCMI_XFER_IS_RAW(xfer) ?
1086 xfer->hdr.seq, xfer->hdr.status,
1087 xfer->rx.buf, xfer->rx.len);
1093 scmi_raw_message_report(info->raw, xfer,
1100 if (!wait_for_completion_timeout(&xfer->done,
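Stitching the wait fragments together, a simplified sketch of the reply wait; the polling predicate is named scmi_xfer_done_no_timeout() here purely to match the signature tail shown above, and tracing plus raw-mode reporting are omitted.

static int scmi_wait_for_reply(struct device *dev,
			       const struct scmi_desc *desc,
			       struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer, unsigned int timeout_ms)
{
	int ret = 0;
	unsigned long flags;

	if (xfer->hdr.poll_completion) {
		ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);

		/* Busy-wait on the transport .poll_done() or the deadline */
		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
		if (ktime_after(ktime_get(), stop))
			return -ETIMEDOUT;

		/* Fetch the response only if the ISR did not beat us to it */
		spin_lock_irqsave(&xfer->lock, flags);
		if (xfer->state == SCMI_XFER_SENT_OK) {
			desc->ops->fetch_response(cinfo, xfer);
			xfer->state = SCMI_XFER_RESP_OK;
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
	} else {
		/* Interrupt-driven path: sleep until completed or timed out */
		if (!wait_for_completion_timeout(&xfer->done,
						 msecs_to_jiffies(timeout_ms)))
			ret = -ETIMEDOUT;
	}

	return ret;
}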
1116 * @xfer: Reference to the transfer being waited for.
1119 * configuration flags like xfer->hdr.poll_completion.
1124 struct scmi_xfer *xfer)
1129 trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
1130 xfer->hdr.protocol_id, xfer->hdr.seq,
1132 xfer->hdr.poll_completion);
1134 return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
1140 * reply to an xfer raw request on a specific channel for the required timeout.
1143 * @xfer: Reference to the transfer being waited for.
1149 struct scmi_xfer *xfer,
1156 ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
1159 pack_scmi_header(&xfer->hdr));
1168 * @xfer: Transfer to initiate and wait for response
1175 struct scmi_xfer *xfer)
1184 if (xfer->hdr.poll_completion &&
1197 xfer->hdr.poll_completion = true;
1204 xfer->hdr.protocol_id = pi->proto->id;
1205 reinit_completion(&xfer->done);
1207 trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
1208 xfer->hdr.protocol_id, xfer->hdr.seq,
1209 xfer->hdr.poll_completion);
1212 xfer->hdr.status = SCMI_SUCCESS;
1213 xfer->state = SCMI_XFER_SENT_OK;
1216 * on xfer->state due to the monotonically increasing token allocation,
1217 * we must still ensure that xfer->state initialization is not re-ordered
1219 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
1223 ret = info->desc->ops->send_message(cinfo, xfer);
1229 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
1230 xfer->hdr.id, "CMND", xfer->hdr.seq,
1231 xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
1233 ret = scmi_wait_for_message_response(cinfo, xfer);
1234 if (!ret && xfer->hdr.status)
1235 ret = scmi_to_linux_errno(xfer->hdr.status);
1238 info->desc->ops->mark_txdone(cinfo, ret, xfer);
1240 trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
1241 xfer->hdr.protocol_id, xfer->hdr.seq, ret);
1247 struct scmi_xfer *xfer)
1252 xfer->rx.len = info->desc->max_msg_size;
1260 * @xfer: Transfer to initiate and wait for response
1280 struct scmi_xfer *xfer)
1285 xfer->async_done = &async_response;
1293 WARN_ON_ONCE(xfer->hdr.poll_completion);
1295 ret = do_xfer(ph, xfer);
1297 if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
1302 } else if (xfer->hdr.status) {
1303 ret = scmi_to_linux_errno(xfer->hdr.status);
1307 xfer->async_done = NULL;
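The asynchronous variant wires a stack completion into xfer->async_done and reuses the synchronous do_xfer() underneath; a sketch, with SCMI_MAX_RESPONSE_TIMEOUT standing in for whatever delayed-response timeout the driver actually uses.

static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
				 struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	/* Delayed responses are never polled for */
	WARN_ON_ONCE(xfer->hdr.poll_completion);

	ret = do_xfer(ph, xfer);
	if (!ret) {
		if (!wait_for_completion_timeout(xfer->async_done, timeout))
			ret = -ETIMEDOUT;
		else if (xfer->hdr.status)
			ret = scmi_to_linux_errno(xfer->hdr.status);
	}

	xfer->async_done = NULL;

	return ret;
}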
1331 struct scmi_xfer *xfer;
1342 xfer = scmi_xfer_get(pi->handle, minfo);
1343 if (IS_ERR(xfer)) {
1344 ret = PTR_ERR(xfer);
1349 /* Pick a sequence number and register this xfer as in-flight */
1350 ret = scmi_xfer_pending_set(xfer, minfo);
1354 __scmi_xfer_put(minfo, xfer);
1358 xfer->tx.len = tx_size;
1359 xfer->rx.len = rx_size ? : info->desc->max_msg_size;
1360 xfer->hdr.type = MSG_TYPE_COMMAND;
1361 xfer->hdr.id = msg_id;
1362 xfer->hdr.poll_completion = false;
1364 *p = xfer;
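From a protocol driver's point of view the whole machinery above reduces to the usual get/do/put triplet exposed through the protocol handle ops; a hypothetical example (the function name and payload handling are made up for illustration):

static int scmi_example_get_attributes(const struct scmi_protocol_handle *ph,
				       u32 *attributes)
{
	int ret;
	struct scmi_xfer *t;

	/* Reserve an xfer: no TX payload, a 32-bit RX payload expected */
	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
				      sizeof(u32), &t);
	if (ret)
		return ret;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*attributes = get_unaligned_le32(t->rx.buf);

	ph->xops->xfer_put(ph, t);

	return ret;
}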
1497 * @t: A reference to the underlying xfer initialized and used transparently by
2315 struct scmi_xfer *xfer;
2342 xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
2343 if (!xfer)
2346 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
2348 if (!xfer->rx.buf)
2351 xfer->tx.buf = xfer->rx.buf;
2352 init_completion(&xfer->done);
2353 spin_lock_init(&xfer->lock);
2355 /* Add initialized xfer to the free list */
2356 hlist_add_head(&xfer->node, &info->free_xfers);
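For context, those per-xfer initializations sit inside a pre-allocation loop run once per channel pool at probe time; an outline, with the loop bound (info->max_msg) assumed and error unwinding left to devres:

	for (i = 0; i < info->max_msg; i++) {
		xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
		if (!xfer)
			return -ENOMEM;

		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		/* TX and RX share the same pre-allocated buffer */
		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
		spin_lock_init(&xfer->lock);

		/* Add initialized xfer to the free list */
		hlist_add_head(&xfer->node, &info->free_xfers);
	}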