Lines Matching refs:operation

22 /* Workqueue to handle Greybus operation completions. */
29 * Protects updates to operation->errno.
33 static int gb_operation_response_send(struct gb_operation *operation,
37 * Increment operation active count and add to connection list unless the
40 * Caller holds operation reference.
42 static int gb_operation_get_active(struct gb_operation *operation)
44 struct gb_connection *connection = operation->connection;
52 if (gb_operation_is_incoming(operation))
56 if (!gb_operation_is_core(operation))
63 if (operation->active++ == 0)
64 list_add_tail(&operation->links, &connection->operations);
66 trace_gb_operation_get_active(operation);
78 /* Caller holds operation reference. */
79 static void gb_operation_put_active(struct gb_operation *operation)
81 struct gb_connection *connection = operation->connection;
86 trace_gb_operation_put_active(operation);
88 if (--operation->active == 0) {
89 list_del(&operation->links);
90 if (atomic_read(&operation->waiters))
96 static bool gb_operation_is_active(struct gb_operation *operation)
98 struct gb_connection *connection = operation->connection;
103 ret = operation->active;
110 * Set an operation's result.
112 * Initially an outgoing operation's errno value is -EBADR.
114 * valid value operation->errno can be set to is -EINPROGRESS,
130 * value to set for an operation in initial state is -EINPROGRESS.
132 * operation result.
134 static bool gb_operation_result_set(struct gb_operation *operation, int result)
148 prev = operation->errno;
150 operation->errno = result;
152 operation->errno = -EILSEQ;
161 * will be the final result of the operation. Subsequent
172 prev = operation->errno;
174 operation->errno = result; /* First and final result */
180 int gb_operation_result(struct gb_operation *operation)
182 int result = operation->errno;
192 * Looks up an outgoing operation on a connection and returns a refcounted
198 struct gb_operation *operation;
203 list_for_each_entry(operation, &connection->operations, links)
204 if (operation->id == operation_id &&
205 !gb_operation_is_incoming(operation)) {
206 gb_operation_get(operation);
212 return found ? operation : NULL;
217 struct gb_connection *connection = message->operation->connection;
231 struct gb_host_device *hd = message->operation->connection->hd;
236 static void gb_operation_request_handle(struct gb_operation *operation)
238 struct gb_connection *connection = operation->connection;
243 status = connection->handler(operation);
247 connection->name, operation->type);
252 ret = gb_operation_response_send(operation, status);
256 connection->name, status, operation->type, ret);
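
The fragments above (lines 236-256) are the incoming-request path: connection->handler() is a driver-supplied function that inspects the operation and returns an errno, after which the core sends the response via gb_operation_response_send(). A minimal sketch of such a handler follows. It is illustrative only: the include path assumes mainline's <linux/greybus.h> (the staging tree uses a local greybus.h), and EXAMPLE_TYPE_ECHO, example_echo_payload and the function name are made-up names, not part of the listing.

#include <linux/greybus.h>	/* assumption: mainline header location */

#define EXAMPLE_TYPE_ECHO	0x02	/* hypothetical operation type */

struct example_echo_payload {		/* hypothetical wire format */
	__le32 value;
} __packed;

static int example_request_handler(struct gb_operation *operation)
{
	struct example_echo_payload *request;
	struct example_echo_payload *response;

	if (operation->type != EXAMPLE_TYPE_ECHO)
		return -EINVAL;

	if (operation->request->payload_size < sizeof(*request))
		return -EINVAL;

	request = operation->request->payload;

	/* Handlers that need a response payload allocate it themselves. */
	if (!gb_operation_response_alloc(operation, sizeof(*response),
					 GFP_KERNEL))
		return -ENOMEM;

	response = operation->response->payload;
	response->value = request->value;	/* echo the value back */

	return 0;	/* the core sends the response with this status */
}
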
262 * Process operation work.
264 * For incoming requests, call the protocol request handler. The operation
267 * For outgoing requests, the operation result value should have
268 * been set before queueing this. The operation callback function
274 struct gb_operation *operation;
277 operation = container_of(work, struct gb_operation, work);
279 if (gb_operation_is_incoming(operation)) {
280 gb_operation_request_handle(operation);
282 ret = del_timer_sync(&operation->timer);
285 if (gb_operation_result(operation) == -ETIMEDOUT)
286 gb_message_cancel(operation->request);
289 operation->callback(operation);
292 gb_operation_put_active(operation);
293 gb_operation_put(operation);
298 struct gb_operation *operation = from_timer(operation, t, timer);
300 if (gb_operation_result_set(operation, -ETIMEDOUT)) {
305 queue_work(gb_operation_completion_wq, &operation->work);
331 * For a request, the operation id gets filled in
347 * Allocate a message to be used for an operation request or response.
349 * for an outgoing operation is outbound, as is the response message
350 * for an incoming operation. The message header for an outbound
434 * Map a Linux errno value (from operation->errno) into the value
468 bool gb_operation_response_alloc(struct gb_operation *operation,
471 struct gb_host_device *hd = operation->connection->hd;
476 type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
480 response->operation = operation;
485 * that's left is the operation id, which we copy from the
488 request_header = operation->request->header;
490 operation->response = response;
497 * Create a Greybus operation to be sent over the given connection.
515 * Returns a pointer to the new operation or a null pointer if an
524 struct gb_operation *operation;
526 operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
527 if (!operation)
529 operation->connection = connection;
531 operation->request = gb_operation_message_alloc(hd, type, request_size,
533 if (!operation->request)
535 operation->request->operation = operation;
539 if (!gb_operation_response_alloc(operation, response_size,
544 timer_setup(&operation->timer, gb_operation_timeout, 0);
547 operation->flags = op_flags;
548 operation->type = type;
549 operation->errno = -EBADR; /* Initial value--means "never set" */
551 INIT_WORK(&operation->work, gb_operation_work);
552 init_completion(&operation->completion);
553 kref_init(&operation->kref);
554 atomic_set(&operation->waiters, 0);
556 return operation;
559 gb_operation_message_free(operation->request);
561 kmem_cache_free(gb_operation_cache, operation);
567 * Create a new operation associated with the given connection. The
571 * invalid operation type for all protocols, and this is enforced
580 struct gb_operation *operation;
590 operation = gb_operation_create_common(connection, type,
593 if (operation)
594 trace_gb_operation_create(operation);
596 return operation;
606 struct gb_operation *operation;
610 operation = gb_operation_create_common(connection, type,
613 if (operation)
614 trace_gb_operation_create_core(operation);
616 return operation;
633 struct gb_operation *operation;
643 operation = gb_operation_create_common(connection, type,
647 if (!operation)
650 operation->id = id;
651 memcpy(operation->request->header, data, size);
652 trace_gb_operation_create_incoming(operation);
654 return operation;
658 * Get an additional reference on an operation.
660 void gb_operation_get(struct gb_operation *operation)
662 kref_get(&operation->kref);
667 * Destroy a previously created operation.
671 struct gb_operation *operation;
673 operation = container_of(kref, struct gb_operation, kref);
675 trace_gb_operation_destroy(operation);
677 if (operation->response)
678 gb_operation_message_free(operation->response);
679 gb_operation_message_free(operation->request);
681 kmem_cache_free(gb_operation_cache, operation);
685 * Drop a reference on an operation, and destroy it when the last
688 void gb_operation_put(struct gb_operation *operation)
690 if (WARN_ON(!operation))
693 kref_put(&operation->kref, _gb_operation_destroy);
698 static void gb_operation_sync_callback(struct gb_operation *operation)
700 complete(&operation->completion);
704 * gb_operation_request_send() - send an operation request message
705 * @operation: the operation to initiate
706 * @callback: the operation completion callback
707 * @timeout: operation timeout in milliseconds, or zero for no timeout
712 * arrived, a unidirectional request has been sent, or the operation is
713 * cancelled, indicating that the operation is complete. The callback function
714 * can fetch the result of the operation using gb_operation_result() if
720 int gb_operation_request_send(struct gb_operation *operation,
725 struct gb_connection *connection = operation->connection;
739 * of an operation has been set.
741 operation->callback = callback;
744 * Assign the operation's id, and store it in the request header.
745 * Zero is a reserved operation id for unidirectional operations.
747 if (gb_operation_is_unidirectional(operation)) {
748 operation->id = 0;
751 operation->id = (u16)(cycle % U16_MAX + 1);
754 header = operation->request->header;
755 header->operation_id = cpu_to_le16(operation->id);
757 gb_operation_result_set(operation, -EINPROGRESS);
760 * Get an extra reference on the operation. It'll be dropped when the
761 * operation completes.
763 gb_operation_get(operation);
764 ret = gb_operation_get_active(operation);
768 ret = gb_message_send(operation->request, gfp);
773 operation->timer.expires = jiffies + msecs_to_jiffies(timeout);
774 add_timer(&operation->timer);
780 gb_operation_put_active(operation);
782 gb_operation_put(operation);
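
A minimal sketch of a caller of this asynchronous path, using hypothetical EXAMPLE_* constants and payload layout: the callback runs from the completion workqueue once a response arrives, the timeout fires, or the operation is cancelled, and the caller's reference from gb_operation_create() is dropped there (the extra reference taken inside gb_operation_request_send() at line 763 is dropped by the core).

#define EXAMPLE_TYPE_ASYNC	0x03	/* hypothetical operation type */
#define EXAMPLE_TIMEOUT_MS	1000	/* hypothetical timeout */

/* Completion callback; runs from the operation completion workqueue. */
static void example_async_callback(struct gb_operation *operation)
{
	int ret = gb_operation_result(operation);

	if (ret)
		pr_err("example request failed: %d\n", ret);

	/* Drop the reference taken by gb_operation_create() below. */
	gb_operation_put(operation);
}

static int example_send_async(struct gb_connection *connection, __le32 value)
{
	struct gb_operation *operation;
	__le32 *payload;
	int ret;

	operation = gb_operation_create(connection, EXAMPLE_TYPE_ASYNC,
					sizeof(*payload), 0, GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	payload = operation->request->payload;
	*payload = value;

	ret = gb_operation_request_send(operation, example_async_callback,
					EXAMPLE_TIMEOUT_MS, GFP_KERNEL);
	if (ret)
		gb_operation_put(operation);	/* send failed; clean up */

	return ret;
}
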
789 * Send a synchronous operation. This function is expected to
792 * operation.
794 int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
799 ret = gb_operation_request_send(operation, gb_operation_sync_callback,
804 ret = wait_for_completion_interruptible(&operation->completion);
806 /* Cancel the operation if interrupted */
807 gb_operation_cancel(operation, -ECANCELED);
810 return gb_operation_result(operation);
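
Drivers that need the raw operation, for example to accept a response shorter than the allocated buffer, can drive this synchronous path directly instead of using the gb_operation_sync_timeout() helper further down. A hedged sketch, assuming GB_OPERATION_FLAG_SHORT_RESPONSE is appropriate for the protocol and using a hypothetical EXAMPLE_TYPE_READ:

#define EXAMPLE_TYPE_READ	0x04	/* hypothetical operation type */

static int example_read(struct gb_connection *connection, void *buf,
			size_t buf_size)
{
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_flags(connection, EXAMPLE_TYPE_READ,
					      0, buf_size,
					      GB_OPERATION_FLAG_SHORT_RESPONSE,
					      GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	ret = gb_operation_request_send_sync_timeout(operation, 1000 /* ms */);
	if (!ret) {
		/* With the short-response flag the reply may be smaller. */
		size_t len = operation->response->payload_size;

		memcpy(buf, operation->response->payload, min(len, buf_size));
	}

	gb_operation_put(operation);

	return ret;
}
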
815 * Send a response for an incoming operation request. A non-zero
816 * errno indicates a failed operation.
823 static int gb_operation_response_send(struct gb_operation *operation,
826 struct gb_connection *connection = operation->connection;
829 if (!operation->response &&
830 !gb_operation_is_unidirectional(operation)) {
831 if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
836 if (!gb_operation_result_set(operation, errno)) {
842 if (gb_operation_is_unidirectional(operation))
846 gb_operation_get(operation);
847 ret = gb_operation_get_active(operation);
852 operation->response->header->result = gb_operation_errno_map(errno);
854 ret = gb_message_send(operation->response, GFP_KERNEL);
861 gb_operation_put_active(operation);
863 gb_operation_put(operation);
874 struct gb_operation *operation = message->operation;
875 struct gb_connection *connection = operation->connection;
879 * reference to the operation. If an error occurred, report
882 * For requests, if there's no error and the operation is not
885 * operation is unidirectional, record the result of the operation and
888 if (message == operation->response) {
892 connection->name, operation->type, status);
895 gb_operation_put_active(operation);
896 gb_operation_put(operation);
897 } else if (status || gb_operation_is_unidirectional(operation)) {
898 if (gb_operation_result_set(operation, status)) {
900 &operation->work);
917 struct gb_operation *operation;
925 operation = gb_operation_create_incoming(connection, operation_id,
927 if (!operation) {
929 "%s: can't create incoming operation\n",
934 ret = gb_operation_get_active(operation);
936 gb_operation_put(operation);
939 trace_gb_message_recv_request(operation->request);
942 * The initial reference to the operation will be dropped when the
945 if (gb_operation_result_set(operation, -EINPROGRESS))
946 queue_work(connection->wq, &operation->work);
950 * We've received data that appears to be an operation response
951 * message. Look up the operation, and record that we've received
961 struct gb_operation *operation;
976 operation = gb_operation_find_outgoing(connection, operation_id);
977 if (!operation) {
985 message = operation->response;
994 if (gb_operation_short_response_allowed(operation)) {
1010 if (gb_operation_result_set(operation, errno)) {
1015 queue_work(gb_operation_completion_wq, &operation->work);
1018 gb_operation_put(operation);
1055 return; /* XXX Should still complete operation */
1068 * Cancel an outgoing operation synchronously, and record the given error to
1071 void gb_operation_cancel(struct gb_operation *operation, int errno)
1073 if (WARN_ON(gb_operation_is_incoming(operation)))
1076 if (gb_operation_result_set(operation, errno)) {
1077 gb_message_cancel(operation->request);
1078 queue_work(gb_operation_completion_wq, &operation->work);
1080 trace_gb_message_cancel_outgoing(operation->request);
1082 atomic_inc(&operation->waiters);
1084 !gb_operation_is_active(operation));
1085 atomic_dec(&operation->waiters);
1090 * Cancel an incoming operation synchronously. Called during connection tear
1093 void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
1095 if (WARN_ON(!gb_operation_is_incoming(operation)))
1098 if (!gb_operation_is_unidirectional(operation)) {
1103 flush_work(&operation->work);
1104 if (!gb_operation_result_set(operation, errno))
1105 gb_message_cancel(operation->response);
1107 trace_gb_message_cancel_incoming(operation->response);
1109 atomic_inc(&operation->waiters);
1111 !gb_operation_is_active(operation));
1112 atomic_dec(&operation->waiters);
1116 * gb_operation_sync_timeout() - implement a "simple" synchronous operation
1118 * @type: the type of operation to send
1123 * @timeout: operation timeout in milliseconds
1125 * This function implements a simple synchronous Greybus operation. It sends
1126 * the provided operation request and waits (sleeps) until the corresponding
1127 * operation response message has been successfully received, or an error
1133 * @response_size number of bytes will be copied into @response if the operation
1143 struct gb_operation *operation;
1150 operation = gb_operation_create(connection, type,
1153 if (!operation)
1157 memcpy(operation->request->payload, request, request_size);
1159 ret = gb_operation_request_send_sync_timeout(operation, timeout);
1162 "%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
1163 connection->name, operation->id, type, ret);
1166 memcpy(response, operation->response->payload,
1171 gb_operation_put(operation);
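
A minimal sketch of a gb_operation_sync_timeout() caller, with a hypothetical EXAMPLE_TYPE_VERSION and made-up payload structures; the request payload is copied in and the response payload copied out by the helper itself (lines 1157 and 1166):

#define EXAMPLE_TYPE_VERSION	0x01	/* hypothetical operation type */

struct example_version_request {	/* hypothetical wire formats */
	__u8	major;
	__u8	minor;
} __packed;

struct example_version_response {
	__u8	major;
	__u8	minor;
} __packed;

static int example_get_version(struct gb_connection *connection,
			       u8 *major, u8 *minor)
{
	struct example_version_request request;
	struct example_version_response response;
	int ret;

	request.major = 1;
	request.minor = 0;

	ret = gb_operation_sync_timeout(connection, EXAMPLE_TYPE_VERSION,
					&request, sizeof(request),
					&response, sizeof(response),
					1000 /* ms */);
	if (ret)
		return ret;

	*major = response.major;
	*minor = response.minor;

	return 0;
}
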
1178 * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
1180 * @type: type of operation to send
1185 * Initiate a unidirectional operation by sending a request message and
1188 * Note that successful send of a unidirectional operation does not imply that
1196 struct gb_operation *operation;
1202 operation = gb_operation_create_flags(connection, type,
1206 if (!operation)
1210 memcpy(operation->request->payload, request, request_size);
1212 ret = gb_operation_request_send_sync_timeout(operation, timeout);
1215 "%s: unidirectional operation of type 0x%02x failed: %d\n",
1219 gb_operation_put(operation);
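
A minimal sketch of a unidirectional send, again with a hypothetical operation type and payload; as noted at line 1188, success only means the request message itself was sent:

#define EXAMPLE_TYPE_EVENT	0x05	/* hypothetical operation type */

struct example_event {			/* hypothetical wire format */
	__le16	code;
} __packed;

static int example_send_event(struct gb_connection *connection, u16 code)
{
	struct example_event event = {
		.code = cpu_to_le16(code),
	};

	/*
	 * No response is expected; this returns once the request message
	 * has been sent (or an error/timeout occurs).
	 */
	return gb_operation_unidirectional_timeout(connection,
						   EXAMPLE_TYPE_EVENT,
						   &event, sizeof(event),
						   500 /* ms */);
}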