Lines matching refs: xfer (scmi_xfer references in the Arm SCMI transport core, drivers/firmware/arm_scmi/driver.c)
262 struct scmi_xfer *xfer) in scmi_xfer_token_set() argument
274 next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1)); in scmi_xfer_token_set()
302 xfer->hdr.seq = (u16)xfer_id; in scmi_xfer_token_set()
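
The three references above cover token allocation: scmi_xfer_token_set() seeds the search with the low bits of the monotonic transfer_id, picks the next free slot in minfo->xfer_alloc_table, and stores the winner in xfer->hdr.seq, the sequence token that later matches replies to requests. A minimal userspace model of that circular search, assuming the kernel's 10-bit token space (MSG_TOKEN_MAX = 1024):

    #include <stdbool.h>
    #include <stdint.h>

    #define MSG_TOKEN_MAX 1024    /* 10-bit seq field in the message header */

    static bool token_used[MSG_TOKEN_MAX];   /* models xfer_alloc_table */

    /* Returns the allocated token, or -1 when all slots are in flight
     * (the -ENOMEM path in the kernel). */
    static int token_set(uint32_t transfer_id)
    {
        int start = transfer_id & (MSG_TOKEN_MAX - 1);

        for (int i = 0; i < MSG_TOKEN_MAX; i++) {
            int id = (start + i) & (MSG_TOKEN_MAX - 1);

            if (!token_used[id]) {
                token_used[id] = true;   /* set_bit(xfer_id, ...) */
                return id;               /* becomes xfer->hdr.seq */
            }
        }
        return -1;
    }
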
314 struct scmi_xfer *xfer) in scmi_xfer_token_clear() argument
316 clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table); in scmi_xfer_token_clear()
347 struct scmi_xfer *xfer; in scmi_xfer_get() local
356 xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node); in scmi_xfer_get()
357 hlist_del_init(&xfer->node); in scmi_xfer_get()
363 xfer->transfer_id = atomic_inc_return(&transfer_last_id); in scmi_xfer_get()
367 ret = scmi_xfer_token_set(minfo, xfer); in scmi_xfer_get()
369 hash_add(minfo->pending_xfers, &xfer->node, in scmi_xfer_get()
370 xfer->hdr.seq); in scmi_xfer_get()
371 xfer->pending = true; in scmi_xfer_get()
375 hlist_add_head(&xfer->node, &minfo->free_xfers); in scmi_xfer_get()
376 xfer = ERR_PTR(ret); in scmi_xfer_get()
380 if (!IS_ERR(xfer)) { in scmi_xfer_get()
381 refcount_set(&xfer->users, 1); in scmi_xfer_get()
382 atomic_set(&xfer->busy, SCMI_XFER_FREE); in scmi_xfer_get()
386 return xfer; in scmi_xfer_get()
401 __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer) in __scmi_xfer_put() argument
406 if (refcount_dec_and_test(&xfer->users)) { in __scmi_xfer_put()
407 if (xfer->pending) { in __scmi_xfer_put()
408 scmi_xfer_token_clear(minfo, xfer); in __scmi_xfer_put()
409 hash_del(&xfer->node); in __scmi_xfer_put()
410 xfer->pending = false; in __scmi_xfer_put()
412 hlist_add_head(&xfer->node, &minfo->free_xfers); in __scmi_xfer_put()
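
Together, scmi_xfer_get() and __scmi_xfer_put() implement the descriptor pool visible above: get pops a free xfer, stamps a fresh transfer_id, sets users to 1 and, for commands, hashes the xfer into pending_xfers under its token; put drops a reference and, on the last one, clears the token and returns the node to free_xfers. A simplified single-threaded model (the kernel does all of this under minfo->xfer_lock):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct xfer {
        struct xfer *next;          /* models the hlist node on free_xfers */
        atomic_int users;           /* models refcount_t xfer->users */
        unsigned int transfer_id;
        bool pending;               /* true while hashed into pending_xfers */
    };

    static struct xfer *free_list;
    static atomic_uint transfer_last_id;

    static struct xfer *xfer_get(void)
    {
        struct xfer *x = free_list;    /* minfo->xfer_lock held in the kernel */

        if (!x)
            return NULL;               /* every descriptor is in flight */
        free_list = x->next;
        x->transfer_id = atomic_fetch_add(&transfer_last_id, 1) + 1;
        atomic_store(&x->users, 1);    /* refcount_set(&xfer->users, 1) */
        return x;
    }

    static void xfer_put(struct xfer *x)
    {
        if (atomic_fetch_sub(&x->users, 1) == 1) {   /* last reference */
            x->pending = false;        /* token_clear() + hash_del() */
            x->next = free_list;
            free_list = x;             /* hlist_add_head(...) */
        }
    }
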
432 struct scmi_xfer *xfer = NULL; in scmi_xfer_lookup_unlocked() local
435 xfer = XFER_FIND(minfo->pending_xfers, xfer_id); in scmi_xfer_lookup_unlocked()
437 return xfer ?: ERR_PTR(-EINVAL); in scmi_xfer_lookup_unlocked()
461 struct scmi_xfer *xfer) in scmi_msg_response_validate() argument
469 if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) { in scmi_msg_response_validate()
472 xfer->hdr.seq); in scmi_msg_response_validate()
476 switch (xfer->state) { in scmi_msg_response_validate()
483 xfer->hdr.status = SCMI_SUCCESS; in scmi_msg_response_validate()
484 xfer->state = SCMI_XFER_RESP_OK; in scmi_msg_response_validate()
485 complete(&xfer->done); in scmi_msg_response_validate()
488 xfer->hdr.seq); in scmi_msg_response_validate()
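
scmi_msg_response_validate() is the state machine deciding whether an incoming message type is legal for this xfer: delayed responses are rejected unless the caller armed xfer->async_done, and a delayed response that overtakes the regular one (the SCMI_SUCCESS / complete() lines above) implicitly completes the synchronous wait first. A condensed sketch of that check, with the kernel's enum names shortened:

    enum xfer_state { SENT_OK, RESP_OK, DRESP_OK };

    static int response_validate(int is_delayed, int async_armed,
                                 enum xfer_state *state)
    {
        if (is_delayed && !async_armed)
            return -1;                   /* "buggy firmware" path, -EINVAL */

        switch (*state) {
        case SENT_OK:
            if (is_delayed)
                *state = RESP_OK;        /* OoO DRESP: mark sync reply done */
            return 0;
        case RESP_OK:
            return is_delayed ? 0 : -1;  /* only a delayed resp may follow */
        case DRESP_OK:
        default:
            return -1;                   /* nothing valid after the DRESP */
        }
    }
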
515 static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type) in scmi_xfer_state_update() argument
517 xfer->hdr.type = msg_type; in scmi_xfer_state_update()
520 if (xfer->hdr.type == MSG_TYPE_COMMAND) in scmi_xfer_state_update()
521 xfer->state = SCMI_XFER_RESP_OK; in scmi_xfer_state_update()
523 xfer->state = SCMI_XFER_DRESP_OK; in scmi_xfer_state_update()
526 static bool scmi_xfer_acquired(struct scmi_xfer *xfer) in scmi_xfer_acquired() argument
530 ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY); in scmi_xfer_acquired()
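
The busy flag is a per-xfer trylock: scmi_xfer_acquired() flips it from FREE to BUSY with a single atomic compare-and-exchange, so only one context can process a given xfer at a time. C11 equivalent of the atomic_cmpxchg() call above (the FREE/BUSY values here are illustrative; they only need to be distinct):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define SCMI_XFER_FREE 0
    #define SCMI_XFER_BUSY 1

    static bool xfer_acquired(atomic_int *busy)
    {
        int expected = SCMI_XFER_FREE;

        /* Succeeds only if *busy was FREE, leaving it BUSY on success;
         * mirrors the atomic_cmpxchg() in scmi_xfer_acquired(). */
        return atomic_compare_exchange_strong(busy, &expected,
                                              SCMI_XFER_BUSY);
    }
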
552 struct scmi_xfer *xfer; in scmi_xfer_command_acquire() local
560 xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id); in scmi_xfer_command_acquire()
561 if (IS_ERR(xfer)) { in scmi_xfer_command_acquire()
566 return xfer; in scmi_xfer_command_acquire()
568 refcount_inc(&xfer->users); in scmi_xfer_command_acquire()
571 spin_lock_irqsave(&xfer->lock, flags); in scmi_xfer_command_acquire()
572 ret = scmi_msg_response_validate(cinfo, msg_type, xfer); in scmi_xfer_command_acquire()
581 spin_until_cond(scmi_xfer_acquired(xfer)); in scmi_xfer_command_acquire()
582 scmi_xfer_state_update(xfer, msg_type); in scmi_xfer_command_acquire()
584 spin_unlock_irqrestore(&xfer->lock, flags); in scmi_xfer_command_acquire()
589 msg_type, xfer_id, msg_hdr, xfer->state); in scmi_xfer_command_acquire()
591 __scmi_xfer_put(minfo, xfer); in scmi_xfer_command_acquire()
592 xfer = ERR_PTR(-EINVAL); in scmi_xfer_command_acquire()
595 return xfer; in scmi_xfer_command_acquire()
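
scmi_xfer_command_acquire() stitches the previous helpers together: look the xfer up by token, take an extra user reference, validate the message type under xfer->lock, spin until the busy flag is won, then record the new state. The refcount_inc() above is what keeps the descriptor alive if the original sender times out and releases it concurrently; scmi_xfer_command_release() drops that reference again. A small C11 model of the get/put pairing:

    #include <stdatomic.h>

    struct ref { atomic_int users; };

    static void get_ref(struct ref *r)
    {
        atomic_fetch_add(&r->users, 1);     /* refcount_inc(&xfer->users) */
    }

    /* Returns nonzero when the caller removed the last reference and must
     * recycle the object, the __scmi_xfer_put() path in the lines above. */
    static int put_ref(struct ref *r)
    {
        return atomic_fetch_sub(&r->users, 1) == 1;
    }
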
599 struct scmi_xfer *xfer) in scmi_xfer_command_release() argument
601 atomic_set(&xfer->busy, SCMI_XFER_FREE); in scmi_xfer_command_release()
602 __scmi_xfer_put(&info->tx_minfo, xfer); in scmi_xfer_command_release()
615 struct scmi_xfer *xfer; in scmi_handle_notification() local
622 xfer = scmi_xfer_get(cinfo->handle, minfo, false); in scmi_handle_notification()
623 if (IS_ERR(xfer)) { in scmi_handle_notification()
625 PTR_ERR(xfer)); in scmi_handle_notification()
630 unpack_scmi_header(msg_hdr, &xfer->hdr); in scmi_handle_notification()
632 xfer->priv = priv; in scmi_handle_notification()
634 xfer); in scmi_handle_notification()
635 scmi_notify(cinfo->handle, xfer->hdr.protocol_id, in scmi_handle_notification()
636 xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); in scmi_handle_notification()
638 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, in scmi_handle_notification()
639 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_handle_notification()
642 __scmi_xfer_put(minfo, xfer); in scmi_handle_notification()
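
Notifications have no pending command to match, so scmi_handle_notification() simply borrows a free xfer as a scratch buffer, decodes the header, fetches the payload and dispatches it to scmi_notify() before putting the xfer straight back. A sketch of the decode done by unpack_scmi_header(), following the SCMI message-header layout (message id [7:0], type [9:8], protocol [17:10], token [27:18]):

    #include <stdint.h>

    struct msg_hdr_model {
        uint8_t  id;            /* message ID */
        uint8_t  type;          /* command / delayed resp / notification */
        uint8_t  protocol_id;
        uint16_t seq;           /* token */
    };

    static void unpack_header(uint32_t msg_hdr, struct msg_hdr_model *hdr)
    {
        hdr->id          = msg_hdr & 0xff;          /* bits  [7:0]  */
        hdr->type        = (msg_hdr >> 8) & 0x3;    /* bits  [9:8]  */
        hdr->protocol_id = (msg_hdr >> 10) & 0xff;  /* bits [17:10] */
        hdr->seq         = (msg_hdr >> 18) & 0x3ff; /* bits [27:18] */
    }
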
650 struct scmi_xfer *xfer; in scmi_handle_response() local
653 xfer = scmi_xfer_command_acquire(cinfo, msg_hdr); in scmi_handle_response()
654 if (IS_ERR(xfer)) { in scmi_handle_response()
660 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) in scmi_handle_response()
661 xfer->rx.len = info->desc->max_msg_size; in scmi_handle_response()
664 xfer->priv = priv; in scmi_handle_response()
665 info->desc->ops->fetch_response(cinfo, xfer); in scmi_handle_response()
667 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, in scmi_handle_response()
668 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_handle_response()
669 xfer->hdr.type); in scmi_handle_response()
671 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) { in scmi_handle_response()
673 complete(xfer->async_done); in scmi_handle_response()
675 complete(&xfer->done); in scmi_handle_response()
678 scmi_xfer_command_release(info, xfer); in scmi_handle_response()
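
scmi_handle_response() resolves the token back to a waiting xfer, resets rx.len to max_msg_size for delayed responses before fetching the payload, and then fires the right completion: async_done for a delayed response, done for a regular one. A minimal model of that final routing step (MSG_TYPE_DELAYED_RESP = 2 per the header type field):

    #include <stdbool.h>

    #define MSG_TYPE_COMMAND      0
    #define MSG_TYPE_DELAYED_RESP 2   /* header "type" field values */

    struct completion_model { bool done; };

    static void complete_model(struct completion_model *c)
    {
        c->done = true;               /* stand-in for complete() */
    }

    struct xfer_model {
        int type;                             /* xfer->hdr.type */
        struct completion_model done;         /* synchronous waiter */
        struct completion_model *async_done;  /* armed only for async cmds */
    };

    /* Mirrors the complete(xfer->async_done) vs complete(&xfer->done)
     * split at the end of scmi_handle_response(). */
    static void route_completion(struct xfer_model *x)
    {
        if (x->type == MSG_TYPE_DELAYED_RESP && x->async_done)
            complete_model(x->async_done);
        else
            complete_model(&x->done);
    }
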
719 struct scmi_xfer *xfer) in xfer_put() argument
724 __scmi_xfer_put(&info->tx_minfo, xfer); in xfer_put()
730 struct scmi_xfer *xfer, ktime_t stop) in scmi_xfer_done_no_timeout() argument
738 return info->desc->ops->poll_done(cinfo, xfer) || in scmi_xfer_done_no_timeout()
739 try_wait_for_completion(&xfer->done) || in scmi_xfer_done_no_timeout()
754 struct scmi_xfer *xfer) in do_xfer() argument
763 if (xfer->hdr.poll_completion && !info->desc->ops->poll_done) { in do_xfer()
774 xfer->hdr.protocol_id = pi->proto->id; in do_xfer()
775 reinit_completion(&xfer->done); in do_xfer()
777 cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id); in do_xfer()
781 trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id, in do_xfer()
782 xfer->hdr.protocol_id, xfer->hdr.seq, in do_xfer()
783 xfer->hdr.poll_completion); in do_xfer()
785 xfer->state = SCMI_XFER_SENT_OK; in do_xfer()
795 ret = info->desc->ops->send_message(cinfo, xfer); in do_xfer()
801 if (xfer->hdr.poll_completion) { in do_xfer()
804 spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); in do_xfer()
812 spin_lock_irqsave(&xfer->lock, flags); in do_xfer()
813 if (xfer->state == SCMI_XFER_SENT_OK) { in do_xfer()
814 info->desc->ops->fetch_response(cinfo, xfer); in do_xfer()
815 xfer->state = SCMI_XFER_RESP_OK; in do_xfer()
817 spin_unlock_irqrestore(&xfer->lock, flags); in do_xfer()
824 if (!wait_for_completion_timeout(&xfer->done, timeout)) { in do_xfer()
831 if (!ret && xfer->hdr.status) in do_xfer()
832 ret = scmi_to_linux_errno(xfer->hdr.status); in do_xfer()
837 trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, in do_xfer()
838 xfer->hdr.protocol_id, xfer->hdr.seq, ret); in do_xfer()
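
do_xfer() is the synchronous send path: it stamps the protocol id, sends via the transport, then waits using one of two strategies visible above: busy polling poll_done() against a deadline when xfer->hdr.poll_completion is set (after which it fetches the response itself under xfer->lock), or sleeping on xfer->done with a timeout otherwise. A userspace-flavoured sketch of that split; poll_done() and wait_done() are hypothetical stand-ins for the transport op and wait_for_completion_timeout():

    #include <stdbool.h>
    #include <time.h>

    /* Hypothetical stubs for the transport's poll_done() op and for
     * wait_for_completion_timeout(). */
    static bool poll_done(void)           { return true; }
    static bool wait_done(int timeout_ms) { (void)timeout_ms; return true; }

    static int wait_reply(bool poll_completion, int timeout_ms)
    {
        if (poll_completion) {
            /* Coarse second-granularity deadline; ktime-based in the kernel. */
            time_t stop = time(NULL) + timeout_ms / 1000 + 1;

            while (time(NULL) < stop)       /* spin_until_cond(...) */
                if (poll_done())
                    return 0;               /* caller then fetches the reply */
            return -110;                    /* -ETIMEDOUT */
        }
        return wait_done(timeout_ms) ? 0 : -110;
    }
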
844 struct scmi_xfer *xfer) in reset_rx_to_maxsz() argument
849 xfer->rx.len = info->desc->max_msg_size; in reset_rx_to_maxsz()
865 struct scmi_xfer *xfer) in do_xfer_with_response() argument
870 xfer->async_done = &async_response; in do_xfer_with_response()
872 ret = do_xfer(ph, xfer); in do_xfer_with_response()
874 if (!wait_for_completion_timeout(xfer->async_done, timeout)) in do_xfer_with_response()
876 else if (xfer->hdr.status) in do_xfer_with_response()
877 ret = scmi_to_linux_errno(xfer->hdr.status); in do_xfer_with_response()
880 xfer->async_done = NULL; in do_xfer_with_response()
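
Asynchronous commands complete twice: an immediate response, consumed inside do_xfer(), and a delayed response that arrives later. do_xfer_with_response() arms xfer->async_done with an on-stack completion before sending and then sleeps on it, exactly the pattern in the lines above. A compact model of that double wait (the helper names are stand-ins):

    #include <stdbool.h>

    struct completion { bool done; };

    struct xfer {
        struct completion done;         /* immediate response */
        struct completion *async_done;  /* delayed response, when armed */
        int status;                     /* SCMI status from the firmware */
    };

    /* Hypothetical stand-ins for do_xfer() and
     * wait_for_completion_timeout(). */
    static int  send_and_wait_immediate(struct xfer *x) { (void)x; return 0; }
    static bool wait_timeout(struct completion *c, int ms)
    {
        (void)ms;
        return c->done;
    }

    static int do_xfer_with_response_model(struct xfer *x)
    {
        struct completion async = { false };
        int ret;

        x->async_done = &async;            /* arm before sending */
        ret = send_and_wait_immediate(x);  /* sync half of the command */
        if (!ret && !wait_timeout(x->async_done, 2000))
            ret = -1;                      /* -ETIMEDOUT in the kernel */
        else if (!ret && x->status)
            ret = x->status;               /* scmi_to_linux_errno(...) */
        x->async_done = NULL;              /* disarm on every path */
        return ret;
    }
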
904 struct scmi_xfer *xfer; in xfer_get_init() local
915 xfer = scmi_xfer_get(pi->handle, minfo, true); in xfer_get_init()
916 if (IS_ERR(xfer)) { in xfer_get_init()
917 ret = PTR_ERR(xfer); in xfer_get_init()
922 xfer->tx.len = tx_size; in xfer_get_init()
923 xfer->rx.len = rx_size ? : info->desc->max_msg_size; in xfer_get_init()
924 xfer->hdr.type = MSG_TYPE_COMMAND; in xfer_get_init()
925 xfer->hdr.id = msg_id; in xfer_get_init()
926 xfer->hdr.poll_completion = false; in xfer_get_init()
928 *p = xfer; in xfer_get_init()
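
xfer_get_init() is the entry point protocol drivers use to reserve and prime a descriptor: it validates sizes against max_msg_size, grabs an xfer with a token, and presets the TX/RX lengths and the command header. The canonical consumer pattern, essentially what the core's protocol-version helper does (PROTOCOL_VERSION is the common message id 0x0; error handling trimmed for brevity):

    static int scmi_proto_get_version(const struct scmi_protocol_handle *ph,
                                      u32 *version)
    {
        struct scmi_xfer *t;
        int ret;

        ret = ph->xops->xfer_get_init(ph, PROTOCOL_VERSION,
                                      0, sizeof(*version), &t);
        if (ret)
            return ret;

        ret = ph->xops->do_xfer(ph, t);          /* send + wait for reply */
        if (!ret)
            *version = le32_to_cpup(t->rx.buf);

        ph->xops->xfer_put(ph, t);               /* back to the free pool */
        return ret;
    }
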
1380 struct scmi_xfer *xfer; in __scmi_xfer_info_init() local
1407 xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL); in __scmi_xfer_info_init()
1408 if (!xfer) in __scmi_xfer_info_init()
1411 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size, in __scmi_xfer_info_init()
1413 if (!xfer->rx.buf) in __scmi_xfer_info_init()
1416 xfer->tx.buf = xfer->rx.buf; in __scmi_xfer_info_init()
1417 init_completion(&xfer->done); in __scmi_xfer_info_init()
1418 spin_lock_init(&xfer->lock); in __scmi_xfer_info_init()
1421 hlist_add_head(&xfer->node, &info->free_xfers); in __scmi_xfer_info_init()
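
Finally, __scmi_xfer_info_init() shows that the whole pool is preallocated at probe time: one descriptor per allowed in-flight message, each with a single max_msg_size scratch buffer that both tx.buf and rx.buf alias, all threaded onto free_xfers. A userspace model of that setup loop, with illustrative sizes (both are per-transport in the kernel; the model leaks on failure for brevity, where the kernel's devm_* allocations unwind automatically):

    #include <stdlib.h>

    #define MAX_MSG       20    /* illustrative; per-transport in the kernel */
    #define MAX_MSG_SIZE 128    /* illustrative; desc->max_msg_size */

    struct xfer {
        struct xfer *next;              /* free-list linkage */
        void *tx_buf, *rx_buf;
    };

    static struct xfer *init_pool(void)
    {
        struct xfer *free_list = NULL;

        for (int i = 0; i < MAX_MSG; i++) {
            struct xfer *x = calloc(1, sizeof(*x));

            if (!x)
                return NULL;
            x->rx_buf = calloc(1, MAX_MSG_SIZE);
            if (!x->rx_buf)
                return NULL;
            x->tx_buf = x->rx_buf;      /* TX and RX alias one scratch buffer */
            x->next = free_list;        /* hlist_add_head(&xfer->node, ...) */
            free_list = x;
        }
        return free_list;
    }
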