/*
 * SHARED BUFFER
 *
 * Copyright (C) 2017-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Li Fei <fei1.li@intel.com>
 *
 */

#include <types.h>
#include <rtl.h>
#include <errno.h>
#include <asm/cpu.h>
#include <asm/per_cpu.h>
#include <vm_event.h>

uint32_t sbuf_next_ptr(uint32_t pos_arg,
		uint32_t span, uint32_t scope)
{
	uint32_t pos = pos_arg;
	pos += span;
	pos = (pos >= scope) ? (pos - scope) : pos;
	return pos;
}
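
/*
 * Worked example of the wrap-around arithmetic above (illustrative sketch
 * only; the 4096-byte ring and 64-byte element size are hypothetical values,
 * not taken from this file):
 *
 *	uint32_t next;
 *
 *	next = sbuf_next_ptr(64U, 64U, 4096U);		64 + 64 = 128 < 4096, so next == 128U
 *	next = sbuf_next_ptr(4032U, 64U, 4096U);	4032 + 64 = 4096 >= 4096, wraps to next == 0U
 */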

/**
 * The caller should guarantee that, on each call, at least sbuf->ele_size
 * bytes of data are available to be written from 'data'.
 *
 * As sbuf->ele_size is possibly set up by some sources outside of the
 * HV (e.g. the service VM), it is not meant to be trusted. So the caller
 * should provide the max length of the data for safety reasons.
 *
 * This function should guarantee atomic execution.
 *
 * flag:
 * If OVERWRITE_EN is set, buf can store (ele_num - 1) elements at most.
 * A lock should be used to guarantee that only one read or write happens
 * at a time.
 * If OVERWRITE_EN is not set, buf can store (ele_num - 1) elements
 * at most, and the writer shouldn't modify sbuf->head.
 *
 * return:
 * ele_size: write succeeded.
 * 0: no write, buf is full.
 * UINT32_MAX: failed, sbuf corrupted.
 */

uint32_t sbuf_put(struct shared_buf *sbuf, uint8_t *data, uint32_t max_len)
{
	void *to;
	uint32_t next_tail;
	uint32_t ele_size, ret;
	bool trigger_overwrite = false;

	stac();
	ele_size = sbuf->ele_size;
	next_tail = sbuf_next_ptr(sbuf->tail, ele_size, sbuf->size);

	if ((next_tail == sbuf->head) && ((sbuf->flags & OVERWRITE_EN) == 0U)) {
		/* if overwrite is not enabled, return 0 directly when the buffer is full */
		ret = 0U;
	} else if (ele_size <= max_len) {
		if (next_tail == sbuf->head) {
			/* accumulate overrun count if necessary */
			sbuf->overrun_cnt += sbuf->flags & OVERRUN_CNT_EN;
			trigger_overwrite = true;
		}
		to = (void *)sbuf + SBUF_HEAD_SIZE + sbuf->tail;

		(void)memcpy_s(to, ele_size, data, max_len);
		/* make sure the data is written before head/tail are updated */
		cpu_write_memory_barrier();

		if (trigger_overwrite) {
			sbuf->head = sbuf_next_ptr(sbuf->head,
					ele_size, sbuf->size);
		}
		sbuf->tail = next_tail;
		ret = ele_size;
	} else {
		/* ele_size exceeds the caller-provided max_len: sbuf must be corrupted */
		ret = UINT32_MAX;
	}
	clac();

	return ret;
}
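
/*
 * Minimal producer-side usage sketch for sbuf_put() (a sketch under
 * assumptions: the hypothetical get_shared_buf() accessor and the 32-byte
 * record are not defined in this file):
 *
 *	struct shared_buf *sbuf = get_shared_buf();
 *	uint8_t rec[32];
 *	uint32_t written;
 *
 *	written = sbuf_put(sbuf, rec, 32U);
 *	if (written == UINT32_MAX) {
 *		pr_err("sbuf corrupted: ele_size exceeds the 32 bytes provided");
 *	} else if (written == 0U) {
 *		pr_err("sbuf full, record dropped (OVERWRITE_EN not set)");
 *	}
 *
 * A non-zero, non-UINT32_MAX return equals sbuf->ele_size, i.e. the number
 * of bytes actually stored.
 */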

int32_t sbuf_setup_common(struct acrn_vm *vm, uint16_t cpu_id, uint32_t sbuf_id, uint64_t *hva)
{
	int32_t ret = 0;

	switch (sbuf_id) {
	case ACRN_TRACE:
	case ACRN_HVLOG:
	case ACRN_SEP:
	case ACRN_SOCWATCH:
		ret = sbuf_share_setup(cpu_id, sbuf_id, hva);
		break;
	case ACRN_ASYNCIO:
		ret = init_asyncio(vm, hva);
		break;
	case ACRN_VM_EVENT:
		ret = init_vm_event(vm, hva);
		break;
	default:
		pr_err("%s not support sbuf_id %d", __func__, sbuf_id);
		ret = -1;
	}

	return ret;
}
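
/*
 * Illustrative call-site sketch (a sketch only: "param" and its fields stand
 * in for hypothetical hypercall parameters and are not defined here; gpa2hva()
 * is the hypervisor's GPA-to-HVA translation helper):
 *
 *	uint64_t *hva = (uint64_t *)gpa2hva(vm, param->gpa);
 *	int32_t err;
 *
 *	err = sbuf_setup_common(vm, (uint16_t)param->cpu_id, param->sbuf_id, hva);
 *	if (err != 0) {
 *		pr_err("sbuf %u setup failed", param->sbuf_id);
 *	}
 */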

/* try to put a batch of elements from data into sbuf.
 * data_size should be equal to n * elem_size; trailing bytes that cannot
 * fill a whole element are ignored.
 *
 * return:
 * elem_size * n: bytes put in sbuf
 * UINT32_MAX: failed, sbuf corrupted.
 */
uint32_t sbuf_put_many(struct shared_buf *sbuf, uint32_t elem_size, uint8_t *data, uint32_t data_size)
{
	uint32_t ret, sent = 0U;
	uint32_t i;

	for (i = 0U; i < (data_size / elem_size); i++) {
		ret = sbuf_put(sbuf, data + i * elem_size, elem_size);
		if (ret == elem_size) {
			sent += ret;
		} else {
			if (ret == UINT32_MAX) {
				sent = UINT32_MAX;
			}
			break;
		}
	}
	return sent;
}
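
/*
 * Batch usage sketch for sbuf_put_many() (a sketch under assumptions: the
 * record layout below is hypothetical and must match the sbuf's ele_size):
 *
 *	struct rec {
 *		uint32_t type;
 *		uint32_t data;
 *	} recs[4];
 *	uint32_t sent;
 *
 *	sent = sbuf_put_many(sbuf, (uint32_t)sizeof(struct rec),
 *			(uint8_t *)recs, (uint32_t)sizeof(recs));
 *	if (sent == UINT32_MAX) {
 *		pr_err("sbuf corrupted");
 *	}
 *
 * sent may also be smaller than sizeof(recs): if the buffer fills up part-way
 * (and OVERWRITE_EN is not set), the remaining records are silently skipped.
 */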