/*
 * Copyright (c) 2008-2015 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#include <stdlib.h>
#include <lk/debug.h>
#include <lk/trace.h>
#include <lk/pow2.h>
#include <string.h>
#include <assert.h>
#include <lib/cbuf.h>
#include <kernel/event.h>
#include <kernel/spinlock.h>

#define LOCAL_TRACE 0

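// Advance a head/tail offset by 'inc', wrapping around the power-of-2
// sized buffer. modpow2() reduces the sum with a mask, avoiding a divide.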
#define INC_POINTER(cbuf, ptr, inc) \
    modpow2(((ptr) + (inc)), (cbuf)->len_pow2)

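/*
 * Illustrative usage sketch (rx_buf and the surrounding code are
 * hypothetical, not part of this file): a typical driver writes from its
 * interrupt handler and reads from a thread, blocking until data arrives.
 *
 *   static cbuf_t rx_buf;
 *   cbuf_initialize(&rx_buf, 128);               // len must be a power of 2
 *   cbuf_write(&rx_buf, &byte, 1, false);        // producer side
 *   cbuf_read(&rx_buf, out, sizeof(out), true);  // consumer side, blocking
 */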
void cbuf_initialize(cbuf_t *cbuf, size_t len) {
    cbuf_initialize_etc(cbuf, len, malloc(len));
}

void cbuf_initialize_etc(cbuf_t *cbuf, size_t len, void *buf) {
    DEBUG_ASSERT(cbuf);
    DEBUG_ASSERT(len > 0);
    DEBUG_ASSERT(ispow2(len)); // INC_POINTER relies on a power-of-2 size

    cbuf->head = 0;
    cbuf->tail = 0;
    cbuf->len_pow2 = log2_uint(len);
    cbuf->buf = buf;
    event_init(&cbuf->event, false, 0);
    spin_lock_init(&cbuf->lock);

    LTRACEF("len %zu, len_pow2 %u\n", len, cbuf->len_pow2);
}

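// Note that one slot is always left unused so that head == tail
// unambiguously means "empty" rather than "full"; a cbuf initialized with
// len 8 can therefore hold at most 7 bytes at any one time.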
size_t cbuf_space_avail(cbuf_t *cbuf) {
    uint consumed = modpow2((uint)(cbuf->head - cbuf->tail), cbuf->len_pow2);
    return valpow2(cbuf->len_pow2) - consumed - 1;
}

size_t cbuf_space_used(cbuf_t *cbuf) {
    return modpow2((uint)(cbuf->head - cbuf->tail), cbuf->len_pow2);
}

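// Copy up to len bytes into the buffer, returning how many bytes were
// actually written (less than len if the buffer fills). A NULL source
// buffer zero-fills the written region instead of copying.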
size_t cbuf_write(cbuf_t *cbuf, const void *_buf, size_t len, bool canreschedule) {
    const char *buf = (const char *)_buf;

    LTRACEF("len %zu\n", len);

    DEBUG_ASSERT(cbuf);
    DEBUG_ASSERT(len < valpow2(cbuf->len_pow2));

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&cbuf->lock, state);

    size_t write_len;
    size_t pos = 0;

    while (pos < len && cbuf_space_avail(cbuf) > 0) {
        if (cbuf->head >= cbuf->tail) {
            if (cbuf->tail == 0) {
                // Special case - if tail is at position 0, we can't write all
                // the way to the end of the buffer. Otherwise, head ends up at
                // 0, head == tail, and the buffer is considered "empty" again.
                write_len =
                    MIN(valpow2(cbuf->len_pow2) - cbuf->head - 1, len - pos);
            } else {
                // Write to the end of the buffer.
                write_len =
                    MIN(valpow2(cbuf->len_pow2) - cbuf->head, len - pos);
            }
        } else {
            // Write from head to tail-1.
            write_len = MIN(cbuf->tail - cbuf->head - 1, len - pos);
        }

        // if it's full, abort and return how much we've written
        if (write_len == 0) {
            break;
        }

        if (NULL == buf) {
            memset(cbuf->buf + cbuf->head, 0, write_len);
        } else {
            memcpy(cbuf->buf + cbuf->head, buf + pos, write_len);
        }

        cbuf->head = INC_POINTER(cbuf, cbuf->head, write_len);
        pos += write_len;
    }

    // wake any readers blocked in cbuf_read() or cbuf_read_char()
    if (cbuf->head != cbuf->tail)
        event_signal(&cbuf->event, false);

    spin_unlock_irqrestore(&cbuf->lock, state);

    // XXX convert to only rescheduling if
    if (canreschedule)
        thread_preempt();

    return pos;
}

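// Read up to buflen bytes, returning the number actually read. With
// block = true the call waits until at least one byte is available; a NULL
// destination buffer consumes and discards the bytes.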
size_t cbuf_read(cbuf_t *cbuf, void *_buf, size_t buflen, bool block) {
    char *buf = (char *)_buf;

    DEBUG_ASSERT(cbuf);

retry:
    // block on the cbuf outside of the lock, which may unblock us early,
    // so we double check for data below
    if (block)
        event_wait(&cbuf->event);

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&cbuf->lock, state);

    // see if there's data available
    size_t ret = 0;
    if (cbuf->tail != cbuf->head) {
        size_t pos = 0;

        // loop until we've read everything we need;
        // at most this will make two passes to deal with wraparound
        while (pos < buflen && cbuf->tail != cbuf->head) {
            size_t read_len;
            if (cbuf->head > cbuf->tail) {
                // simple case where there is no wraparound
                read_len = MIN(cbuf->head - cbuf->tail, buflen - pos);
            } else {
                // read to the end of the buffer in this pass
                read_len = MIN(valpow2(cbuf->len_pow2) - cbuf->tail, buflen - pos);
            }

            // Only perform the copy if a buf was supplied
            if (NULL != buf) {
                memcpy(buf + pos, cbuf->buf + cbuf->tail, read_len);
            }

            cbuf->tail = INC_POINTER(cbuf, cbuf->tail, read_len);
            pos += read_len;
        }

        if (cbuf->tail == cbuf->head) {
            DEBUG_ASSERT(pos > 0);
            // we've emptied the buffer, unsignal the event
            event_unsignal(&cbuf->event);
        }

        ret = pos;
    }

    spin_unlock_irqrestore(&cbuf->lock, state);

    // we apparently blocked but raced with another thread and found no data, retry
    if (block && ret == 0)
        goto retry;

    return ret;
}

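// Describe the currently readable data without consuming it. Because the
// data may wrap around the end of the buffer, it is returned as up to two
// iovec regions; regions[1] is zeroed when the data is contiguous.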
size_t cbuf_peek(cbuf_t *cbuf, iovec_t *regions) {
    DEBUG_ASSERT(cbuf && regions);

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&cbuf->lock, state);

    size_t ret = cbuf_space_used(cbuf);
    size_t sz  = cbuf_size(cbuf);

    DEBUG_ASSERT(cbuf->tail < sz);
    DEBUG_ASSERT(ret <= sz);

    regions[0].iov_base = ret ? (cbuf->buf + cbuf->tail) : NULL;
    if (ret + cbuf->tail > sz) {
        // the data wraps; the second region covers the bytes at the
        // start of the buffer
        regions[0].iov_len  = sz - cbuf->tail;
        regions[1].iov_base = cbuf->buf;
        regions[1].iov_len  = ret - regions[0].iov_len;
    } else {
        regions[0].iov_len  = ret;
        regions[1].iov_base = NULL;
        regions[1].iov_len  = 0;
    }

    spin_unlock_irqrestore(&cbuf->lock, state);
    return ret;
}

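// Write a single character, returning 1 on success or 0 if the buffer is
// full. Unlike cbuf_write(), the reschedule decision is delegated to
// event_signal() rather than made via an explicit thread_preempt().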
size_t cbuf_write_char(cbuf_t *cbuf, char c, bool canreschedule) {
    DEBUG_ASSERT(cbuf);

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&cbuf->lock, state);

    size_t ret = 0;
    if (cbuf_space_avail(cbuf) > 0) {
        cbuf->buf[cbuf->head] = c;

        cbuf->head = INC_POINTER(cbuf, cbuf->head, 1);
        ret = 1;

        if (cbuf->head != cbuf->tail)
            event_signal(&cbuf->event, canreschedule);
    }

    spin_unlock_irqrestore(&cbuf->lock, state);

    return ret;
}

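// Read a single character, returning 1 on success or 0 if the buffer is
// empty and block is false. As in cbuf_read(), a blocking caller may be
// woken early and loop back to wait again.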
size_t cbuf_read_char(cbuf_t *cbuf, char *c, bool block) {
    DEBUG_ASSERT(cbuf);
    DEBUG_ASSERT(c);

retry:
    if (block)
        event_wait(&cbuf->event);

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&cbuf->lock, state);

    // see if there's data available
    size_t ret = 0;
    if (cbuf->tail != cbuf->head) {

        *c = cbuf->buf[cbuf->tail];
        cbuf->tail = INC_POINTER(cbuf, cbuf->tail, 1);

        if (cbuf->tail == cbuf->head) {
            // we've emptied the buffer, unsignal the event
            event_unsignal(&cbuf->event);
        }

        ret = 1;
    }

    spin_unlock_irqrestore(&cbuf->lock, state);

    if (block && ret == 0)
        goto retry;

    return ret;
}