/*
 * Copyright (c) 2014 Brian Swetland
 * Copyright (c) 2014-2015 Christopher Anderson
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */

#include <assert.h>
#include <lk/debug.h>
#include <lk/trace.h>
#include <printf.h>
#include <string.h>
#include <malloc.h>

#include <kernel/thread.h>
#include <kernel/semaphore.h>
#include <kernel/spinlock.h>
#include <lib/pktbuf.h>
#include <lib/pool.h>
#include <lk/init.h>

#if WITH_KERNEL_VM
#include <kernel/vm.h>
#endif

#define LOCAL_TRACE 0

static pool_t pktbuf_pool;
static semaphore_t pktbuf_sem;
static spin_lock_t lock;

/* Take an object from the pool of pktbuf objects to act as a header or buffer. */
static void *get_pool_object(void) {
    void *entry;
    spin_lock_saved_state_t state;

    sem_wait(&pktbuf_sem);
    spin_lock_irqsave(&lock, state);
    entry = pool_alloc(&pktbuf_pool);
    spin_unlock_irqrestore(&lock, state);

    return entry;
}

/* Return an object to the pktbuf object pool. */
static void free_pool_object(pktbuf_pool_object_t *entry, bool reschedule) {
    DEBUG_ASSERT(entry);
    spin_lock_saved_state_t state;

    spin_lock_irqsave(&lock, state);
    pool_free(&pktbuf_pool, entry);
    spin_unlock_irqrestore(&lock, state);
    sem_post(&pktbuf_sem, reschedule);
}

/* Callback used internally to place a pktbuf_pool_object back in the pool after
 * it was used as a buffer for another pktbuf.
 */
static void free_pktbuf_buf_cb(void *buf, void *arg) {
    free_pool_object((pktbuf_pool_object_t *)buf, true);
}

/* Add a buffer to a pktbuf. Header space for prepending data is adjusted based on
 * header_sz. cb is called when the pktbuf is freed / released by the driver level
 * and should handle proper management / freeing of the buffer pointed to by the iovec.
 *
 * Note that a flag in `flags` may mark the buffer as cached; the appropriate
 * driver must handle such buffers properly when it's time to deal with buffer
 * descriptors.
 */
void pktbuf_add_buffer(pktbuf_t *p, u8 *buf, u32 len, uint32_t header_sz, uint32_t flags,
                       pktbuf_free_callback cb, void *cb_args) {
    DEBUG_ASSERT(p);
    DEBUG_ASSERT(buf);
    DEBUG_ASSERT(header_sz < len);

    p->buffer = buf;
    p->blen = len;
    p->data = p->buffer + header_sz;
    p->dlen = 0;
    p->flags = PKTBUF_FLAG_EOF | flags;
    p->cb = cb;
    p->cb_args = cb_args;

    /* If we're using a VM then this may be a virtual address; look up whether
     * there is an associated physical address we can store. If not, then
     * stick with the address as presented to us.
     */
#if WITH_KERNEL_VM
    p->phys_base = vaddr_to_paddr(buf) | (uintptr_t) buf % PAGE_SIZE;
#else
    p->phys_base = (uintptr_t) buf;
#endif
}

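/* A minimal usage sketch (not part of this file): `my_dma_buf`, `my_buf_free`,
 * and the 2048-byte length are hypothetical. A driver that owns its own buffer
 * memory can attach it to a pktbuf and reclaim it through the callback when
 * the pktbuf is freed:
 *
 *   static void my_buf_free(void *buf, void *arg) {
 *       // hand `buf` back to the driver's own allocator
 *   }
 *
 *   pktbuf_t *p = pktbuf_alloc_empty();
 *   if (p) {
 *       // reserve PKTBUF_MAX_HDR bytes of headroom for later prepends
 *       pktbuf_add_buffer(p, my_dma_buf, 2048, PKTBUF_MAX_HDR, 0,
 *                         my_buf_free, NULL);
 *   }
 */
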
pktbuf_t *pktbuf_alloc(void) {
    pktbuf_t *p = NULL;
    void *buf = NULL;

    p = get_pool_object();
    if (!p) {
        return NULL;
    }

    buf = get_pool_object();
    if (!buf) {
        free_pool_object((pktbuf_pool_object_t *)p, false);
        return NULL;
    }

    memset(p, 0, sizeof(pktbuf_t));
    pktbuf_add_buffer(p, buf, PKTBUF_SIZE, PKTBUF_MAX_HDR, 0, free_pktbuf_buf_cb, NULL);
    return p;
}

void pktbuf_reset(pktbuf_t *p, uint32_t header_sz) {
    DEBUG_ASSERT(p);
    DEBUG_ASSERT(p->buffer);
    DEBUG_ASSERT(header_sz < p->blen);

    p->data = p->buffer + header_sz;
    p->dlen = 0;
}

pktbuf_t *pktbuf_alloc_empty(void) {
    pktbuf_t *p = (pktbuf_t *) get_pool_object();

    /* guard against allocation failure, as pktbuf_alloc() does */
    if (p) {
        p->flags = PKTBUF_FLAG_EOF;
    }
    return p;
}

int pktbuf_free(pktbuf_t *p, bool reschedule) {
    DEBUG_ASSERT(p);

    if (p->cb) {
        p->cb(p->buffer, p->cb_args);
    }
    free_pool_object((pktbuf_pool_object_t *)p, false);

    return 1;
}

void pktbuf_append_data(pktbuf_t *p, const void *data, size_t sz) {
    if (pktbuf_avail_tail(p) < sz) {
        panic("pktbuf_append_data: overflow");
    }

    memcpy(p->data + p->dlen, data, sz);
    p->dlen += sz;
}

void *pktbuf_append(pktbuf_t *p, size_t sz) {
    if (pktbuf_avail_tail(p) < sz) {
        panic("pktbuf_append: overflow");
    }

    void *data = p->data + p->dlen;
    p->dlen += sz;

    return data;
}

void *pktbuf_prepend(pktbuf_t *p, size_t sz) {
    if (pktbuf_avail_head(p) < sz) {
        panic("pktbuf_prepend: not enough space");
    }

    p->dlen += sz;
    p->data -= sz;

    return p->data;
}

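/* Usage sketch for the append/prepend pattern: `eth_hdr_t`, `payload`, and
 * `payload_len` are hypothetical names. pktbuf_alloc() reserves PKTBUF_MAX_HDR
 * bytes of headroom, so a stack can append the payload first and then walk
 * p->data back once per protocol header:
 *
 *   pktbuf_t *p = pktbuf_alloc();
 *   pktbuf_append_data(p, payload, payload_len);
 *   eth_hdr_t *eth = pktbuf_prepend(p, sizeof(*eth));
 *   // fill in *eth; p->data now points at the Ethernet header
 */
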
void *pktbuf_consume(pktbuf_t *p, size_t sz) {
    void *data = p->data;

    if (sz > p->dlen) {
        return NULL;
    }

    p->data += sz;
    p->dlen -= sz;

    return data;
}

void pktbuf_consume_tail(pktbuf_t *p, size_t sz) {
    if (sz > p->dlen) {
        p->dlen = 0;
        return;
    }

    p->dlen -= sz;
}

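/* Receive-side sketch: `ip_hdr_t` and `crc_len` are hypothetical. Headers are
 * parsed by consuming from the front of the buffer, and a trailing checksum
 * can be stripped with pktbuf_consume_tail():
 *
 *   ip_hdr_t *ip = pktbuf_consume(p, sizeof(*ip));
 *   if (ip) {
 *       pktbuf_consume_tail(p, crc_len);  // p->data/p->dlen now cover the payload
 *   }
 */
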
void pktbuf_dump(pktbuf_t *p) {
    printf("pktbuf data %p, buffer %p, dlen %u, data offset %lu, phys_base %p\n",
           p->data, p->buffer, p->dlen, (uintptr_t) p->data - (uintptr_t) p->buffer,
           (void *)p->phys_base);
}

static void pktbuf_init(uint level) {
    void *slab;

#if LK_DEBUGLEVEL > 0
    printf("pktbuf: creating %u pktbuf entries of size %zu (total %zu)\n",
           PKTBUF_POOL_SIZE, sizeof(struct pktbuf_pool_object),
           PKTBUF_POOL_SIZE * sizeof(struct pktbuf_pool_object));
#endif

#if WITH_KERNEL_VM
    if (vmm_alloc_contiguous(vmm_get_kernel_aspace(), "pktbuf",
                             PKTBUF_POOL_SIZE * sizeof(struct pktbuf_pool_object),
                             &slab, 0, 0, ARCH_MMU_FLAG_CACHED) < 0) {
        printf("Failed to initialize pktbuf hdr slab\n");
        return;
    }
#else
    slab = memalign(CACHE_LINE, PKTBUF_POOL_SIZE * sizeof(pktbuf_pool_object_t));
#endif

    pool_init(&pktbuf_pool, sizeof(struct pktbuf_pool_object), CACHE_LINE, PKTBUF_POOL_SIZE, slab);
    sem_init(&pktbuf_sem, PKTBUF_POOL_SIZE);
}

LK_INIT_HOOK(pktbuf, pktbuf_init, LK_INIT_LEVEL_THREADING);