/*
 * Copyright (c) 2014 Brian Swetland
 * Copyright (c) 2014-2015 Christopher Anderson
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */

#include <assert.h>
#include <lk/debug.h>
#include <lk/trace.h>
#include <printf.h>
#include <string.h>
#include <malloc.h>

#include <kernel/thread.h>
#include <kernel/semaphore.h>
#include <kernel/spinlock.h>
#include <lib/pktbuf.h>
#include <lib/pool.h>
#include <lk/init.h>

#if WITH_KERNEL_VM
#include <kernel/vm.h>
#endif

#define LOCAL_TRACE 0

static pool_t pktbuf_pool;
static semaphore_t pktbuf_sem;
static spin_lock_t lock;
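
/* Illustrative usage (a sketch, not part of this file; eth_hdr, payload, and
 * payload_len are hypothetical):
 *
 *   pktbuf_t *p = pktbuf_alloc();                 // header + pool buffer, with headroom
 *   pktbuf_append_data(p, payload, payload_len);  // copy payload at the tail
 *   struct eth_hdr *eth = pktbuf_prepend(p, sizeof(*eth)); // claim headroom for a header
 *   ...hand p to the driver, which calls pktbuf_free(p, true) when done...
 */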

/* Take an object from the pool of pktbuf objects to act as a header or buffer. */
static void *get_pool_object(void) {
    void *entry;
    spin_lock_saved_state_t state;

    sem_wait(&pktbuf_sem);
    spin_lock_irqsave(&lock, state);
    entry = pool_alloc(&pktbuf_pool);
    spin_unlock_irqrestore(&lock, state);

    return entry;
}

/* Return an object to the pktbuf object pool. */
static void free_pool_object(pktbuf_pool_object_t *entry, bool reschedule) {
    DEBUG_ASSERT(entry);
    spin_lock_saved_state_t state;

    spin_lock_irqsave(&lock, state);
    pool_free(&pktbuf_pool, entry);
    spin_unlock_irqrestore(&lock, state);
    sem_post(&pktbuf_sem, reschedule);
}

/* Callback used internally to place a pktbuf_pool_object back in the pool after
 * it was used as a buffer for another pktbuf.
 */
static void free_pktbuf_buf_cb(void *buf, void *arg) {
    free_pool_object((pktbuf_pool_object_t *)buf, true);
}

/* Add a buffer to a pktbuf. Header space for prepending data is reserved based on
 * header_sz. cb is called when the pktbuf is freed / released by the driver level
 * and should handle proper management / freeing of the buffer pointed to by the iovec.
 *
 * Note that flags may mark the buffer as cached; in that case the appropriate
 * driver must perform cache maintenance on the buffer when it sets up buffer
 * descriptors.
 */
void pktbuf_add_buffer(pktbuf_t *p, u8 *buf, u32 len, uint32_t header_sz, uint32_t flags,
                       pktbuf_free_callback cb, void *cb_args) {
    DEBUG_ASSERT(p);
    DEBUG_ASSERT(buf);
    DEBUG_ASSERT(header_sz < len);

    p->buffer = buf;
    p->blen = len;
    p->data = p->buffer + header_sz;
    p->dlen = 0;
    p->flags = PKTBUF_FLAG_EOF | flags;
    p->cb = cb;
    p->cb_args = cb_args;

    /* If we're using a VM then this may be a virtual address; look up whether
     * there is an associated physical address we can store. If not, then
     * stick with the address as presented to us.
     */
#if WITH_KERNEL_VM
    p->phys_base = vaddr_to_paddr(buf) | (uintptr_t) buf % PAGE_SIZE;
#else
    p->phys_base = (uintptr_t) buf;
#endif
}

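/* Allocate a pktbuf header and a backing buffer from the pool, with
 * PKTBUF_MAX_HDR bytes of headroom reserved for prepending headers.
 * Blocks until pool objects are available; returns NULL on pool failure.
 */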
pktbuf_t *pktbuf_alloc(void) {
    pktbuf_t *p = NULL;
    void *buf = NULL;

    p = get_pool_object();
    if (!p) {
        return NULL;
    }

    buf = get_pool_object();
    if (!buf) {
        free_pool_object((pktbuf_pool_object_t *)p, false);
        return NULL;
    }

    memset(p, 0, sizeof(pktbuf_t));
    pktbuf_add_buffer(p, buf, PKTBUF_SIZE, PKTBUF_MAX_HDR, 0, free_pktbuf_buf_cb, NULL);
    return p;
}

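/* Allocate a pktbuf header with no backing buffer; the caller is expected to
 * attach one with pktbuf_add_buffer().
 */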
pktbuf_t *pktbuf_alloc_empty(void) {
    pktbuf_t *p = (pktbuf_t *) get_pool_object();

    if (!p) {
        return NULL;
    }

    p->flags = PKTBUF_FLAG_EOF;
    return p;
}

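/* Release a pktbuf: invoke the buffer free callback (if any), then return the
 * header object to the pool.
 */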
int pktbuf_free(pktbuf_t *p, bool reschedule) {
    DEBUG_ASSERT(p);

    if (p->cb) {
        p->cb(p->buffer, p->cb_args);
    }
    free_pool_object((pktbuf_pool_object_t *)p, reschedule);

    return 1;
}

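/* Copy sz bytes from data to the tail of the pktbuf's data region. */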
void pktbuf_append_data(pktbuf_t *p, const void *data, size_t sz) {
    if (pktbuf_avail_tail(p) < sz) {
        panic("pktbuf_append_data: overflow");
    }

    memcpy(p->data + p->dlen, data, sz);
    p->dlen += sz;
}

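/* Reserve sz bytes at the tail of the pktbuf and return a pointer to the
 * start of the reserved region so the caller can fill it in place.
 */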
void *pktbuf_append(pktbuf_t *p, size_t sz) {
    if (pktbuf_avail_tail(p) < sz) {
        panic("pktbuf_append: overflow");
    }

    void *data = p->data + p->dlen;
    p->dlen += sz;

    return data;
}

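/* Claim sz bytes of headroom ahead of the current data pointer and return the
 * new start of data; typically used to prepend protocol headers.
 */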
void *pktbuf_prepend(pktbuf_t *p, size_t sz) {
    if (pktbuf_avail_head(p) < sz) {
        panic("pktbuf_prepend: not enough space");
    }

    p->dlen += sz;
    p->data -= sz;

    return p->data;
}

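/* Consume sz bytes from the head of the pktbuf and return a pointer to the
 * consumed region, or NULL if fewer than sz bytes are buffered.
 */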
void *pktbuf_consume(pktbuf_t *p, size_t sz) {
    void *data = p->data;

    if (sz > p->dlen) {
        return NULL;
    }

    p->data += sz;
    p->dlen -= sz;

    return data;
}

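/* Trim up to sz bytes from the tail of the pktbuf's data region. */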
void pktbuf_consume_tail(pktbuf_t *p, size_t sz) {
    if (sz > p->dlen) {
        p->dlen = 0;
        return;
    }

    p->dlen -= sz;
}

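/* Dump the pointers, lengths, and physical base of a pktbuf for debugging. */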
void pktbuf_dump(pktbuf_t *p) {
    printf("pktbuf data %p, buffer %p, dlen %u, data offset %lu, phys_base %p\n",
           p->data, p->buffer, p->dlen,
           (unsigned long)((uintptr_t) p->data - (uintptr_t) p->buffer),
           (void *)p->phys_base);
}

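/* Carve out a contiguous slab for the pool at boot, then initialize the pool
 * and the counting semaphore that tracks free pool objects.
 */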
static void pktbuf_init(uint level) {
    void *slab;

#if LK_DEBUGLEVEL > 0
    printf("pktbuf: creating %u pktbuf entries of size %zu (total %zu)\n",
           PKTBUF_POOL_SIZE, sizeof(struct pktbuf_pool_object),
           PKTBUF_POOL_SIZE * sizeof(struct pktbuf_pool_object));
#endif

#if WITH_KERNEL_VM
    if (vmm_alloc_contiguous(vmm_get_kernel_aspace(), "pktbuf",
                             PKTBUF_POOL_SIZE * sizeof(struct pktbuf_pool_object),
                             &slab, 0, 0, ARCH_MMU_FLAG_CACHED) < 0) {
        printf("Failed to initialize pktbuf hdr slab\n");
        return;
    }
#else
    slab = memalign(CACHE_LINE, PKTBUF_POOL_SIZE * sizeof(pktbuf_pool_object_t));
#endif

    pool_init(&pktbuf_pool, sizeof(struct pktbuf_pool_object), CACHE_LINE, PKTBUF_POOL_SIZE, slab);
    sem_init(&pktbuf_sem, PKTBUF_POOL_SIZE);
}

LK_INIT_HOOK(pktbuf, pktbuf_init, LK_INIT_LEVEL_THREADING);