1 // Copyright 2016 The Fuchsia Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <assert.h>
6 #include <dirent.h>
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <inttypes.h>
10 #include <limits.h>
11 #include <stdio.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <unistd.h>
15 #include <threads.h>
16 
17 #include "eth-client.h"
18 
19 #include <fuchsia/hardware/ethernet/c/fidl.h>
20 #include <zircon/device/device.h>
21 #include <zircon/process.h>
22 #include <zircon/syscalls.h>
23 #include <zircon/time.h>
24 #include <zircon/types.h>
25 
26 #include <inet6/inet6.h>
27 #include <inet6/netifc.h>
28 
29 #include <lib/fdio/io.h>
30 #include <lib/fdio/util.h>
31 #include <lib/fdio/watcher.h>
32 
// Round n up to the next multiple of a. NOTE(review): assumes a is a
// power of two; appears unused in this portion of the file.
#define ALIGN(n, a) (((n) + ((a) - 1)) & ~((a) - 1))
// if nonzero, drop 1 in DROP_PACKETS packets at random
#define DROP_PACKETS 0
36 
37 #if DROP_PACKETS > 0
38 
39 //TODO: use libc random() once it's actually random
40 
41 // Xorshift32 prng
// Xorshift32 PRNG state. A zero seed is absorbing (always yields zero),
// so seed with any nonzero value.
typedef struct {
    uint32_t n;
} rand32_t;

// Advance the xorshift32 generator and return the new 32-bit value,
// which is also stored back into *state.
static inline uint32_t rand32(rand32_t* state) {
    uint32_t x = state->n;
    x ^= x << 13;
    x ^= x >> 17;
    x ^= x << 5;
    state->n = x;
    return x;
}
53 
// Fixed nonzero seed: the drop pattern is deterministic across runs,
// which is fine for this debug-only feature.
rand32_t rstate = { .n = 0x8716253 };
#define random() rand32(&rstate)

static int txc;  // count of tx packets considered for dropping
static int rxc;  // rx counter — NOTE(review): not referenced in this chunk
59 #endif
60 
// Global connection state; all of it is guarded by eth_lock.
static mtx_t eth_lock = MTX_INIT;
static zx_handle_t netsvc = ZX_HANDLE_INVALID;  // channel to the ethernet device
static eth_client_t* eth;                       // fifo client; NULL when disconnected
static uint8_t netmac[6];                       // device MAC address
static size_t netmtu;                           // device MTU

static zx_handle_t iovmo;  // VMO shared with the driver for packet data
static void* iobuf;        // local mapping of iovmo

#define NET_BUFFERS 256    // buffers queued per direction (tx/rx)
#define NET_BUFFERSZ 2048  // fixed size of every buffer, in bytes

// Stamp placed in every eth_buffer so corruption can be detected.
#define ETH_BUFFER_MAGIC 0x424201020304A7A7UL

// eth_buffer.state values — who currently owns the buffer:
#define ETH_BUFFER_FREE   0u // on free list
#define ETH_BUFFER_TX     1u // in tx ring
#define ETH_BUFFER_RX     2u // in rx ring
#define ETH_BUFFER_CLIENT 3u // in use by stack
79 
typedef struct eth_buffer eth_buffer_t;

// Descriptor for one NET_BUFFERSZ chunk of the shared iobuf mapping.
// Descriptors live in a single arena (eth_buffer_base) so they can be
// range- and alignment-checked in _check_ethbuf().
struct eth_buffer {
    uint64_t magic;      // always ETH_BUFFER_MAGIC
    eth_buffer_t* next;  // free-list link; NULL while owned
    void* data;          // backing storage inside iobuf
    uint32_t state;      // ETH_BUFFER_* ownership state
    uint32_t reserved;   // padding to reach the asserted size
};

// 32-byte descriptors make the low-bits alignment check in
// _check_ethbuf() valid. NOTE(review): assumes 64-bit pointers.
static_assert(sizeof(eth_buffer_t) == 32, "");

static eth_buffer_t* eth_buffer_base;  // descriptor arena
static size_t eth_buffer_count;        // number of descriptors in the arena
94 
// Validate that ethbuf is a live descriptor from the arena in the
// expected ownership state. Returns 0 when valid; otherwise logs the
// first violation found and returns -1.
static int _check_ethbuf(eth_buffer_t* ethbuf, uint32_t state) {
    if (((uintptr_t)ethbuf) & 31) {
        printf("ethbuf %p misaligned\n", ethbuf);
    } else if ((ethbuf < eth_buffer_base) ||
               (ethbuf >= (eth_buffer_base + eth_buffer_count))) {
        printf("ethbuf %p outside of arena\n", ethbuf);
    } else if (ethbuf->magic != ETH_BUFFER_MAGIC) {
        printf("ethbuf %p bad magic\n", ethbuf);
    } else if (ethbuf->state != state) {
        printf("ethbuf %p incorrect state (%u != %u)\n", ethbuf, ethbuf->state, state);
    } else {
        return 0;
    }
    return -1;
}
115 
// Hard-failing variant of _check_ethbuf: any violation indicates memory
// corruption or a state-machine bug, so abort immediately.
static void check_ethbuf(eth_buffer_t* ethbuf, uint32_t state) {
    if (_check_ethbuf(ethbuf, state) != 0) {
        __builtin_trap();
    }
}
121 
// Singly-linked free list of available buffers, guarded by eth_lock.
static eth_buffer_t* eth_buffers = NULL;

// Return buf — which must currently be in ownership state `state` —
// to the head of the free list. Caller holds eth_lock.
static void eth_put_buffer_locked(eth_buffer_t* buf, uint32_t state) __TA_REQUIRES(eth_lock) {
    check_ethbuf(buf, state);
    buf->state = ETH_BUFFER_FREE;
    buf->next = eth_buffers;
    eth_buffers = buf;
}
130 
// Public API: the stack is done with a buffer it obtained from
// eth_get_buffer(); recycle it onto the free list.
void eth_put_buffer(eth_buffer_t* ethbuf) {
    mtx_lock(&eth_lock);
    eth_put_buffer_locked(ethbuf, ETH_BUFFER_CLIENT);
    mtx_unlock(&eth_lock);
}
136 
// eth_complete_tx callback: the driver has finished with this tx buffer,
// so move it back to the free list. `ctx` is unused; `cookie` is the
// eth_buffer_t* passed to eth_queue_tx().
static void tx_complete(void* ctx, void* cookie) __TA_REQUIRES(eth_lock) {
    eth_put_buffer_locked(cookie, ETH_BUFFER_TX);
}
140 
// Pop a buffer from the free list and mark it with `newstate`.
// All buffers are fixed NET_BUFFERSZ chunks; `sz` is only bounds-checked.
// If the free list is empty, completed tx entries are reaped first; when
// `block` is true the routine then sleeps on the tx fifo — DROPPING
// eth_lock while waiting and re-acquiring it afterwards, so callers must
// not rely on locked state spanning this call.
// Returns ZX_OK, ZX_ERR_INVALID_ARGS (sz too large), ZX_ERR_SHOULD_WAIT
// (empty and !block), ZX_ERR_PEER_CLOSED, or a wait error.
static zx_status_t eth_get_buffer_locked(size_t sz, void** data, eth_buffer_t** out,
                                         uint32_t newstate, bool block) __TA_REQUIRES(eth_lock) {
    eth_buffer_t* buf;
    if (sz > NET_BUFFERSZ) {
        return ZX_ERR_INVALID_ARGS;
    }
    if (eth_buffers == NULL) {
        while (1) {
            // Reap finished tx buffers; tx_complete pushes them onto the
            // free list.
            eth_complete_tx(eth, NULL, tx_complete);
            if (eth_buffers != NULL) {
                break;
            }
            if (!block) {
                return ZX_ERR_SHOULD_WAIT;
            }
            zx_status_t status;
            zx_signals_t signals;
            // Sleep unlocked so completions/other clients can progress.
            mtx_unlock(&eth_lock);
            status = zx_object_wait_one(eth->tx_fifo, ZX_FIFO_READABLE | ZX_FIFO_PEER_CLOSED,
                                        ZX_TIME_INFINITE, &signals);
            mtx_lock(&eth_lock);
            if (status < 0) {
                return status;
            }
            if (signals & ZX_FIFO_PEER_CLOSED) {
                return ZX_ERR_PEER_CLOSED;
            }
        }
    }
    buf = eth_buffers;
    eth_buffers = buf->next;
    buf->next = NULL;

    check_ethbuf(buf, ETH_BUFFER_FREE);

    buf->state = newstate;
    *data = buf->data;
    *out = buf;
    return ZX_OK;
}
181 
// Public API: acquire a buffer for the stack (ETH_BUFFER_CLIENT).
// See eth_get_buffer_locked() for sizing and blocking semantics.
zx_status_t eth_get_buffer(size_t sz, void** data, eth_buffer_t** out, bool block) {
    mtx_lock(&eth_lock);
    zx_status_t r = eth_get_buffer_locked(sz, data, out, ETH_BUFFER_CLIENT, block);
    mtx_unlock(&eth_lock);
    return r;
}
188 
// Transmit `len` bytes starting `skip` bytes into ethbuf's data chunk.
// ethbuf must be client-owned. On success, ownership passes to the tx
// ring (tx_complete recycles it later); on any failure the buffer is
// returned to the free list before the error is reported, so the caller
// never owns it after this call.
zx_status_t eth_send(eth_buffer_t* ethbuf, size_t skip, size_t len) {
    zx_status_t status;
    mtx_lock(&eth_lock);

    check_ethbuf(ethbuf, ETH_BUFFER_CLIENT);

#if DROP_PACKETS
    // Debug-only: randomly drop 1 in DROP_PACKETS outgoing packets.
    txc++;
    if ((random() % DROP_PACKETS) == 0) {
        printf("tx drop %d\n", txc);
        eth_put_buffer_locked(ethbuf, ETH_BUFFER_CLIENT);
        status = ZX_ERR_INTERNAL;
        goto fail;
    }
#endif

    if (eth == NULL) {
        printf("eth_fifo_send: not connected\n");
        eth_put_buffer_locked(ethbuf, ETH_BUFFER_CLIENT);
        status = ZX_ERR_ADDRESS_UNREACHABLE;
        goto fail;
    }

    // Hand the buffer to the tx ring; it comes back via tx_complete().
    ethbuf->state = ETH_BUFFER_TX;
    status = eth_queue_tx(eth, ethbuf, ethbuf->data + skip, len, 0);
    if (status < 0) {
        printf("eth_fifo_send: queue tx failed: %d\n", status);
        eth_put_buffer_locked(ethbuf, ETH_BUFFER_TX);
        goto fail;
    }

    mtx_unlock(&eth_lock);
    return ZX_OK;

fail:
    mtx_unlock(&eth_lock);
    return status;
}
227 
// Multicast filtering is not implemented here; always reports success.
// NOTE(review): presumably the device receives all multicast traffic
// regardless — confirm against the driver before relying on filtering.
int eth_add_mcast_filter(const mac_addr_t* addr) {
    (void)addr; // intentionally unused
    return 0;
}
231 
// Absolute deadline for the netifc timer; 0 means disarmed.
// NOTE(review): volatile is used for cross-context visibility here, but
// volatile is not a substitute for atomics if reader and writer are on
// different threads — confirm the threading model.
static volatile zx_time_t net_timer = 0;

// Arm the timer to expire `ms` milliseconds from now.
void netifc_set_timer(uint32_t ms) {
    net_timer = zx_deadline_after(ZX_MSEC(ms));
}
237 
netifc_timer_expired(void)238 int netifc_timer_expired(void) {
239     if (net_timer == 0) {
240         return 0;
241     }
242     if (zx_clock_get_monotonic() > net_timer) {
243         return 1;
244     }
245     return 0;
246 }
247 
// Report the active device's MAC (6 bytes copied into addr) and MTU.
// NOTE(review): netmtu is a size_t narrowed to uint16_t here — assumes
// the device MTU fits in 16 bits; confirm against the driver.
void netifc_get_info(uint8_t* addr, uint16_t* mtu) {
    memcpy(addr, netmac, 6);
    *mtu = netmtu;
}
252 
// fdio directory-watch callback, invoked for each entry appearing in
// /dev/class/ethernet. Tries to adopt the device: optionally matches it
// against a topological path passed via `cookie`, queries its info over
// FIDL, lazily allocates the buffer arena and shared VMO on first use,
// starts the ethernet client, and primes the rx ring.
// Returns ZX_ERR_STOP once a device is adopted (stops the watch) and
// ZX_OK to keep watching; all failures fall through to ZX_OK.
static zx_status_t netifc_open_cb(int dirfd, int event, const char* fn, void* cookie) {
    if (event != WATCH_EVENT_ADD_FILE) {
        return ZX_OK;
    }

    printf("netifc: ? /dev/class/ethernet/%s\n", fn);

    mtx_lock(&eth_lock);
    int fd;
    if ((fd = openat(dirfd, fn, O_RDWR)) < 0) {
        goto finish;
    }

    // If an interface was specified, check the topological path of this device and reject it if it
    // doesn't match.
    if (cookie != NULL) {
        const char* interface = cookie;
        char buf[1024];
        if (ioctl_device_get_topo_path(fd, buf, sizeof(buf)) < 0) {
            close(fd);
            goto finish;
        }
        const char* topo_path = buf;
        // Skip the instance sigil if it's present in either the topological path or the given
        // interface path.
        if (topo_path[0] == '@') topo_path++;
        if (interface[0] == '@') interface++;

        if (strncmp(topo_path, interface, sizeof(buf))) {
            close(fd);
            goto finish;
        }
    }

    // NOTE(review): fdio_get_service_handle consumes fd regardless of
    // outcome (so no close(fd) below) — confirm against fdio docs.
    zx_status_t status = fdio_get_service_handle(fd, &netsvc);
    if (status != ZX_OK) {
        goto finish;
    }

    fuchsia_hardware_ethernet_Info info;
    if (fuchsia_hardware_ethernet_DeviceGetInfo(netsvc, &info) != ZX_OK) {
        goto fail_close_svc;
    }
    if (info.features & (fuchsia_hardware_ethernet_INFO_FEATURE_WLAN |
                         fuchsia_hardware_ethernet_INFO_FEATURE_SYNTH)) {
        // Don't run netsvc for wireless or synthetic network devices
        goto fail_close_svc;
    }
    memcpy(netmac, info.mac.octets, sizeof(netmac));
    netmtu = info.mtu;

    // we only do this the very first time; the arena survives
    // disconnect/reconnect cycles.
    if (eth_buffer_base == NULL) {
        eth_buffer_base = memalign(sizeof(eth_buffer_t), 2 * NET_BUFFERS * sizeof(eth_buffer_t));
        if (eth_buffer_base == NULL) {
            goto fail_close_svc;
        }
        eth_buffer_count = 2 * NET_BUFFERS;
    }

    // we only do this the very first time
    if (iobuf == NULL) {
        // allocate shareable ethernet buffer data heap
        size_t iosize = 2 * NET_BUFFERS * NET_BUFFERSZ;
        if ((status = zx_vmo_create(iosize, ZX_VMO_NON_RESIZABLE, &iovmo)) < 0) {
            goto fail_close_svc;
        }
        zx_object_set_property(iovmo, ZX_PROP_NAME, "eth-buffers", 11);
        if ((status = zx_vmar_map(zx_vmar_root_self(),
                                  ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                                  0, iovmo, 0, iosize, (uintptr_t*)&iobuf)) < 0) {
            zx_handle_close(iovmo);
            iovmo = ZX_HANDLE_INVALID;
            goto fail_close_svc;
        }
        printf("netifc: create %zu eth buffers\n", eth_buffer_count);
        // assign data chunks to ethbufs and seed the free list
        for (unsigned n = 0; n < eth_buffer_count; n++) {
            eth_buffer_base[n].magic = ETH_BUFFER_MAGIC;
            eth_buffer_base[n].data = iobuf + n * NET_BUFFERSZ;
            eth_buffer_base[n].state = ETH_BUFFER_FREE;
            eth_buffer_base[n].reserved = 0;
            eth_put_buffer_locked(eth_buffer_base + n, ETH_BUFFER_FREE);
        }
    }

    status = eth_create(netsvc, iovmo, iobuf, &eth);
    if (status < 0) {
        printf("eth_create() failed: %d\n", status);
        goto fail_close_svc;
    }

    zx_status_t call_status = ZX_OK;
    status = fuchsia_hardware_ethernet_DeviceStart(netsvc, &call_status);
    if (status != ZX_OK || call_status != ZX_OK) {
        printf("netifc: ethernet_start(): %d, %d\n", status, call_status);
        goto fail_destroy_client;
    }

    ip6_init(netmac);

    // enqueue rx buffers (half the arena; the other half serves tx)
    for (unsigned n = 0; n < NET_BUFFERS; n++) {
        void* data;
        eth_buffer_t* ethbuf;
        if (eth_get_buffer_locked(NET_BUFFERSZ, &data, &ethbuf, ETH_BUFFER_RX, false)) {
            printf("netifc: only queued %u buffers (desired: %u)\n", n, NET_BUFFERS);
            break;
        }
        eth_queue_rx(eth, ethbuf, ethbuf->data, NET_BUFFERSZ, 0);
    }

    mtx_unlock(&eth_lock);
    printf("netsvc: using /dev/class/ethernet/%s\n", fn);

    // stop polling
    return ZX_ERR_STOP;

fail_destroy_client:
    eth_destroy(eth);
    eth = NULL;
fail_close_svc:
    zx_handle_close(netsvc);
    netsvc = ZX_HANDLE_INVALID;
finish:
    mtx_unlock(&eth_lock);
    return ZX_OK;
}
381 
netifc_open(const char * interface)382 int netifc_open(const char* interface) {
383     int dirfd;
384     if ((dirfd = open("/dev/class/ethernet", O_DIRECTORY|O_RDONLY)) < 0) {
385         return -1;
386     }
387 
388     zx_status_t status =
389         fdio_watch_directory(dirfd, netifc_open_cb, ZX_TIME_INFINITE, (void*)interface);
390     close(dirfd);
391 
392     // callback returns STOP if it finds and successfully
393     // opens a network interface
394     return (status == ZX_ERR_STOP) ? 0 : -1;
395 }
396 
netifc_close(void)397 void netifc_close(void) {
398     mtx_lock(&eth_lock);
399     if (netsvc != ZX_HANDLE_INVALID) {
400         zx_handle_close(netsvc);
401         netsvc = ZX_HANDLE_INVALID;
402     }
403     if (eth != NULL) {
404         eth_destroy(eth);
405         eth = NULL;
406     }
407     unsigned count = 0;
408     for (unsigned n = 0; n < eth_buffer_count; n++) {
409         switch (eth_buffer_base[n].state) {
410         case ETH_BUFFER_FREE:
411         case ETH_BUFFER_CLIENT:
412             // on free list or owned by client
413             // leave it alone
414             break;
415         case ETH_BUFFER_TX:
416         case ETH_BUFFER_RX:
417             // was sitting in ioring. reclaim.
418             eth_put_buffer_locked(eth_buffer_base + n, eth_buffer_base[n].state);
419             count++;
420             break;
421         default:
422             printf("ethbuf %p: illegal state %u\n",
423                    eth_buffer_base + n, eth_buffer_base[n].state);
424             __builtin_trap();
425             break;
426         }
427     }
428     printf("netifc: recovered %u buffers\n", count);
429     mtx_unlock(&eth_lock);
430 }
431 
// eth_complete_rx callback: hand the received frame to the stack via
// netifc_recv(), then immediately re-queue the same buffer into the rx
// ring. `ctx` is unused; `cookie` is the eth_buffer_t* queued earlier.
static void rx_complete(void* ctx, void* cookie, size_t len, uint32_t flags) {
    eth_buffer_t* ethbuf = cookie;
    check_ethbuf(ethbuf, ETH_BUFFER_RX);
    netifc_recv(ethbuf->data, len);
    eth_queue_rx(eth, ethbuf, ethbuf->data, NET_BUFFERSZ, 0);
}
438 
// Main event pump. Loops forever: drain completed rx packets, check the
// netifc timer, flush pending tx, then sleep on the rx fifo until the
// next deadline. Returns 0 when the timer armed via netifc_set_timer()
// has expired, or -1 on a fifo error.
int netifc_poll(void) {
    for (;;) {
        // Handle any completed rx packets
        zx_status_t status;
        if ((status = eth_complete_rx(eth, NULL, rx_complete)) < 0) {
            printf("netifc: eth rx failed: %d\n", status);
            return -1;
        }

        // Timeout passed
        if (net_timer && zx_clock_get_monotonic() > net_timer) {
            return 0;
        }

        // The stack still has frames queued to send — don't sleep yet.
        if (netifc_send_pending()) {
            continue;
        }

        // Sleep until just past the timer deadline (1ms of slack so the
        // expiry check above is guaranteed to fire on wakeup), or
        // forever when no timer is armed.
        zx_time_t deadline;
        if (net_timer) {
            deadline = zx_time_add_duration(net_timer, ZX_MSEC(1));
        } else {
            deadline = ZX_TIME_INFINITE;
        }
        status = eth_wait_rx(eth, deadline);
        if ((status < 0) && (status != ZX_ERR_TIMED_OUT)) {
            printf("netifc: eth rx wait failed: %d\n", status);
            return -1;
        }
    }
}
470 
471