/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-12-8      Bernard      add file header
 *                             export bsd socket symbol for RT-Thread Application Module
 * 2013-05-25     Bernard      port to v1.4.1
 * 2017-03-26     HuangXiHans  port to v2.0.2
 * 2017-11-15     Bernard      add lock for init_done callback
 * 2018-11-02     MurphyZhao   port to v2.1.0
 * 2020-06-20     liuxianliang port to v2.1.2
 * 2021-06-25     liuxianliang port to v2.0.3
 * 2022-01-18     Meco Man     remove v2.0.2
 * 2022-02-20     Meco Man     integrate v1.4.1 v2.0.3 and v2.1.2 porting layer
 * 2023-10-31     xqyjlj       fix spinlock's deadlock
 */

#include <rtthread.h>
#include <rthw.h>

#include <arch/sys_arch.h>
#include <lwip/sys.h>
#include <lwip/opt.h>
#include <lwip/stats.h>
#include <lwip/err.h>
#include <lwip/debug.h>
#include <lwip/netif.h>
#include <lwip/netifapi.h>
#include <lwip/tcpip.h>
#include <lwip/sio.h>
#include <lwip/init.h>
#include <lwip/dhcp.h>
#include <lwip/inet.h>
#include <netif/ethernetif.h>
#include <netif/etharp.h>

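/*
 * Backing for SYS_ARCH_PROTECT()/SYS_ARCH_UNPROTECT() (see sys_arch_protect()
 * below): SMP builds use a kernel mutex, which avoids the spinlock deadlock
 * noted in the change log, while single-core builds simply disable interrupts
 * through a spinlock.
 */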
#ifdef RT_USING_SMP
static struct rt_mutex _mutex = {0};
#else
static RT_DEFINE_SPINLOCK(_spinlock);
#endif

/*
 * Initialize the ethernetif layer and set the network interface device up
 */
static void tcpip_init_done_callback(void *arg)
{
    rt_sem_release((rt_sem_t)arg);
}

/**
 * LwIP system initialization
 */
int lwip_system_init(void)
{
    rt_err_t rc;
    struct rt_semaphore done_sem;
    static rt_bool_t init_ok = RT_FALSE;

    if (init_ok)
    {
        rt_kprintf("lwip system is already initialized.\n");
        return 0;
    }
#ifdef RT_USING_SMP
    rt_mutex_init(&_mutex, "sys_arch", RT_IPC_FLAG_FIFO);
#endif

    extern int eth_system_device_init_private(void);
    eth_system_device_init_private();

    /* set default netif to NULL */
    netif_default = RT_NULL;

    rc = rt_sem_init(&done_sem, "done", 0, RT_IPC_FLAG_FIFO);
    if (rc != RT_EOK)
    {
        LWIP_ASSERT("Failed to create semaphore", 0);

        return -1;
    }

    tcpip_init(tcpip_init_done_callback, (void *)&done_sem);

    /* wait for initialization to complete */
    if (rt_sem_take(&done_sem, RT_WAITING_FOREVER) != RT_EOK)
    {
        rt_sem_detach(&done_sem);

        return -1;
    }
    rt_sem_detach(&done_sem);

    rt_kprintf("lwIP-%d.%d.%d initialized!\n", LWIP_VERSION_MAJOR, LWIP_VERSION_MINOR, LWIP_VERSION_REVISION);

    init_ok = RT_TRUE;

    return 0;
}
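/*
 * INIT_PREV_EXPORT() registers lwip_system_init() with RT-Thread's automatic
 * initialization sequence, so the TCP/IP stack is brought up during boot
 * before application-level components; lwip_sys_init() below also allows an
 * explicit call.
 */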
INIT_PREV_EXPORT(lwip_system_init);

void sys_init(void)
{
    /* nothing to do in the RT-Thread port */
}

void lwip_sys_init(void)
{
    lwip_system_init();
}

/*
 * Create a new semaphore
 *
 * @return the operation status, ERR_OK on success; others on error
 */
err_t sys_sem_new(sys_sem_t *sem, u8_t count)
{
    static unsigned short counter = 0;
    char tname[RT_NAME_MAX];
    sys_sem_t tmpsem;

    RT_DEBUG_NOT_IN_INTERRUPT;

    rt_snprintf(tname, RT_NAME_MAX, "%s%d", SYS_LWIP_SEM_NAME, counter);
    counter ++;

    tmpsem = rt_sem_create(tname, count, RT_IPC_FLAG_FIFO);
    if (tmpsem == RT_NULL)
    {
        return ERR_MEM;
    }
    else
    {
        *sem = tmpsem;

        return ERR_OK;
    }
}

/*
 * Deallocate a semaphore
 */
void sys_sem_free(sys_sem_t *sem)
{
    RT_DEBUG_NOT_IN_INTERRUPT;
    rt_sem_delete(*sem);
}

/*
 * Signal a semaphore
 */
void sys_sem_signal(sys_sem_t *sem)
{
    rt_sem_release(*sem);
}

/*
 * Block the thread while waiting for the semaphore to be signaled
 *
 * @return If the timeout argument is non-zero, the number of milliseconds
 *         spent waiting for the semaphore; SYS_ARCH_TIMEOUT if the semaphore
 *         was not signaled within the specified time; zero if the thread did
 *         not have to wait
 */
u32_t sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout)
{
    rt_err_t ret;
    s32_t t;
    u32_t tick;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* get the begin tick */
    tick = rt_tick_get();
    if (timeout == 0)
    {
        t = RT_WAITING_FOREVER;
    }
    else
    {
        /* convert milliseconds to OS ticks */
        if (timeout < (1000 / RT_TICK_PER_SECOND))
            t = 1;
        else
            t = timeout / (1000 / RT_TICK_PER_SECOND);
    }

    ret = rt_sem_take(*sem, t);

    if (ret == -RT_ETIMEOUT)
    {
        return SYS_ARCH_TIMEOUT;
    }
    else
    {
        if (ret == RT_EOK)
            ret = 1;
    }

    /* compute the elapsed ticks */
    tick = rt_tick_get() - tick;

    /* convert ticks to milliseconds */
    tick = tick * (1000 / RT_TICK_PER_SECOND);
    if (tick == 0)
        tick = 1;

    return tick;
}

#ifndef sys_sem_valid
/** Check if a semaphore is valid/allocated:
 *  return 1 for valid, 0 for invalid
 */
int sys_sem_valid(sys_sem_t *sem)
{
    int ret = 0;

    if (*sem) ret = 1;

    return ret;
}
#endif

#ifndef sys_sem_set_invalid
/** Set a semaphore invalid so that sys_sem_valid returns 0
 */
void sys_sem_set_invalid(sys_sem_t *sem)
{
    *sem = RT_NULL;
}
#endif

/* ====================== Mutex ====================== */

/** Create a new mutex
 * @param mutex pointer to the mutex to create
 * @return ERR_OK on success; ERR_MEM if the mutex could not be allocated
 */
err_t sys_mutex_new(sys_mutex_t *mutex)
{
    static unsigned short counter = 0;
    char tname[RT_NAME_MAX];
    sys_mutex_t tmpmutex;

    RT_DEBUG_NOT_IN_INTERRUPT;

    rt_snprintf(tname, RT_NAME_MAX, "%s%d", SYS_LWIP_MUTEX_NAME, counter);
    counter ++;

    tmpmutex = rt_mutex_create(tname, RT_IPC_FLAG_PRIO);
    if (tmpmutex == RT_NULL)
    {
        return ERR_MEM;
    }
    else
    {
        *mutex = tmpmutex;
        return ERR_OK;
    }
}

/** Lock a mutex
 * @param mutex the mutex to lock
 */
void sys_mutex_lock(sys_mutex_t *mutex)
{
    RT_DEBUG_NOT_IN_INTERRUPT;
    rt_mutex_take(*mutex, RT_WAITING_FOREVER);
    return;
}

/** Unlock a mutex
 * @param mutex the mutex to unlock
 */
void sys_mutex_unlock(sys_mutex_t *mutex)
{
    rt_mutex_release(*mutex);
}

/** Delete a mutex
 * @param mutex the mutex to delete
 */
void sys_mutex_free(sys_mutex_t *mutex)
{
    RT_DEBUG_NOT_IN_INTERRUPT;
    rt_mutex_delete(*mutex);
}

#ifndef sys_mutex_valid
/** Check if a mutex is valid/allocated:
 *  return 1 for valid, 0 for invalid
 */
int sys_mutex_valid(sys_mutex_t *mutex)
{
    int ret = 0;

    if (*mutex) ret = 1;

    return ret;
}
#endif

#ifndef sys_mutex_set_invalid
/** Set a mutex invalid so that sys_mutex_valid returns 0
 */
void sys_mutex_set_invalid(sys_mutex_t *mutex)
{
    *mutex = RT_NULL;
}
#endif

/* ====================== Mailbox ====================== */

/*
 * Create an empty mailbox for a maximum of "size" elements
 *
 * @return the operation status, ERR_OK on success; others on error
 */
err_t sys_mbox_new(sys_mbox_t *mbox, int size)
{
    static unsigned short counter = 0;
    char tname[RT_NAME_MAX];
    sys_mbox_t tmpmbox;

    RT_DEBUG_NOT_IN_INTERRUPT;

    rt_snprintf(tname, RT_NAME_MAX, "%s%d", SYS_LWIP_MBOX_NAME, counter);
    counter ++;

    tmpmbox = rt_mb_create(tname, size, RT_IPC_FLAG_FIFO);
    if (tmpmbox != RT_NULL)
    {
        *mbox = tmpmbox;

        return ERR_OK;
    }

    return ERR_MEM;
}

/*
 * Deallocate a mailbox
 */
void sys_mbox_free(sys_mbox_t *mbox)
{
    RT_DEBUG_NOT_IN_INTERRUPT;
    rt_mb_delete(*mbox);
    return;
}

/** Post a message to an mbox - may not fail
 * -> blocks if full, only to be used from tasks, not from ISRs
 * @param mbox mbox to post the message to
 * @param msg message to post (ATTENTION: can be NULL)
 */
void sys_mbox_post(sys_mbox_t *mbox, void *msg)
{
    RT_DEBUG_NOT_IN_INTERRUPT;
    rt_mb_send_wait(*mbox, (rt_ubase_t)msg, RT_WAITING_FOREVER);
    return;
}

/*
 * Try to post the "msg" to the mailbox
 *
 * @return ERR_OK if the "msg" is posted, ERR_MEM if the mailbox is full
 */
err_t sys_mbox_trypost(sys_mbox_t *mbox, void *msg)
{
    if (rt_mb_send(*mbox, (rt_ubase_t)msg) == RT_EOK)
    {
        return ERR_OK;
    }

    return ERR_MEM;
}

#if (LWIP_VERSION_MAJOR * 100 + LWIP_VERSION_MINOR) >= 201 /* >= v2.1.0 */
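/*
 * RT-Thread's rt_mb_send() never blocks and is safe to call from interrupt
 * context, so the ISR variant introduced in lwIP 2.1 simply reuses
 * sys_mbox_trypost().
 */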
err_t sys_mbox_trypost_fromisr(sys_mbox_t *q, void *msg)
{
    return sys_mbox_trypost(q, msg);
}
#endif /* (LWIP_VERSION_MAJOR * 100 + LWIP_VERSION_MINOR) >= 201 */

/** Wait for a new message to arrive in the mbox
 * @param mbox mbox to get a message from
 * @param msg pointer where the message is stored
 * @param timeout maximum time (in milliseconds) to wait for a message
 * @return time (in milliseconds) waited for a message, may be 0 if not waited
 *         or SYS_ARCH_TIMEOUT on timeout
 *         The returned time has to be accurate to prevent timer jitter!
 */
u32_t sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, u32_t timeout)
{
    rt_err_t ret;
    s32_t t;
    u32_t tick;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* get the begin tick */
    tick = rt_tick_get();

    if (timeout == 0)
    {
        t = RT_WAITING_FOREVER;
    }
    else
    {
        /* convert milliseconds to OS ticks */
        if (timeout < (1000 / RT_TICK_PER_SECOND))
            t = 1;
        else
            t = timeout / (1000 / RT_TICK_PER_SECOND);
    }
    /* When the awaited message is produced by the application through a
       signal-based mechanism, the receive must be interruptible so that the
       thread can be made runnable again. */
    ret = rt_mb_recv_interruptible(*mbox, (rt_ubase_t *)msg, t);
    if (ret != RT_EOK)
    {
        return SYS_ARCH_TIMEOUT;
    }

    /* compute the elapsed ticks */
    tick = rt_tick_get() - tick;

    /* convert ticks to milliseconds */
    tick = tick * (1000 / RT_TICK_PER_SECOND);
    if (tick == 0)
        tick = 1;

    return tick;
}

/**
 * @ingroup sys_mbox
 * This is similar to sys_arch_mbox_fetch, however if a message is not
 * present in the mailbox, it immediately returns with the code
 * SYS_MBOX_EMPTY. On success 0 is returned.
 * To allow for efficient implementations, this can be defined as a
 * function-like macro in sys_arch.h instead of a normal function. For
 * example, a naive implementation could be:
 * \#define sys_arch_mbox_tryfetch(mbox,msg) sys_arch_mbox_fetch(mbox,msg,1)
 * although this would introduce unnecessary delays.
 *
 * @param mbox mbox to get a message from
 * @param msg pointer where the message is stored
 * @return 0 (milliseconds) if a message has been received
 *         or SYS_MBOX_EMPTY if the mailbox is empty
 */
u32_t sys_arch_mbox_tryfetch(sys_mbox_t *mbox, void **msg)
{
    int ret;

    ret = rt_mb_recv(*mbox, (rt_ubase_t *)msg, 0);
    if (ret == -RT_ETIMEOUT)
    {
        return SYS_ARCH_TIMEOUT;
    }
    else
    {
        if (ret == RT_EOK)
            ret = 0;
    }

    return ret;
}

#ifndef sys_mbox_valid
/** Check if an mbox is valid/allocated:
 *  return 1 for valid, 0 for invalid
 */
int sys_mbox_valid(sys_mbox_t *mbox)
{
    int ret = 0;

    if (*mbox) ret = 1;

    return ret;
}
#endif

#ifndef sys_mbox_set_invalid
/** Set an mbox invalid so that sys_mbox_valid returns 0
 */
void sys_mbox_set_invalid(sys_mbox_t *mbox)
{
    *mbox = RT_NULL;
}
#endif

/* ====================== System ====================== */

/*
 * Start a new thread named "name" with priority "prio" that will begin
 * its execution in the function "thread()". The "arg" argument will be
 * passed as an argument to the thread() function.
 */
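/*
 * A minimal usage sketch (hypothetical names; priority and stack size are
 * application choices):
 *
 *     sys_thread_t tid = sys_thread_new("worker", worker_entry, RT_NULL,
 *                                       1024, 12);
 *
 * Note that the time slice is fixed at 20 ticks and creation failure is
 * asserted rather than reported to the caller.
 */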
sys_thread_t sys_thread_new(const char    *name,
                            lwip_thread_fn thread,
                            void          *arg,
                            int            stacksize,
                            int            prio)
{
    rt_thread_t t;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* create the thread */
    t = rt_thread_create(name, thread, arg, stacksize, prio, 20);
    RT_ASSERT(t != RT_NULL);

    /* start the thread */
    rt_thread_startup(t);

    return t;
}

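/*
 * lwIP's SYS_ARCH_PROTECT()/SYS_ARCH_UNPROTECT() critical sections. The value
 * returned here is passed back to sys_arch_unprotect(): on single-core builds
 * it carries the saved interrupt level, on SMP builds it is unused because the
 * mutex keeps its own ownership state.
 */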
sys_prot_t sys_arch_protect(void)
{
#ifdef RT_USING_SMP
    rt_mutex_take(&_mutex, RT_WAITING_FOREVER);
    return 0;
#else
    rt_base_t level;
    level = rt_spin_lock_irqsave(&_spinlock);
    return level;
#endif
}

void sys_arch_unprotect(sys_prot_t pval)
{
#ifdef RT_USING_SMP
    RT_UNUSED(pval);
    rt_mutex_release(&_mutex);
#else
    rt_spin_unlock_irqrestore(&_spinlock, pval);
#endif
}

void sys_arch_assert(const char *file, int line)
{
    rt_kprintf("\nAssertion: %d in %s, thread %s\n",
               line, file, rt_thread_self()->parent.name);
    RT_ASSERT(0);
}

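/*
 * sys_jiffies() reports raw OS ticks, while sys_now() reports milliseconds
 * since boot; lwIP's timeout handling relies on the millisecond clock.
 */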
u32_t sys_jiffies(void)
{
    return rt_tick_get();
}

u32_t sys_now(void)
{
    return rt_tick_get_millisecond();
}

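/*
 * lwIP heap hooks: allocation is delegated to the RT-Thread heap
 * (rt_malloc/rt_calloc/rt_free), so mem_init() has nothing to set up and
 * mem_trim() is a no-op that keeps the original block.
 */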
rt_weak void mem_init(void)
{
}

void *mem_calloc(mem_size_t count, mem_size_t size)
{
    return rt_calloc(count, size);
}

void *mem_trim(void *mem, mem_size_t size)
{
    // return rt_realloc(mem, size);
    /* trimming is not supported yet */
    return mem;
}

void *mem_malloc(mem_size_t size)
{
    return rt_malloc(size);
}

void mem_free(void *mem)
{
    rt_free(mem);
}

#ifdef RT_LWIP_PPP
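/*
 * Serial I/O glue for PPP: the sio_fd_t handle is treated as an RT-Thread
 * device (rt_device_t), so reads and writes go straight to the underlying
 * device driver.
 */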
u32_t sio_read(sio_fd_t fd, u8_t *buf, u32_t size)
{
    u32_t len;

    RT_ASSERT(fd != RT_NULL);

    len = rt_device_read((rt_device_t)fd, 0, buf, size);
    if (len <= 0)
        return 0;

    return len;
}

u32_t sio_write(sio_fd_t fd, u8_t *buf, u32_t size)
{
    RT_ASSERT(fd != RT_NULL);
    return rt_device_write((rt_device_t)fd, 0, buf, size);
}

void sio_read_abort(sio_fd_t fd)
{
    rt_kprintf("read_abort\n");
}

void ppp_trace(int level, const char *format, ...)
{
    va_list args;
    rt_size_t length;
    static char rt_log_buf[RT_CONSOLEBUF_SIZE];

    va_start(args, format);
    length = rt_vsprintf(rt_log_buf, format, args);
    rt_device_write((rt_device_t)rt_console_get_device(), 0, rt_log_buf, length);
    va_end(args);
}
#endif /* RT_LWIP_PPP */

#if LWIP_VERSION_MAJOR >= 2 /* >= v2.x */
#if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK
/**
 * Check if a mem element was the victim of an overflow or underflow
 * (e.g. the restricted area after/before it has been altered)
 *
 * @param p the mem element to check
 * @param size allocated size of the element
 * @param descr1 description of the element source shown on error
 * @param descr2 description of the element source shown on error
 */
void mem_overflow_check_raw(void *p, size_t size, const char *descr1, const char *descr2)
{
#if MEM_SANITY_REGION_AFTER_ALIGNED || MEM_SANITY_REGION_BEFORE_ALIGNED
  u16_t k;
  u8_t *m;

#if MEM_SANITY_REGION_AFTER_ALIGNED > 0
  m = (u8_t *)p + size;
  for (k = 0; k < MEM_SANITY_REGION_AFTER_ALIGNED; k++) {
    if (m[k] != 0xcd) {
      char errstr[128];
      rt_snprintf(errstr, sizeof(errstr), "detected mem overflow in %s%s", descr1, descr2);
      LWIP_ASSERT(errstr, 0);
    }
  }
#endif /* MEM_SANITY_REGION_AFTER_ALIGNED > 0 */

#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0
  m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED;
  for (k = 0; k < MEM_SANITY_REGION_BEFORE_ALIGNED; k++) {
    if (m[k] != 0xcd) {
      char errstr[128];
      rt_snprintf(errstr, sizeof(errstr), "detected mem underflow in %s%s", descr1, descr2);
      LWIP_ASSERT(errstr, 0);
    }
  }
#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 */
#else
  LWIP_UNUSED_ARG(p);
  LWIP_UNUSED_ARG(descr1);
  LWIP_UNUSED_ARG(descr2);
#endif /* MEM_SANITY_REGION_AFTER_ALIGNED || MEM_SANITY_REGION_BEFORE_ALIGNED */
}

/**
 * Initialize the restricted area of a mem element.
 */
void mem_overflow_init_raw(void *p, size_t size)
{
#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0
  u8_t *m;
#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0
  m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED;
  rt_memset(m, 0xcd, MEM_SANITY_REGION_BEFORE_ALIGNED);
#endif
#if MEM_SANITY_REGION_AFTER_ALIGNED > 0
  m = (u8_t *)p + size;
  rt_memset(m, 0xcd, MEM_SANITY_REGION_AFTER_ALIGNED);
#endif
#else /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
  LWIP_UNUSED_ARG(p);
  LWIP_UNUSED_ARG(size);
#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
}
#endif /* MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK */

#ifdef LWIP_HOOK_IP4_ROUTE_SRC
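/*
 * Source-based routing hook: when a source address is given, pick the first
 * netif that is up, has a link, and owns exactly that address; otherwise fall
 * back to lwIP's normal destination-based routing by returning NULL.
 */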
struct netif *lwip_ip4_route_src(const ip4_addr_t *dest, const ip4_addr_t *src)
{
    struct netif *netif;

    if (src == NULL)
        return NULL;

    /* iterate through netifs */
    for (netif = netif_list; netif != NULL; netif = netif->next)
    {
        /* is the netif up, does it have a link and a valid address? */
        if (netif_is_up(netif) && netif_is_link_up(netif) && !ip4_addr_isany_val(*netif_ip4_addr(netif)))
        {
            /* source ip address equals netif's ip address? */
            if (ip4_addr_cmp(src, netif_ip4_addr(netif)))
            {
                return netif;
            }
        }
    }

    return NULL;
}
#endif /* LWIP_HOOK_IP4_ROUTE_SRC */
#endif /* LWIP_VERSION_MAJOR >= 2 */

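/*
 * Export the BSD socket, DNS, DHCP and netif APIs so that dynamically loaded
 * RT-Thread application modules can link against them (see the 2012-12-8
 * change-log entry above).
 */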
#if LWIP_SOCKET
#include <lwip/sockets.h>
RTM_EXPORT(lwip_accept);
RTM_EXPORT(lwip_bind);
RTM_EXPORT(lwip_shutdown);
RTM_EXPORT(lwip_getpeername);
RTM_EXPORT(lwip_getsockname);
RTM_EXPORT(lwip_getsockopt);
RTM_EXPORT(lwip_setsockopt);
RTM_EXPORT(lwip_close);
RTM_EXPORT(lwip_connect);
RTM_EXPORT(lwip_listen);
RTM_EXPORT(lwip_recv);
RTM_EXPORT(lwip_read);
RTM_EXPORT(lwip_recvfrom);
RTM_EXPORT(lwip_send);
RTM_EXPORT(lwip_sendto);
RTM_EXPORT(lwip_socket);
RTM_EXPORT(lwip_write);
RTM_EXPORT(lwip_select);
RTM_EXPORT(lwip_ioctl);
RTM_EXPORT(lwip_fcntl);
RTM_EXPORT(lwip_htons);
RTM_EXPORT(lwip_htonl);

#if LWIP_DNS
#include <lwip/netdb.h>
RTM_EXPORT(lwip_gethostbyname);
RTM_EXPORT(lwip_gethostbyname_r);
RTM_EXPORT(lwip_freeaddrinfo);
RTM_EXPORT(lwip_getaddrinfo);
#endif /* LWIP_DNS */
#endif /* LWIP_SOCKET */

#if LWIP_DHCP
#include <lwip/dhcp.h>
RTM_EXPORT(dhcp_start);
RTM_EXPORT(dhcp_renew);
RTM_EXPORT(dhcp_stop);
#endif /* LWIP_DHCP */

#if LWIP_NETIF_API
#include <lwip/netifapi.h>
RTM_EXPORT(netifapi_netif_set_addr);
#endif /* LWIP_NETIF_API */

#if LWIP_NETIF_LINK_CALLBACK
RTM_EXPORT(netif_set_link_callback);
#endif /* LWIP_NETIF_LINK_CALLBACK */

#if LWIP_NETIF_STATUS_CALLBACK
RTM_EXPORT(netif_set_status_callback);
#endif /* LWIP_NETIF_STATUS_CALLBACK */

RTM_EXPORT(netif_find);
RTM_EXPORT(netif_set_addr);
RTM_EXPORT(netif_set_ipaddr);
RTM_EXPORT(netif_set_gw);
RTM_EXPORT(netif_set_netmask);