1 /**************************************************************************//**
2 * @file     cmsis_gcc.h
3 * @brief    CMSIS compiler GCC header file
4 * @version  V5.3.0
5 * @date     26. March 2020
6 ******************************************************************************/
7 
8 /*
9  * Copyright (c) 2009-2020 Arm Limited. All rights reserved.
10  *
11  * SPDX-License-Identifier: Apache-2.0
12  *
13  * Licensed under the Apache License, Version 2.0 (the License); you may
14  * not use this file except in compliance with the License.
15  * You may obtain a copy of the License at
16  *
17  * www.apache.org/licenses/LICENSE-2.0
18  *
19  * Unless required by applicable law or agreed to in writing, software
20  * distributed under the License is distributed on an AS IS BASIS, WITHOUT
21  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
22  * See the License for the specific language governing permissions and
23  * limitations under the License.
24  */
25 
26 #ifndef __CMSIS_GCC_H
27 #define __CMSIS_GCC_H
28 
29 /* ignore some GCC warnings */
30 #pragma GCC diagnostic push
31 #pragma GCC diagnostic ignored "-Wsign-conversion"
32 #pragma GCC diagnostic ignored "-Wconversion"
33 #pragma GCC diagnostic ignored "-Wunused-parameter"
34 
35 /* Fallback for __has_builtin */
36 #ifndef __has_builtin
37     #define __has_builtin( x )    ( 0 )
38 #endif
39 
40 /* CMSIS compiler specific defines */
41 #ifndef   __ASM
42     #define __ASM                   __asm
43 #endif
44 #ifndef   __INLINE
45     #define __INLINE                inline
46 #endif
47 #ifndef   __STATIC_INLINE
48     #define __STATIC_INLINE         static inline
49 #endif
50 #ifndef   __STATIC_FORCEINLINE
51     #define __STATIC_FORCEINLINE    __attribute__( ( always_inline ) ) static inline
52 #endif
53 #ifndef   __NO_RETURN
54     #define __NO_RETURN             __attribute__( ( __noreturn__ ) )
55 #endif
56 #ifndef   __USED
57     #define __USED                  __attribute__( ( used ) )
58 #endif
59 #ifndef   __WEAK
60     #define __WEAK                  __attribute__( ( weak ) )
61 #endif
62 #ifndef   __PACKED
63     #define __PACKED                __attribute__( ( packed, aligned( 1 ) ) )
64 #endif
65 #ifndef   __PACKED_STRUCT
66     #define __PACKED_STRUCT         struct __attribute__( ( packed, aligned( 1 ) ) )
67 #endif
68 #ifndef   __PACKED_UNION
69     #define __PACKED_UNION          union __attribute__( ( packed, aligned( 1 ) ) )
70 #endif
71 #ifndef   __UNALIGNED_UINT32 /* deprecated */
72     #pragma GCC diagnostic push
73     #pragma GCC diagnostic ignored "-Wpacked"
74     #pragma GCC diagnostic ignored "-Wattributes"
75     struct __attribute__( ( packed ) ) T_UINT32
76     {
77         uint32_t v;
78     };
79     #pragma GCC diagnostic pop
80     #define __UNALIGNED_UINT32( x )    ( ( ( struct T_UINT32 * ) ( x ) )->v )
81 #endif
82 #ifndef   __UNALIGNED_UINT16_WRITE
83     #pragma GCC diagnostic push
84     #pragma GCC diagnostic ignored "-Wpacked"
85     #pragma GCC diagnostic ignored "-Wattributes"
86     __PACKED_STRUCT T_UINT16_WRITE {
87         uint16_t v;
88     };
89     #pragma GCC diagnostic pop
90     #define __UNALIGNED_UINT16_WRITE( addr, val )    ( void ) ( ( ( ( struct T_UINT16_WRITE * ) ( void * ) ( addr ) )->v ) = ( val ) )
91 #endif
92 #ifndef   __UNALIGNED_UINT16_READ
93     #pragma GCC diagnostic push
94     #pragma GCC diagnostic ignored "-Wpacked"
95     #pragma GCC diagnostic ignored "-Wattributes"
96     __PACKED_STRUCT T_UINT16_READ {
97         uint16_t v;
98     };
99     #pragma GCC diagnostic pop
100     #define __UNALIGNED_UINT16_READ( addr )    ( ( ( const struct T_UINT16_READ * ) ( const void * ) ( addr ) )->v )
101 #endif
102 #ifndef   __UNALIGNED_UINT32_WRITE
103     #pragma GCC diagnostic push
104     #pragma GCC diagnostic ignored "-Wpacked"
105     #pragma GCC diagnostic ignored "-Wattributes"
106     __PACKED_STRUCT T_UINT32_WRITE {
107         uint32_t v;
108     };
109     #pragma GCC diagnostic pop
110     #define __UNALIGNED_UINT32_WRITE( addr, val )    ( void ) ( ( ( ( struct T_UINT32_WRITE * ) ( void * ) ( addr ) )->v ) = ( val ) )
111 #endif
112 #ifndef   __UNALIGNED_UINT32_READ
113     #pragma GCC diagnostic push
114     #pragma GCC diagnostic ignored "-Wpacked"
115     #pragma GCC diagnostic ignored "-Wattributes"
116     __PACKED_STRUCT T_UINT32_READ {
117         uint32_t v;
118     };
119     #pragma GCC diagnostic pop
120     #define __UNALIGNED_UINT32_READ( addr )    ( ( ( const struct T_UINT32_READ * ) ( const void * ) ( addr ) )->v )
121 #endif
122 #ifndef   __ALIGNED
123     #define __ALIGNED( x )                     __attribute__( ( aligned( x ) ) )
124 #endif
125 #ifndef   __RESTRICT
126     #define __RESTRICT    __restrict
127 #endif
128 #ifndef   __COMPILER_BARRIER
129     #define __COMPILER_BARRIER()    __ASM volatile ( "" ::: "memory" )
130 #endif
131 
132 /* #########################  Startup and Lowlevel Init  ######################## */
133 
134 #ifndef __PROGRAM_START
135 
136 /**
137  * \brief   Initializes data and bss sections
138  * \details This default implementations initialized all data and additional bss
139  *         sections relying on .copy.table and .zero.table specified properly
140  *         in the used linker script.
141  *
142  */
__cmsis_start(void)143     __STATIC_FORCEINLINE __NO_RETURN void __cmsis_start( void )
144     {
145         extern void _start( void ) __NO_RETURN;
146 
147         typedef struct
148         {
149             uint32_t const * src;
150             uint32_t * dest;
151             uint32_t wlen;
152         } __copy_table_t;
153 
154         typedef struct
155         {
156             uint32_t * dest;
157             uint32_t wlen;
158         } __zero_table_t;
159 
160         extern const __copy_table_t __copy_table_start__;
161         extern const __copy_table_t __copy_table_end__;
162         extern const __zero_table_t __zero_table_start__;
163         extern const __zero_table_t __zero_table_end__;
164 
165         for( __copy_table_t const * pTable = &__copy_table_start__; pTable < &__copy_table_end__; ++pTable )
166         {
167             for( uint32_t i = 0u; i < pTable->wlen; ++i )
168             {
169                 pTable->dest[ i ] = pTable->src[ i ];
170             }
171         }
172 
173         for( __zero_table_t const * pTable = &__zero_table_start__; pTable < &__zero_table_end__; ++pTable )
174         {
175             for( uint32_t i = 0u; i < pTable->wlen; ++i )
176             {
177                 pTable->dest[ i ] = 0u;
178             }
179         }
180 
181         _start();
182     }
183 
184     #define __PROGRAM_START    __cmsis_start
185 #endif /* ifndef __PROGRAM_START */
186 
187 #ifndef __INITIAL_SP
188     #define __INITIAL_SP    __StackTop
189 #endif
190 
191 #ifndef __STACK_LIMIT
192     #define __STACK_LIMIT    __StackLimit
193 #endif
194 
195 #ifndef __VECTOR_TABLE
196     #define __VECTOR_TABLE    __Vectors
197 #endif
198 
199 #ifndef __VECTOR_TABLE_ATTRIBUTE
200     #define __VECTOR_TABLE_ATTRIBUTE    __attribute__( ( used, section( ".vectors" ) ) )
201 #endif
202 
203 /* ###########################  Core Function Access  ########################### */
204 
205 /** \ingroup  CMSIS_Core_FunctionInterface
206  *  \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
207  * @{
208  */
209 
210 /**
211  * \brief   Enable IRQ Interrupts
212  * \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
213  *         Can only be executed in Privileged modes.
214  */
__enable_irq(void)215 __STATIC_FORCEINLINE void __enable_irq( void )
216 {
217     __ASM volatile ( "cpsie i" : : : "memory" );
218 }
219 
220 
221 /**
222  * \brief   Disable IRQ Interrupts
223  * \details Disables IRQ interrupts by setting the I-bit in the CPSR.
224  *         Can only be executed in Privileged modes.
225  */
__disable_irq(void)226 __STATIC_FORCEINLINE void __disable_irq( void )
227 {
228     __ASM volatile ( "cpsid i" : : : "memory" );
229 }
230 
231 
232 /**
233  * \brief   Get Control Register
234  * \details Returns the content of the Control Register.
235  * \return               Control Register value
236  */
__get_CONTROL(void)237 __STATIC_FORCEINLINE uint32_t __get_CONTROL( void )
238 {
239     uint32_t result;
240 
241     __ASM volatile ( "MRS %0, control" : "=r" ( result ) );
242 
243     return( result );
244 }
245 
246 
247 #if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) )
248 
249 /**
250  * \brief   Get Control Register (non-secure)
251  * \details Returns the content of the non-secure Control Register when in secure mode.
252  * \return               non-secure Control Register value
253  */
__TZ_get_CONTROL_NS(void)254     __STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS( void )
255     {
256         uint32_t result;
257 
258         __ASM volatile ( "MRS %0, control_ns" : "=r" ( result ) );
259 
260         return( result );
261     }
262 #endif /* if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) ) */
263 
264 
265 /**
266  * \brief   Set Control Register
267  * \details Writes the given value to the Control Register.
268  * \param [in]    control  Control Register value to set
269  */
__set_CONTROL(uint32_t control)270 __STATIC_FORCEINLINE void __set_CONTROL( uint32_t control )
271 {
272     __ASM volatile ( "MSR control, %0" : : "r" ( control ) : "memory" );
273 }
274 
275 
276 #if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) )
277 
278 /**
279  * \brief   Set Control Register (non-secure)
280  * \details Writes the given value to the non-secure Control Register when in secure state.
281  * \param [in]    control  Control Register value to set
282  */
__TZ_set_CONTROL_NS(uint32_t control)283     __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS( uint32_t control )
284     {
285         __ASM volatile ( "MSR control_ns, %0" : : "r" ( control ) : "memory" );
286     }
287 #endif
288 
289 
290 /**
291  * \brief   Get IPSR Register
292  * \details Returns the content of the IPSR Register.
293  * \return               IPSR Register value
294  */
__get_IPSR(void)295 __STATIC_FORCEINLINE uint32_t __get_IPSR( void )
296 {
297     uint32_t result;
298 
299     __ASM volatile ( "MRS %0, ipsr" : "=r" ( result ) );
300 
301     return( result );
302 }
303 
304 
305 /**
306  * \brief   Get APSR Register
307  * \details Returns the content of the APSR Register.
308  * \return               APSR Register value
309  */
__get_APSR(void)310 __STATIC_FORCEINLINE uint32_t __get_APSR( void )
311 {
312     uint32_t result;
313 
314     __ASM volatile ( "MRS %0, apsr" : "=r" ( result ) );
315 
316     return( result );
317 }
318 
319 
320 /**
321  * \brief   Get xPSR Register
322  * \details Returns the content of the xPSR Register.
323  * \return               xPSR Register value
324  */
__get_xPSR(void)325 __STATIC_FORCEINLINE uint32_t __get_xPSR( void )
326 {
327     uint32_t result;
328 
329     __ASM volatile ( "MRS %0, xpsr" : "=r" ( result ) );
330 
331     return( result );
332 }
333 
334 
335 /**
336  * \brief   Get Process Stack Pointer
337  * \details Returns the current value of the Process Stack Pointer (PSP).
338  * \return               PSP Register value
339  */
__get_PSP(void)340 __STATIC_FORCEINLINE uint32_t __get_PSP( void )
341 {
342     uint32_t result;
343 
344     __ASM volatile ( "MRS %0, psp"  : "=r" ( result ) );
345 
346     return( result );
347 }
348 
349 
350 #if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) )
351 
352 /**
353  * \brief   Get Process Stack Pointer (non-secure)
354  * \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state.
355  * \return               PSP Register value
356  */
__TZ_get_PSP_NS(void)357     __STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS( void )
358     {
359         uint32_t result;
360 
361         __ASM volatile ( "MRS %0, psp_ns"  : "=r" ( result ) );
362 
363         return( result );
364     }
365 #endif /* if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) ) */
366 
367 
368 /**
369  * \brief   Set Process Stack Pointer
370  * \details Assigns the given value to the Process Stack Pointer (PSP).
371  * \param [in]    topOfProcStack  Process Stack Pointer value to set
372  */
__set_PSP(uint32_t topOfProcStack)373 __STATIC_FORCEINLINE void __set_PSP( uint32_t topOfProcStack )
374 {
375     __ASM volatile ( "MSR psp, %0" : : "r" ( topOfProcStack ) : );
376 }
377 
378 
379 #if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) )
380 
381 /**
382  * \brief   Set Process Stack Pointer (non-secure)
383  * \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state.
384  * \param [in]    topOfProcStack  Process Stack Pointer value to set
385  */
__TZ_set_PSP_NS(uint32_t topOfProcStack)386     __STATIC_FORCEINLINE void __TZ_set_PSP_NS( uint32_t topOfProcStack )
387     {
388         __ASM volatile ( "MSR psp_ns, %0" : : "r" ( topOfProcStack ) : );
389     }
390 #endif
391 
392 
393 /**
394  * \brief   Get Main Stack Pointer
395  * \details Returns the current value of the Main Stack Pointer (MSP).
396  * \return               MSP Register value
397  */
__get_MSP(void)398 __STATIC_FORCEINLINE uint32_t __get_MSP( void )
399 {
400     uint32_t result;
401 
402     __ASM volatile ( "MRS %0, msp" : "=r" ( result ) );
403 
404     return( result );
405 }
406 
407 
408 #if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) )
409 
410 /**
411  * \brief   Get Main Stack Pointer (non-secure)
412  * \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state.
413  * \return               MSP Register value
414  */
__TZ_get_MSP_NS(void)415     __STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS( void )
416     {
417         uint32_t result;
418 
419         __ASM volatile ( "MRS %0, msp_ns" : "=r" ( result ) );
420 
421         return( result );
422     }
423 #endif /* if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) ) */
424 
425 
426 /**
427  * \brief   Set Main Stack Pointer
428  * \details Assigns the given value to the Main Stack Pointer (MSP).
429  * \param [in]    topOfMainStack  Main Stack Pointer value to set
430  */
__set_MSP(uint32_t topOfMainStack)431 __STATIC_FORCEINLINE void __set_MSP( uint32_t topOfMainStack )
432 {
433     __ASM volatile ( "MSR msp, %0" : : "r" ( topOfMainStack ) : );
434 }
435 
436 
437 #if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) )
438 
439 /**
440  * \brief   Set Main Stack Pointer (non-secure)
441  * \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state.
442  * \param [in]    topOfMainStack  Main Stack Pointer value to set
443  */
__TZ_set_MSP_NS(uint32_t topOfMainStack)444     __STATIC_FORCEINLINE void __TZ_set_MSP_NS( uint32_t topOfMainStack )
445     {
446         __ASM volatile ( "MSR msp_ns, %0" : : "r" ( topOfMainStack ) : );
447     }
448 #endif
449 
450 
451 #if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) )
452 
453 /**
454  * \brief   Get Stack Pointer (non-secure)
455  * \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state.
456  * \return               SP Register value
457  */
__TZ_get_SP_NS(void)458     __STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS( void )
459     {
460         uint32_t result;
461 
462         __ASM volatile ( "MRS %0, sp_ns" : "=r" ( result ) );
463 
464         return( result );
465     }
466 
467 
468 /**
469  * \brief   Set Stack Pointer (non-secure)
470  * \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state.
471  * \param [in]    topOfStack  Stack Pointer value to set
472  */
__TZ_set_SP_NS(uint32_t topOfStack)473     __STATIC_FORCEINLINE void __TZ_set_SP_NS( uint32_t topOfStack )
474     {
475         __ASM volatile ( "MSR sp_ns, %0" : : "r" ( topOfStack ) : );
476     }
477 #endif /* if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) ) */
478 
479 
480 /**
481  * \brief   Get Priority Mask
482  * \details Returns the current state of the priority mask bit from the Priority Mask Register.
483  * \return               Priority Mask value
484  */
__get_PRIMASK(void)485 __STATIC_FORCEINLINE uint32_t __get_PRIMASK( void )
486 {
487     uint32_t result;
488 
489     __ASM volatile ( "MRS %0, primask" : "=r" ( result ) );
490 
491     return( result );
492 }
493 
494 
495 #if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) )
496 
497 /**
498  * \brief   Get Priority Mask (non-secure)
499  * \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state.
500  * \return               Priority Mask value
501  */
__TZ_get_PRIMASK_NS(void)502     __STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS( void )
503     {
504         uint32_t result;
505 
506         __ASM volatile ( "MRS %0, primask_ns" : "=r" ( result ) );
507 
508         return( result );
509     }
510 #endif /* if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) ) */
511 
512 
513 /**
514  * \brief   Set Priority Mask
515  * \details Assigns the given value to the Priority Mask Register.
516  * \param [in]    priMask  Priority Mask
517  */
__set_PRIMASK(uint32_t priMask)518 __STATIC_FORCEINLINE void __set_PRIMASK( uint32_t priMask )
519 {
520     __ASM volatile ( "MSR primask, %0" : : "r" ( priMask ) : "memory" );
521 }
522 
523 
524 #if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) )
525 
526 /**
527  * \brief   Set Priority Mask (non-secure)
528  * \details Assigns the given value to the non-secure Priority Mask Register when in secure state.
529  * \param [in]    priMask  Priority Mask
530  */
__TZ_set_PRIMASK_NS(uint32_t priMask)531     __STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS( uint32_t priMask )
532     {
533         __ASM volatile ( "MSR primask_ns, %0" : : "r" ( priMask ) : "memory" );
534     }
535 #endif
536 
537 
538 #if ( ( defined( __ARM_ARCH_7M__ ) && ( __ARM_ARCH_7M__ == 1 ) ) || \
539     ( defined( __ARM_ARCH_7EM__ ) && ( __ARM_ARCH_7EM__ == 1 ) ) || \
540     ( defined( __ARM_ARCH_8M_MAIN__ ) && ( __ARM_ARCH_8M_MAIN__ == 1 ) ) )
541 
542 /**
543  * \brief   Enable FIQ
544  * \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
545  *         Can only be executed in Privileged modes.
546  */
__enable_fault_irq(void)547     __STATIC_FORCEINLINE void __enable_fault_irq( void )
548     {
549         __ASM volatile ( "cpsie f" : : : "memory" );
550     }
551 
552 
553 /**
554  * \brief   Disable FIQ
555  * \details Disables FIQ interrupts by setting the F-bit in the CPSR.
556  *         Can only be executed in Privileged modes.
557  */
__disable_fault_irq(void)558     __STATIC_FORCEINLINE void __disable_fault_irq( void )
559     {
560         __ASM volatile ( "cpsid f" : : : "memory" );
561     }
562 
563 
564 /**
565  * \brief   Get Base Priority
566  * \details Returns the current value of the Base Priority register.
567  * \return               Base Priority register value
568  */
__get_BASEPRI(void)569     __STATIC_FORCEINLINE uint32_t __get_BASEPRI( void )
570     {
571         uint32_t result;
572 
573         __ASM volatile ( "MRS %0, basepri" : "=r" ( result ) );
574 
575         return( result );
576     }
577 
578 
579     #if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) )
580 
581 /**
582  * \brief   Get Base Priority (non-secure)
583  * \details Returns the current value of the non-secure Base Priority register when in secure state.
584  * \return               Base Priority register value
585  */
__TZ_get_BASEPRI_NS(void)586         __STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS( void )
587         {
588             uint32_t result;
589 
590             __ASM volatile ( "MRS %0, basepri_ns" : "=r" ( result ) );
591 
592             return( result );
593         }
594     #endif /* if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) ) */
595 
596 
597 /**
598  * \brief   Set Base Priority
599  * \details Assigns the given value to the Base Priority register.
600  * \param [in]    basePri  Base Priority value to set
601  */
__set_BASEPRI(uint32_t basePri)602     __STATIC_FORCEINLINE void __set_BASEPRI( uint32_t basePri )
603     {
604         __ASM volatile ( "MSR basepri, %0" : : "r" ( basePri ) : "memory" );
605     }
606 
607 
608     #if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) )
609 
610 /**
611  * \brief   Set Base Priority (non-secure)
612  * \details Assigns the given value to the non-secure Base Priority register when in secure state.
613  * \param [in]    basePri  Base Priority value to set
614  */
__TZ_set_BASEPRI_NS(uint32_t basePri)615         __STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS( uint32_t basePri )
616         {
617             __ASM volatile ( "MSR basepri_ns, %0" : : "r" ( basePri ) : "memory" );
618         }
619     #endif
620 
621 
622 /**
623  * \brief   Set Base Priority with condition
624  * \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
625  *         or the new value increases the BASEPRI priority level.
626  * \param [in]    basePri  Base Priority value to set
627  */
__set_BASEPRI_MAX(uint32_t basePri)628     __STATIC_FORCEINLINE void __set_BASEPRI_MAX( uint32_t basePri )
629     {
630         __ASM volatile ( "MSR basepri_max, %0" : : "r" ( basePri ) : "memory" );
631     }
632 
633 
634 /**
635  * \brief   Get Fault Mask
636  * \details Returns the current value of the Fault Mask register.
637  * \return               Fault Mask register value
638  */
__get_FAULTMASK(void)639     __STATIC_FORCEINLINE uint32_t __get_FAULTMASK( void )
640     {
641         uint32_t result;
642 
643         __ASM volatile ( "MRS %0, faultmask" : "=r" ( result ) );
644 
645         return( result );
646     }
647 
648 
649     #if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) )
650 
651 /**
652  * \brief   Get Fault Mask (non-secure)
653  * \details Returns the current value of the non-secure Fault Mask register when in secure state.
654  * \return               Fault Mask register value
655  */
__TZ_get_FAULTMASK_NS(void)656         __STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS( void )
657         {
658             uint32_t result;
659 
660             __ASM volatile ( "MRS %0, faultmask_ns" : "=r" ( result ) );
661 
662             return( result );
663         }
664     #endif /* if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) ) */
665 
666 
667 /**
668  * \brief   Set Fault Mask
669  * \details Assigns the given value to the Fault Mask register.
670  * \param [in]    faultMask  Fault Mask value to set
671  */
__set_FAULTMASK(uint32_t faultMask)672     __STATIC_FORCEINLINE void __set_FAULTMASK( uint32_t faultMask )
673     {
674         __ASM volatile ( "MSR faultmask, %0" : : "r" ( faultMask ) : "memory" );
675     }
676 
677 
678     #if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) )
679 
680 /**
681  * \brief   Set Fault Mask (non-secure)
682  * \details Assigns the given value to the non-secure Fault Mask register when in secure state.
683  * \param [in]    faultMask  Fault Mask value to set
684  */
__TZ_set_FAULTMASK_NS(uint32_t faultMask)685         __STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS( uint32_t faultMask )
686         {
687             __ASM volatile ( "MSR faultmask_ns, %0" : : "r" ( faultMask ) : "memory" );
688         }
689     #endif
690 
691 #endif /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
692         *  (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
693         *  (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    ) */
694 
695 
696 #if ( ( defined( __ARM_ARCH_8M_MAIN__ ) && ( __ARM_ARCH_8M_MAIN__ == 1 ) ) || \
697     ( defined( __ARM_ARCH_8M_BASE__ ) && ( __ARM_ARCH_8M_BASE__ == 1 ) ) )
698 
699 /**
700  * \brief   Get Process Stack Pointer Limit
701  * Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
702  * Stack Pointer Limit register hence zero is returned always in non-secure
703  * mode.
704  *
705  * \details Returns the current value of the Process Stack Pointer Limit (PSPLIM).
706  * \return               PSPLIM Register value
707  */
__get_PSPLIM(void)708     __STATIC_FORCEINLINE uint32_t __get_PSPLIM( void )
709     {
710         #if ( !( defined( __ARM_ARCH_8M_MAIN__ ) && ( __ARM_ARCH_8M_MAIN__ == 1 ) ) && \
711         ( !defined( __ARM_FEATURE_CMSE ) || ( __ARM_FEATURE_CMSE < 3 ) ) )
712             /* without main extensions, the non-secure PSPLIM is RAZ/WI */
713             return 0U;
714         #else
715             uint32_t result;
716             __ASM volatile ( "MRS %0, psplim"  : "=r" ( result ) );
717             return result;
718         #endif
719     }
720 
721     #if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) )
722 
723 /**
724  * \brief   Get Process Stack Pointer Limit (non-secure)
725  * Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
726  * Stack Pointer Limit register hence zero is returned always.
727  *
728  * \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state.
729  * \return               PSPLIM Register value
730  */
__TZ_get_PSPLIM_NS(void)731         __STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS( void )
732         {
733             #if ( !( defined( __ARM_ARCH_8M_MAIN__ ) && ( __ARM_ARCH_8M_MAIN__ == 1 ) ) )
734                 /* without main extensions, the non-secure PSPLIM is RAZ/WI */
735                 return 0U;
736             #else
737                 uint32_t result;
738                 __ASM volatile ( "MRS %0, psplim_ns"  : "=r" ( result ) );
739                 return result;
740             #endif
741         }
742     #endif /* if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) ) */
743 
744 
745 /**
746  * \brief   Set Process Stack Pointer Limit
747  * Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
748  * Stack Pointer Limit register hence the write is silently ignored in non-secure
749  * mode.
750  *
751  * \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM).
752  * \param [in]    ProcStackPtrLimit  Process Stack Pointer Limit value to set
753  */
__set_PSPLIM(uint32_t ProcStackPtrLimit)754     __STATIC_FORCEINLINE void __set_PSPLIM( uint32_t ProcStackPtrLimit )
755     {
756         #if ( !( defined( __ARM_ARCH_8M_MAIN__ ) && ( __ARM_ARCH_8M_MAIN__ == 1 ) ) && \
757         ( !defined( __ARM_FEATURE_CMSE ) || ( __ARM_FEATURE_CMSE < 3 ) ) )
758             /* without main extensions, the non-secure PSPLIM is RAZ/WI */
759             ( void ) ProcStackPtrLimit;
760         #else
761             __ASM volatile ( "MSR psplim, %0" : : "r" ( ProcStackPtrLimit ) );
762         #endif
763     }
764 
765 
766     #if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) )
767 
768 /**
769  * \brief   Set Process Stack Pointer (non-secure)
770  * Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
771  * Stack Pointer Limit register hence the write is silently ignored.
772  *
773  * \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state.
774  * \param [in]    ProcStackPtrLimit  Process Stack Pointer Limit value to set
775  */
__TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit)776         __STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS( uint32_t ProcStackPtrLimit )
777         {
778             #if ( !( defined( __ARM_ARCH_8M_MAIN__ ) && ( __ARM_ARCH_8M_MAIN__ == 1 ) ) )
779                 /* without main extensions, the non-secure PSPLIM is RAZ/WI */
780                 ( void ) ProcStackPtrLimit;
781             #else
782                 __ASM volatile ( "MSR psplim_ns, %0\n" : : "r" ( ProcStackPtrLimit ) );
783             #endif
784         }
785     #endif /* if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) ) */
786 
787 
788 /**
789  * \brief   Get Main Stack Pointer Limit
790  * Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
791  * Stack Pointer Limit register hence zero is returned always in non-secure
792  * mode.
793  *
794  * \details Returns the current value of the Main Stack Pointer Limit (MSPLIM).
795  * \return               MSPLIM Register value
796  */
__get_MSPLIM(void)797     __STATIC_FORCEINLINE uint32_t __get_MSPLIM( void )
798     {
799         #if ( !( defined( __ARM_ARCH_8M_MAIN__ ) && ( __ARM_ARCH_8M_MAIN__ == 1 ) ) && \
800         ( !defined( __ARM_FEATURE_CMSE ) || ( __ARM_FEATURE_CMSE < 3 ) ) )
801             /* without main extensions, the non-secure MSPLIM is RAZ/WI */
802             return 0U;
803         #else
804             uint32_t result;
805             __ASM volatile ( "MRS %0, msplim" : "=r" ( result ) );
806             return result;
807         #endif
808     }
809 
810 
811     #if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) )
812 
813 /**
814  * \brief   Get Main Stack Pointer Limit (non-secure)
815  * Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
816  * Stack Pointer Limit register hence zero is returned always.
817  *
818  * \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state.
819  * \return               MSPLIM Register value
820  */
__TZ_get_MSPLIM_NS(void)821         __STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS( void )
822         {
823             #if ( !( defined( __ARM_ARCH_8M_MAIN__ ) && ( __ARM_ARCH_8M_MAIN__ == 1 ) ) )
824                 /* without main extensions, the non-secure MSPLIM is RAZ/WI */
825                 return 0U;
826             #else
827                 uint32_t result;
828                 __ASM volatile ( "MRS %0, msplim_ns" : "=r" ( result ) );
829                 return result;
830             #endif
831         }
832     #endif /* if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) ) */
833 
834 
835 /**
836  * \brief   Set Main Stack Pointer Limit
837  * Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
838  * Stack Pointer Limit register hence the write is silently ignored in non-secure
839  * mode.
840  *
841  * \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM).
842  * \param [in]    MainStackPtrLimit  Main Stack Pointer Limit value to set
843  */
__set_MSPLIM(uint32_t MainStackPtrLimit)844     __STATIC_FORCEINLINE void __set_MSPLIM( uint32_t MainStackPtrLimit )
845     {
846         #if ( !( defined( __ARM_ARCH_8M_MAIN__ ) && ( __ARM_ARCH_8M_MAIN__ == 1 ) ) && \
847         ( !defined( __ARM_FEATURE_CMSE ) || ( __ARM_FEATURE_CMSE < 3 ) ) )
848             /* without main extensions, the non-secure MSPLIM is RAZ/WI */
849             ( void ) MainStackPtrLimit;
850         #else
851             __ASM volatile ( "MSR msplim, %0" : : "r" ( MainStackPtrLimit ) );
852         #endif
853     }
854 
855 
856     #if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) )
857 
858 /**
859  * \brief   Set Main Stack Pointer Limit (non-secure)
860  * Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
861  * Stack Pointer Limit register hence the write is silently ignored.
862  *
863  * \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state.
864  * \param [in]    MainStackPtrLimit  Main Stack Pointer value to set
865  */
__TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit)866         __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS( uint32_t MainStackPtrLimit )
867         {
868             #if ( !( defined( __ARM_ARCH_8M_MAIN__ ) && ( __ARM_ARCH_8M_MAIN__ == 1 ) ) )
869                 /* without main extensions, the non-secure MSPLIM is RAZ/WI */
870                 ( void ) MainStackPtrLimit;
871             #else
872                 __ASM volatile ( "MSR msplim_ns, %0" : : "r" ( MainStackPtrLimit ) );
873             #endif
874         }
875     #endif /* if ( defined( __ARM_FEATURE_CMSE ) && ( __ARM_FEATURE_CMSE == 3 ) ) */
876 
877 #endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
878         *  (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    ) */
879 
880 
881 /**
882  * \brief   Get FPSCR
883  * \details Returns the current value of the Floating Point Status/Control register.
884  * \return               Floating Point Status/Control register value
885  */
__get_FPSCR(void)886 __STATIC_FORCEINLINE uint32_t __get_FPSCR( void )
887 {
888     #if ( ( defined( __FPU_PRESENT ) && ( __FPU_PRESENT == 1U ) ) && \
889     ( defined( __FPU_USED ) && ( __FPU_USED == 1U ) ) )
890         #if __has_builtin( __builtin_arm_get_fpscr )
891 /* Re-enable using built-in when GCC has been fixed */
892 /* || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) */
893             /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
894             return __builtin_arm_get_fpscr();
895         #else
896             uint32_t result;
897 
898             __ASM volatile ( "VMRS %0, fpscr" : "=r" ( result ) );
899             return( result );
900         #endif
901     #else /* if ( ( defined( __FPU_PRESENT ) && ( __FPU_PRESENT == 1U ) ) && ( defined( __FPU_USED ) && ( __FPU_USED == 1U ) ) ) */
902         return( 0U );
903     #endif /* if ( ( defined( __FPU_PRESENT ) && ( __FPU_PRESENT == 1U ) ) && ( defined( __FPU_USED ) && ( __FPU_USED == 1U ) ) ) */
904 }
905 
906 
907 /**
908  * \brief   Set FPSCR
909  * \details Assigns the given value to the Floating Point Status/Control register.
910  * \param [in]    fpscr  Floating Point Status/Control value to set
911  */
__set_FPSCR(uint32_t fpscr)912 __STATIC_FORCEINLINE void __set_FPSCR( uint32_t fpscr )
913 {
914     #if ( ( defined( __FPU_PRESENT ) && ( __FPU_PRESENT == 1U ) ) && \
915     ( defined( __FPU_USED ) && ( __FPU_USED == 1U ) ) )
916         #if __has_builtin( __builtin_arm_set_fpscr )
917 /* Re-enable using built-in when GCC has been fixed */
918 /* || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) */
919             /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
920             __builtin_arm_set_fpscr( fpscr );
921         #else
922             __ASM volatile ( "VMSR fpscr, %0" : : "r" ( fpscr ) : "vfpcc", "memory" );
923         #endif
924     #else
925         ( void ) fpscr;
926     #endif /* if ( ( defined( __FPU_PRESENT ) && ( __FPU_PRESENT == 1U ) ) && ( defined( __FPU_USED ) && ( __FPU_USED == 1U ) ) ) */
927 }
928 
929 
930 /*@} end of CMSIS_Core_RegAccFunctions */
931 
932 
933 /* ##########################  Core Instruction Access  ######################### */
934 
935 /** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
936  * Access to dedicated instructions
937  * @{
938  */
939 
940 /* Define macros for porting to both thumb1 and thumb2.
941  * For thumb1, use low register (r0-r7), specified by constraint "l"
942  * Otherwise, use general registers, specified by constraint "r" */
943 #if defined( __thumb__ ) && !defined( __thumb2__ )
944     #define __CMSIS_GCC_OUT_REG( r )    "=l" ( r )
945     #define __CMSIS_GCC_RW_REG( r )     "+l" ( r )
946     #define __CMSIS_GCC_USE_REG( r )    "l" ( r )
947 #else
948     #define __CMSIS_GCC_OUT_REG( r )    "=r" ( r )
949     #define __CMSIS_GCC_RW_REG( r )     "+r" ( r )
950     #define __CMSIS_GCC_USE_REG( r )    "r" ( r )
951 #endif
952 
953 /**
954  * \brief   No Operation
955  * \details No Operation does nothing. This instruction can be used for code alignment purposes.
956  */
957 #define __NOP()    __ASM volatile ( "nop" )
958 
959 /**
960  * \brief   Wait For Interrupt
961  * \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
962  */
963 #define __WFI()    __ASM volatile ( "wfi" ::: "memory" )
964 
965 
966 /**
967  * \brief   Wait For Event
968  * \details Wait For Event is a hint instruction that permits the processor to enter
969  *         a low-power state until one of a number of events occurs.
970  */
971 #define __WFE()    __ASM volatile ( "wfe" ::: "memory" )
972 
973 
974 /**
975  * \brief   Send Event
976  * \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
977  */
978 #define __SEV()    __ASM volatile ( "sev" )
979 
980 
981 /**
982  * \brief   Instruction Synchronization Barrier
983  * \details Instruction Synchronization Barrier flushes the pipeline in the processor,
984  *         so that all instructions following the ISB are fetched from cache or memory,
985  *         after the instruction has been completed.
986  */
__ISB(void)987 __STATIC_FORCEINLINE void __ISB( void )
988 {
989     __ASM volatile ( "isb 0xF" ::: "memory" );
990 }
991 
992 
993 /**
994  * \brief   Data Synchronization Barrier
995  * \details Acts as a special kind of Data Memory Barrier.
996  *         It completes when all explicit memory accesses before this instruction complete.
997  */
__DSB(void)998 __STATIC_FORCEINLINE void __DSB( void )
999 {
1000     __ASM volatile ( "dsb 0xF" ::: "memory" );
1001 }
1002 
1003 
1004 /**
1005  * \brief   Data Memory Barrier
1006  * \details Ensures the apparent order of the explicit memory operations before
1007  *         and after the instruction, without ensuring their completion.
1008  */
__DMB(void)1009 __STATIC_FORCEINLINE void __DMB( void )
1010 {
1011     __ASM volatile ( "dmb 0xF" ::: "memory" );
1012 }
1013 
1014 
1015 /**
1016  * \brief   Reverse byte order (32 bit)
1017  * \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
1018  * \param [in]    value  Value to reverse
1019  * \return               Reversed value
1020  */
__REV(uint32_t value)1021 __STATIC_FORCEINLINE uint32_t __REV( uint32_t value )
1022 {
1023     #if ( __GNUC__ > 4 ) || ( __GNUC__ == 4 && __GNUC_MINOR__ >= 5 )
1024         return __builtin_bswap32( value );
1025     #else
1026         uint32_t result;
1027 
1028         __ASM( "rev %0, %1" : __CMSIS_GCC_OUT_REG( result ) : __CMSIS_GCC_USE_REG( value ) );
1029         return result;
1030     #endif
1031 }
1032 
1033 
1034 /**
1035  * \brief   Reverse byte order (16 bit)
1036  * \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
1037  * \param [in]    value  Value to reverse
1038  * \return               Reversed value
1039  */
__REV16(uint32_t value)1040 __STATIC_FORCEINLINE uint32_t __REV16( uint32_t value )
1041 {
1042     uint32_t result;
1043 
1044     __ASM( "rev16 %0, %1" : __CMSIS_GCC_OUT_REG( result ) : __CMSIS_GCC_USE_REG( value ) );
1045     return result;
1046 }
1047 
1048 
1049 /**
1050  * \brief   Reverse byte order (16 bit)
1051  * \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
1052  * \param [in]    value  Value to reverse
1053  * \return               Reversed value
1054  */
__REVSH(int16_t value)1055 __STATIC_FORCEINLINE int16_t __REVSH( int16_t value )
1056 {
1057     #if ( __GNUC__ > 4 ) || ( __GNUC__ == 4 && __GNUC_MINOR__ >= 8 )
1058         return ( int16_t ) __builtin_bswap16( value );
1059     #else
1060         int16_t result;
1061 
1062         __ASM( "revsh %0, %1" : __CMSIS_GCC_OUT_REG( result ) : __CMSIS_GCC_USE_REG( value ) );
1063         return result;
1064     #endif
1065 }
1066 
1067 
1068 /**
1069  * \brief   Rotate Right in unsigned value (32 bit)
1070  * \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
1071  * \param [in]    op1  Value to rotate
1072  * \param [in]    op2  Number of Bits to rotate
1073  * \return               Rotated value
1074  */
__ROR(uint32_t op1,uint32_t op2)1075 __STATIC_FORCEINLINE uint32_t __ROR( uint32_t op1,
1076                                      uint32_t op2 )
1077 {
1078     op2 %= 32U;
1079 
1080     if( op2 == 0U )
1081     {
1082         return op1;
1083     }
1084 
1085     return ( op1 >> op2 ) | ( op1 << ( 32U - op2 ) );
1086 }
1087 
1088 
1089 /**
1090  * \brief   Breakpoint
1091  * \details Causes the processor to enter Debug state.
1092  *         Debug tools can use this to investigate system state when the instruction at a particular address is reached.
1093  * \param [in]    value  is ignored by the processor.
1094  *               If required, a debugger can use it to store additional information about the breakpoint.
1095  */
1096 #define __BKPT( value )    __ASM volatile ( "bkpt "# value )
1097 
1098 
1099 /**
1100  * \brief   Reverse bit order of value
1101  * \details Reverses the bit order of the given value.
1102  * \param [in]    value  Value to reverse
1103  * \return               Reversed value
1104  */
__RBIT(uint32_t value)1105 __STATIC_FORCEINLINE uint32_t __RBIT( uint32_t value )
1106 {
1107     uint32_t result;
1108 
1109     #if ( ( defined( __ARM_ARCH_7M__ ) && ( __ARM_ARCH_7M__ == 1 ) ) || \
1110     ( defined( __ARM_ARCH_7EM__ ) && ( __ARM_ARCH_7EM__ == 1 ) ) ||     \
1111     ( defined( __ARM_ARCH_8M_MAIN__ ) && ( __ARM_ARCH_8M_MAIN__ == 1 ) ) )
1112         __ASM( "rbit %0, %1" : "=r" ( result ) : "r" ( value ) );
1113     #else
1114         uint32_t s = ( 4U /*sizeof(v)*/ * 8U ) - 1U; /* extra shift needed at end */
1115 
1116         result = value;                              /* r will be reversed bits of v; first get LSB of v */
1117 
1118         for( value >>= 1U; value != 0U; value >>= 1U )
1119         {
1120             result <<= 1U;
1121             result |= value & 1U;
1122             s--;
1123         }
1124         result <<= s; /* shift when v's highest bits are zero */
1125     #endif /* if ( ( defined( __ARM_ARCH_7M__ ) && ( __ARM_ARCH_7M__ == 1 ) ) || ( defined( __ARM_ARCH_7EM__ ) && ( __ARM_ARCH_7EM__ == 1 ) ) || ( defined( __ARM_ARCH_8M_MAIN__ ) && ( __ARM_ARCH_8M_MAIN__ == 1 ) ) ) */
1126     return result;
1127 }
1128 
1129 
1130 /**
1131  * \brief   Count leading zeros
1132  * \details Counts the number of leading zeros of a data value.
1133  * \param [in]  value  Value to count the leading zeros
1134  * \return             number of leading zeros in value
1135  */
__CLZ(uint32_t value)1136 __STATIC_FORCEINLINE uint8_t __CLZ( uint32_t value )
1137 {
1138     /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
1139      * __builtin_clz(0) is undefined behaviour, so handle this case specially.
1140      * This guarantees ARM-compatible results if happening to compile on a non-ARM
1141      * target, and ensures the compiler doesn't decide to activate any
1142      * optimisations using the logic "value was passed to __builtin_clz, so it
1143      * is non-zero".
1144      * ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a
1145      * single CLZ instruction.
1146      */
1147     if( value == 0U )
1148     {
1149         return 32U;
1150     }
1151 
1152     return __builtin_clz( value );
1153 }
1154 
1155 
1156 #if ( ( defined( __ARM_ARCH_7M__ ) && ( __ARM_ARCH_7M__ == 1 ) ) ||         \
1157     ( defined( __ARM_ARCH_7EM__ ) && ( __ARM_ARCH_7EM__ == 1 ) ) ||         \
1158     ( defined( __ARM_ARCH_8M_MAIN__ ) && ( __ARM_ARCH_8M_MAIN__ == 1 ) ) || \
1159     ( defined( __ARM_ARCH_8M_BASE__ ) && ( __ARM_ARCH_8M_BASE__ == 1 ) ) )
1160 
1161 /**
1162  * \brief   LDR Exclusive (8 bit)
1163  * \details Executes a exclusive LDR instruction for 8 bit value.
1164  * \param [in]    ptr  Pointer to data
1165  * \return             value of type uint8_t at (*ptr)
1166  */
__LDREXB(volatile uint8_t * addr)1167     __STATIC_FORCEINLINE uint8_t __LDREXB( volatile uint8_t * addr )
1168     {
1169         uint32_t result;
1170 
1171         #if ( __GNUC__ > 4 ) || ( __GNUC__ == 4 && __GNUC_MINOR__ >= 8 )
1172             __ASM volatile ( "ldrexb %0, %1" : "=r" ( result ) : "Q" ( *addr ) );
1173         #else
1174 
1175             /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
1176              * accepted by assembler. So has to use following less efficient pattern.
1177              */
1178             __ASM volatile ( "ldrexb %0, [%1]" : "=r" ( result ) : "r" ( addr ) : "memory" );
1179         #endif
1180         return( ( uint8_t ) result ); /* Add explicit type cast here */
1181     }
1182 
1183 
1184 /**
1185  * \brief   LDR Exclusive (16 bit)
1186  * \details Executes a exclusive LDR instruction for 16 bit values.
1187  * \param [in]    ptr  Pointer to data
1188  * \return        value of type uint16_t at (*ptr)
1189  */
__LDREXH(volatile uint16_t * addr)1190     __STATIC_FORCEINLINE uint16_t __LDREXH( volatile uint16_t * addr )
1191     {
1192         uint32_t result;
1193 
1194         #if ( __GNUC__ > 4 ) || ( __GNUC__ == 4 && __GNUC_MINOR__ >= 8 )
1195             __ASM volatile ( "ldrexh %0, %1" : "=r" ( result ) : "Q" ( *addr ) );
1196         #else
1197 
1198             /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
1199              * accepted by assembler. So has to use following less efficient pattern.
1200              */
1201             __ASM volatile ( "ldrexh %0, [%1]" : "=r" ( result ) : "r" ( addr ) : "memory" );
1202         #endif
1203         return( ( uint16_t ) result ); /* Add explicit type cast here */
1204     }
1205 
1206 
1207 /**
1208  * \brief   LDR Exclusive (32 bit)
1209  * \details Executes a exclusive LDR instruction for 32 bit values.
1210  * \param [in]    ptr  Pointer to data
1211  * \return        value of type uint32_t at (*ptr)
1212  */
__LDREXW(volatile uint32_t * addr)1213     __STATIC_FORCEINLINE uint32_t __LDREXW( volatile uint32_t * addr )
1214     {
1215         uint32_t result;
1216 
1217         __ASM volatile ( "ldrex %0, %1" : "=r" ( result ) : "Q" ( *addr ) );
1218 
1219         return( result );
1220     }
1221 
1222 
1223 /**
1224  * \brief   STR Exclusive (8 bit)
1225  * \details Executes a exclusive STR instruction for 8 bit values.
1226  * \param [in]  value  Value to store
1227  * \param [in]    ptr  Pointer to location
1228  * \return          0  Function succeeded
1229  * \return          1  Function failed
1230  */
__STREXB(uint8_t value,volatile uint8_t * addr)1231     __STATIC_FORCEINLINE uint32_t __STREXB( uint8_t value,
1232                                             volatile uint8_t * addr )
1233     {
1234         uint32_t result;
1235 
1236         __ASM volatile ( "strexb %0, %2, %1" : "=&r" ( result ), "=Q" ( *addr ) : "r" ( ( uint32_t ) value ) );
1237 
1238         return( result );
1239     }
1240 
1241 
1242 /**
1243  * \brief   STR Exclusive (16 bit)
1244  * \details Executes a exclusive STR instruction for 16 bit values.
1245  * \param [in]  value  Value to store
1246  * \param [in]    ptr  Pointer to location
1247  * \return          0  Function succeeded
1248  * \return          1  Function failed
1249  */
__STREXH(uint16_t value,volatile uint16_t * addr)1250     __STATIC_FORCEINLINE uint32_t __STREXH( uint16_t value,
1251                                             volatile uint16_t * addr )
1252     {
1253         uint32_t result;
1254 
1255         __ASM volatile ( "strexh %0, %2, %1" : "=&r" ( result ), "=Q" ( *addr ) : "r" ( ( uint32_t ) value ) );
1256 
1257         return( result );
1258     }
1259 
1260 
1261 /**
1262  * \brief   STR Exclusive (32 bit)
1263  * \details Executes a exclusive STR instruction for 32 bit values.
1264  * \param [in]  value  Value to store
1265  * \param [in]    ptr  Pointer to location
1266  * \return          0  Function succeeded
1267  * \return          1  Function failed
1268  */
__STREXW(uint32_t value,volatile uint32_t * addr)1269     __STATIC_FORCEINLINE uint32_t __STREXW( uint32_t value,
1270                                             volatile uint32_t * addr )
1271     {
1272         uint32_t result;
1273 
1274         __ASM volatile ( "strex %0, %2, %1" : "=&r" ( result ), "=Q" ( *addr ) : "r" ( value ) );
1275 
1276         return( result );
1277     }
1278 
1279 
1280 /**
1281  * \brief   Remove the exclusive lock
1282  * \details Removes the exclusive lock which is created by LDREX.
1283  */
__CLREX(void)1284     __STATIC_FORCEINLINE void __CLREX( void )
1285     {
1286         __ASM volatile ( "clrex" ::: "memory" );
1287     }
1288 
1289 #endif /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
1290         *  (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
1291         *  (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
1292         *  (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    ) */
1293 
1294 
1295 #if ( ( defined( __ARM_ARCH_7M__ ) && ( __ARM_ARCH_7M__ == 1 ) ) || \
1296     ( defined( __ARM_ARCH_7EM__ ) && ( __ARM_ARCH_7EM__ == 1 ) ) || \
1297     ( defined( __ARM_ARCH_8M_MAIN__ ) && ( __ARM_ARCH_8M_MAIN__ == 1 ) ) )
1298 
1299 /**
1300  * \brief   Signed Saturate
1301  * \details Saturates a signed value.
1302  * \param [in]  ARG1  Value to be saturated
1303  * \param [in]  ARG2  Bit position to saturate to (1..32)
1304  * \return             Saturated value
1305  */
1306     #define __SSAT( ARG1, ARG2 )                                                                      \
1307     __extension__                                                                                     \
1308         ( {                                                                                           \
1309         int32_t __RES, __ARG1 = ( ARG1 );                                                             \
1310         __ASM volatile ( "ssat %0, %1, %2" : "=r" ( __RES ) :  "I" ( ARG2 ), "r" ( __ARG1 ) : "cc" ); \
1311         __RES;                                                                                        \
1312     } )
1313 
1314 
1315 /**
1316  * \brief   Unsigned Saturate
1317  * \details Saturates an unsigned value.
1318  * \param [in]  ARG1  Value to be saturated
1319  * \param [in]  ARG2  Bit position to saturate to (0..31)
1320  * \return             Saturated value
1321  */
1322     #define __USAT( ARG1, ARG2 )                                                                      \
1323     __extension__                                                                                     \
1324         ( {                                                                                           \
1325         uint32_t __RES, __ARG1 = ( ARG1 );                                                            \
1326         __ASM volatile ( "usat %0, %1, %2" : "=r" ( __RES ) :  "I" ( ARG2 ), "r" ( __ARG1 ) : "cc" ); \
1327         __RES;                                                                                        \
1328     } )
1329 
1330 
1331 /**
1332  * \brief   Rotate Right with Extend (32 bit)
1333  * \details Moves each bit of a bitstring right by one bit.
1334  *         The carry input is shifted in at the left end of the bitstring.
1335  * \param [in]    value  Value to rotate
1336  * \return               Rotated value
1337  */
__RRX(uint32_t value)1338     __STATIC_FORCEINLINE uint32_t __RRX( uint32_t value )
1339     {
1340         uint32_t result;
1341 
1342         __ASM volatile ( "rrx %0, %1" : __CMSIS_GCC_OUT_REG( result ) : __CMSIS_GCC_USE_REG( value ) );
1343 
1344         return( result );
1345     }
1346 
1347 
1348 /**
1349  * \brief   LDRT Unprivileged (8 bit)
1350  * \details Executes a Unprivileged LDRT instruction for 8 bit value.
1351  * \param [in]    ptr  Pointer to data
1352  * \return             value of type uint8_t at (*ptr)
1353  */
__LDRBT(volatile uint8_t * ptr)1354     __STATIC_FORCEINLINE uint8_t __LDRBT( volatile uint8_t * ptr )
1355     {
1356         uint32_t result;
1357 
1358         #if ( __GNUC__ > 4 ) || ( __GNUC__ == 4 && __GNUC_MINOR__ >= 8 )
1359             __ASM volatile ( "ldrbt %0, %1" : "=r" ( result ) : "Q" ( *ptr ) );
1360         #else
1361 
1362             /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
1363              * accepted by assembler. So has to use following less efficient pattern.
1364              */
1365             __ASM volatile ( "ldrbt %0, [%1]" : "=r" ( result ) : "r" ( ptr ) : "memory" );
1366         #endif
1367         return( ( uint8_t ) result ); /* Add explicit type cast here */
1368     }
1369 
1370 
1371 /**
1372  * \brief   LDRT Unprivileged (16 bit)
1373  * \details Executes a Unprivileged LDRT instruction for 16 bit values.
1374  * \param [in]    ptr  Pointer to data
1375  * \return        value of type uint16_t at (*ptr)
1376  */
__LDRHT(volatile uint16_t * ptr)1377     __STATIC_FORCEINLINE uint16_t __LDRHT( volatile uint16_t * ptr )
1378     {
1379         uint32_t result;
1380 
1381         #if ( __GNUC__ > 4 ) || ( __GNUC__ == 4 && __GNUC_MINOR__ >= 8 )
1382             __ASM volatile ( "ldrht %0, %1" : "=r" ( result ) : "Q" ( *ptr ) );
1383         #else
1384 
1385             /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
1386              * accepted by assembler. So has to use following less efficient pattern.
1387              */
1388             __ASM volatile ( "ldrht %0, [%1]" : "=r" ( result ) : "r" ( ptr ) : "memory" );
1389         #endif
1390         return( ( uint16_t ) result ); /* Add explicit type cast here */
1391     }
1392 
1393 
1394 /**
1395  * \brief   LDRT Unprivileged (32 bit)
1396  * \details Executes a Unprivileged LDRT instruction for 32 bit values.
1397  * \param [in]    ptr  Pointer to data
1398  * \return        value of type uint32_t at (*ptr)
1399  */
__LDRT(volatile uint32_t * ptr)1400     __STATIC_FORCEINLINE uint32_t __LDRT( volatile uint32_t * ptr )
1401     {
1402         uint32_t result;
1403 
1404         __ASM volatile ( "ldrt %0, %1" : "=r" ( result ) : "Q" ( *ptr ) );
1405 
1406         return( result );
1407     }
1408 
1409 
1410 /**
1411  * \brief   STRT Unprivileged (8 bit)
1412  * \details Executes a Unprivileged STRT instruction for 8 bit values.
1413  * \param [in]  value  Value to store
1414  * \param [in]    ptr  Pointer to location
1415  */
__STRBT(uint8_t value,volatile uint8_t * ptr)1416     __STATIC_FORCEINLINE void __STRBT( uint8_t value,
1417                                        volatile uint8_t * ptr )
1418     {
1419         __ASM volatile ( "strbt %1, %0" : "=Q" ( *ptr ) : "r" ( ( uint32_t ) value ) );
1420     }
1421 
1422 
1423 /**
1424  * \brief   STRT Unprivileged (16 bit)
1425  * \details Executes a Unprivileged STRT instruction for 16 bit values.
1426  * \param [in]  value  Value to store
1427  * \param [in]    ptr  Pointer to location
1428  */
__STRHT(uint16_t value,volatile uint16_t * ptr)1429     __STATIC_FORCEINLINE void __STRHT( uint16_t value,
1430                                        volatile uint16_t * ptr )
1431     {
1432         __ASM volatile ( "strht %1, %0" : "=Q" ( *ptr ) : "r" ( ( uint32_t ) value ) );
1433     }
1434 
1435 
1436 /**
1437  * \brief   STRT Unprivileged (32 bit)
1438  * \details Executes a Unprivileged STRT instruction for 32 bit values.
1439  * \param [in]  value  Value to store
1440  * \param [in]    ptr  Pointer to location
1441  */
__STRT(uint32_t value,volatile uint32_t * ptr)1442     __STATIC_FORCEINLINE void __STRT( uint32_t value,
1443                                       volatile uint32_t * ptr )
1444     {
1445         __ASM volatile ( "strt %1, %0" : "=Q" ( *ptr ) : "r" ( value ) );
1446     }
1447 
1448 #else /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
1449        *  (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
1450        *  (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    ) */
1451 
1452 /**
1453  * \brief   Signed Saturate
1454  * \details Saturates a signed value.
1455  * \param [in]  value  Value to be saturated
1456  * \param [in]    sat  Bit position to saturate to (1..32)
1457  * \return             Saturated value
1458  */
__SSAT(int32_t val,uint32_t sat)1459     __STATIC_FORCEINLINE int32_t __SSAT( int32_t val,
1460                                          uint32_t sat )
1461     {
1462         if( ( sat >= 1U ) && ( sat <= 32U ) )
1463         {
1464             const int32_t max = ( int32_t ) ( ( 1U << ( sat - 1U ) ) - 1U );
1465             const int32_t min = -1 - max;
1466 
1467             if( val > max )
1468             {
1469                 return max;
1470             }
1471             else if( val < min )
1472             {
1473                 return min;
1474             }
1475         }
1476 
1477         return val;
1478     }
1479 
1480 /**
1481  * \brief   Unsigned Saturate
1482  * \details Saturates an unsigned value.
1483  * \param [in]  value  Value to be saturated
1484  * \param [in]    sat  Bit position to saturate to (0..31)
1485  * \return             Saturated value
1486  */
__USAT(int32_t val,uint32_t sat)1487     __STATIC_FORCEINLINE uint32_t __USAT( int32_t val,
1488                                           uint32_t sat )
1489     {
1490         if( sat <= 31U )
1491         {
1492             const uint32_t max = ( ( 1U << sat ) - 1U );
1493 
1494             if( val > ( int32_t ) max )
1495             {
1496                 return max;
1497             }
1498             else if( val < 0 )
1499             {
1500                 return 0U;
1501             }
1502         }
1503 
1504         return ( uint32_t ) val;
1505     }
1506 
1507 #endif /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
1508         *  (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
1509         *  (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    ) */
1510 
1511 
1512 #if ( ( defined( __ARM_ARCH_8M_MAIN__ ) && ( __ARM_ARCH_8M_MAIN__ == 1 ) ) || \
1513     ( defined( __ARM_ARCH_8M_BASE__ ) && ( __ARM_ARCH_8M_BASE__ == 1 ) ) )
1514 
1515 /**
1516  * \brief   Load-Acquire (8 bit)
1517  * \details Executes a LDAB instruction for 8 bit value.
1518  * \param [in]    ptr  Pointer to data
1519  * \return             value of type uint8_t at (*ptr)
1520  */
__LDAB(volatile uint8_t * ptr)1521     __STATIC_FORCEINLINE uint8_t __LDAB( volatile uint8_t * ptr )
1522     {
1523         uint32_t result;
1524 
1525         __ASM volatile ( "ldab %0, %1" : "=r" ( result ) : "Q" ( *ptr ) : "memory" );
1526 
1527         return( ( uint8_t ) result );
1528     }
1529 
1530 
1531 /**
1532  * \brief   Load-Acquire (16 bit)
1533  * \details Executes a LDAH instruction for 16 bit values.
1534  * \param [in]    ptr  Pointer to data
1535  * \return        value of type uint16_t at (*ptr)
1536  */
__LDAH(volatile uint16_t * ptr)1537     __STATIC_FORCEINLINE uint16_t __LDAH( volatile uint16_t * ptr )
1538     {
1539         uint32_t result;
1540 
1541         __ASM volatile ( "ldah %0, %1" : "=r" ( result ) : "Q" ( *ptr ) : "memory" );
1542 
1543         return( ( uint16_t ) result );
1544     }
1545 
1546 
1547 /**
1548  * \brief   Load-Acquire (32 bit)
1549  * \details Executes a LDA instruction for 32 bit values.
1550  * \param [in]    ptr  Pointer to data
1551  * \return        value of type uint32_t at (*ptr)
1552  */
__LDA(volatile uint32_t * ptr)1553     __STATIC_FORCEINLINE uint32_t __LDA( volatile uint32_t * ptr )
1554     {
1555         uint32_t result;
1556 
1557         __ASM volatile ( "lda %0, %1" : "=r" ( result ) : "Q" ( *ptr ) : "memory" );
1558 
1559         return( result );
1560     }
1561 
1562 
1563 /**
1564  * \brief   Store-Release (8 bit)
1565  * \details Executes a STLB instruction for 8 bit values.
1566  * \param [in]  value  Value to store
1567  * \param [in]    ptr  Pointer to location
1568  */
__STLB(uint8_t value,volatile uint8_t * ptr)1569     __STATIC_FORCEINLINE void __STLB( uint8_t value,
1570                                       volatile uint8_t * ptr )
1571     {
1572         __ASM volatile ( "stlb %1, %0" : "=Q" ( *ptr ) : "r" ( ( uint32_t ) value ) : "memory" );
1573     }
1574 
1575 
1576 /**
1577  * \brief   Store-Release (16 bit)
1578  * \details Executes a STLH instruction for 16 bit values.
1579  * \param [in]  value  Value to store
1580  * \param [in]    ptr  Pointer to location
1581  */
__STLH(uint16_t value,volatile uint16_t * ptr)1582     __STATIC_FORCEINLINE void __STLH( uint16_t value,
1583                                       volatile uint16_t * ptr )
1584     {
1585         __ASM volatile ( "stlh %1, %0" : "=Q" ( *ptr ) : "r" ( ( uint32_t ) value ) : "memory" );
1586     }
1587 
1588 
1589 /**
1590  * \brief   Store-Release (32 bit)
1591  * \details Executes a STL instruction for 32 bit values.
1592  * \param [in]  value  Value to store
1593  * \param [in]    ptr  Pointer to location
1594  */
__STL(uint32_t value,volatile uint32_t * ptr)1595     __STATIC_FORCEINLINE void __STL( uint32_t value,
1596                                      volatile uint32_t * ptr )
1597     {
1598         __ASM volatile ( "stl %1, %0" : "=Q" ( *ptr ) : "r" ( ( uint32_t ) value ) : "memory" );
1599     }
1600 
1601 
1602 /**
1603  * \brief   Load-Acquire Exclusive (8 bit)
1604  * \details Executes a LDAB exclusive instruction for 8 bit value.
1605  * \param [in]    ptr  Pointer to data
1606  * \return             value of type uint8_t at (*ptr)
1607  */
__LDAEXB(volatile uint8_t * ptr)1608     __STATIC_FORCEINLINE uint8_t __LDAEXB( volatile uint8_t * ptr )
1609     {
1610         uint32_t result;
1611 
1612         __ASM volatile ( "ldaexb %0, %1" : "=r" ( result ) : "Q" ( *ptr ) : "memory" );
1613 
1614         return( ( uint8_t ) result );
1615     }
1616 
1617 
1618 /**
1619  * \brief   Load-Acquire Exclusive (16 bit)
1620  * \details Executes a LDAH exclusive instruction for 16 bit values.
1621  * \param [in]    ptr  Pointer to data
1622  * \return        value of type uint16_t at (*ptr)
1623  */
__LDAEXH(volatile uint16_t * ptr)1624     __STATIC_FORCEINLINE uint16_t __LDAEXH( volatile uint16_t * ptr )
1625     {
1626         uint32_t result;
1627 
1628         __ASM volatile ( "ldaexh %0, %1" : "=r" ( result ) : "Q" ( *ptr ) : "memory" );
1629 
1630         return( ( uint16_t ) result );
1631     }
1632 
1633 
1634 /**
1635  * \brief   Load-Acquire Exclusive (32 bit)
1636  * \details Executes a LDA exclusive instruction for 32 bit values.
1637  * \param [in]    ptr  Pointer to data
1638  * \return        value of type uint32_t at (*ptr)
1639  */
__LDAEX(volatile uint32_t * ptr)1640     __STATIC_FORCEINLINE uint32_t __LDAEX( volatile uint32_t * ptr )
1641     {
1642         uint32_t result;
1643 
1644         __ASM volatile ( "ldaex %0, %1" : "=r" ( result ) : "Q" ( *ptr ) : "memory" );
1645 
1646         return( result );
1647     }
1648 
1649 
1650 /**
1651  * \brief   Store-Release Exclusive (8 bit)
1652  * \details Executes a STLB exclusive instruction for 8 bit values.
1653  * \param [in]  value  Value to store
1654  * \param [in]    ptr  Pointer to location
1655  * \return          0  Function succeeded
1656  * \return          1  Function failed
1657  */
__STLEXB(uint8_t value,volatile uint8_t * ptr)1658     __STATIC_FORCEINLINE uint32_t __STLEXB( uint8_t value,
1659                                             volatile uint8_t * ptr )
1660     {
1661         uint32_t result;
1662 
1663         __ASM volatile ( "stlexb %0, %2, %1" : "=&r" ( result ), "=Q" ( *ptr ) : "r" ( ( uint32_t ) value ) : "memory" );
1664 
1665         return( result );
1666     }
1667 
1668 
1669 /**
1670  * \brief   Store-Release Exclusive (16 bit)
1671  * \details Executes a STLH exclusive instruction for 16 bit values.
1672  * \param [in]  value  Value to store
1673  * \param [in]    ptr  Pointer to location
1674  * \return          0  Function succeeded
1675  * \return          1  Function failed
1676  */
__STLEXH(uint16_t value,volatile uint16_t * ptr)1677     __STATIC_FORCEINLINE uint32_t __STLEXH( uint16_t value,
1678                                             volatile uint16_t * ptr )
1679     {
1680         uint32_t result;
1681 
1682         __ASM volatile ( "stlexh %0, %2, %1" : "=&r" ( result ), "=Q" ( *ptr ) : "r" ( ( uint32_t ) value ) : "memory" );
1683 
1684         return( result );
1685     }
1686 
1687 
1688 /**
1689  * \brief   Store-Release Exclusive (32 bit)
1690  * \details Executes a STL exclusive instruction for 32 bit values.
1691  * \param [in]  value  Value to store
1692  * \param [in]    ptr  Pointer to location
1693  * \return          0  Function succeeded
1694  * \return          1  Function failed
1695  */
__STLEX(uint32_t value,volatile uint32_t * ptr)1696     __STATIC_FORCEINLINE uint32_t __STLEX( uint32_t value,
1697                                            volatile uint32_t * ptr )
1698     {
1699         uint32_t result;
1700 
1701         __ASM volatile ( "stlex %0, %2, %1" : "=&r" ( result ), "=Q" ( *ptr ) : "r" ( ( uint32_t ) value ) : "memory" );
1702 
1703         return( result );
1704     }
1705 
1706 #endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
1707         *  (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    ) */
1708 
1709 /*@}*/ /* end of group CMSIS_Core_InstructionInterface */
1710 
1711 
1712 /* ###################  Compiler specific Intrinsics  ########################### */
1713 
1714 /** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
1715  * Access to dedicated SIMD instructions
1716  * @{
1717  */
1718 
1719 #if ( defined( __ARM_FEATURE_DSP ) && ( __ARM_FEATURE_DSP == 1 ) )
1720 
__SADD8(uint32_t op1,uint32_t op2)1721     __STATIC_FORCEINLINE uint32_t __SADD8( uint32_t op1,
1722                                            uint32_t op2 )
1723     {
1724         uint32_t result;
1725 
1726         __ASM volatile ( "sadd8 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1727 
1728         return( result );
1729     }
1730 
__QADD8(uint32_t op1,uint32_t op2)1731     __STATIC_FORCEINLINE uint32_t __QADD8( uint32_t op1,
1732                                            uint32_t op2 )
1733     {
1734         uint32_t result;
1735 
1736         __ASM( "qadd8 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1737         return( result );
1738     }
1739 
__SHADD8(uint32_t op1,uint32_t op2)1740     __STATIC_FORCEINLINE uint32_t __SHADD8( uint32_t op1,
1741                                             uint32_t op2 )
1742     {
1743         uint32_t result;
1744 
1745         __ASM( "shadd8 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1746         return( result );
1747     }
1748 
__UADD8(uint32_t op1,uint32_t op2)1749     __STATIC_FORCEINLINE uint32_t __UADD8( uint32_t op1,
1750                                            uint32_t op2 )
1751     {
1752         uint32_t result;
1753 
1754         __ASM volatile ( "uadd8 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1755 
1756         return( result );
1757     }
1758 
__UQADD8(uint32_t op1,uint32_t op2)1759     __STATIC_FORCEINLINE uint32_t __UQADD8( uint32_t op1,
1760                                             uint32_t op2 )
1761     {
1762         uint32_t result;
1763 
1764         __ASM( "uqadd8 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1765         return( result );
1766     }
1767 
__UHADD8(uint32_t op1,uint32_t op2)1768     __STATIC_FORCEINLINE uint32_t __UHADD8( uint32_t op1,
1769                                             uint32_t op2 )
1770     {
1771         uint32_t result;
1772 
1773         __ASM( "uhadd8 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1774         return( result );
1775     }
1776 
1777 
__SSUB8(uint32_t op1,uint32_t op2)1778     __STATIC_FORCEINLINE uint32_t __SSUB8( uint32_t op1,
1779                                            uint32_t op2 )
1780     {
1781         uint32_t result;
1782 
1783         __ASM volatile ( "ssub8 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1784 
1785         return( result );
1786     }
1787 
__QSUB8(uint32_t op1,uint32_t op2)1788     __STATIC_FORCEINLINE uint32_t __QSUB8( uint32_t op1,
1789                                            uint32_t op2 )
1790     {
1791         uint32_t result;
1792 
1793         __ASM( "qsub8 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1794         return( result );
1795     }
1796 
__SHSUB8(uint32_t op1,uint32_t op2)1797     __STATIC_FORCEINLINE uint32_t __SHSUB8( uint32_t op1,
1798                                             uint32_t op2 )
1799     {
1800         uint32_t result;
1801 
1802         __ASM( "shsub8 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1803         return( result );
1804     }
1805 
__USUB8(uint32_t op1,uint32_t op2)1806     __STATIC_FORCEINLINE uint32_t __USUB8( uint32_t op1,
1807                                            uint32_t op2 )
1808     {
1809         uint32_t result;
1810 
1811         __ASM volatile ( "usub8 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1812 
1813         return( result );
1814     }
1815 
__UQSUB8(uint32_t op1,uint32_t op2)1816     __STATIC_FORCEINLINE uint32_t __UQSUB8( uint32_t op1,
1817                                             uint32_t op2 )
1818     {
1819         uint32_t result;
1820 
1821         __ASM( "uqsub8 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1822         return( result );
1823     }
1824 
__UHSUB8(uint32_t op1,uint32_t op2)1825     __STATIC_FORCEINLINE uint32_t __UHSUB8( uint32_t op1,
1826                                             uint32_t op2 )
1827     {
1828         uint32_t result;
1829 
1830         __ASM( "uhsub8 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1831         return( result );
1832     }
1833 
1834 
__SADD16(uint32_t op1,uint32_t op2)1835     __STATIC_FORCEINLINE uint32_t __SADD16( uint32_t op1,
1836                                             uint32_t op2 )
1837     {
1838         uint32_t result;
1839 
1840         __ASM volatile ( "sadd16 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1841 
1842         return( result );
1843     }
1844 
__QADD16(uint32_t op1,uint32_t op2)1845     __STATIC_FORCEINLINE uint32_t __QADD16( uint32_t op1,
1846                                             uint32_t op2 )
1847     {
1848         uint32_t result;
1849 
1850         __ASM( "qadd16 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1851         return( result );
1852     }
1853 
__SHADD16(uint32_t op1,uint32_t op2)1854     __STATIC_FORCEINLINE uint32_t __SHADD16( uint32_t op1,
1855                                              uint32_t op2 )
1856     {
1857         uint32_t result;
1858 
1859         __ASM( "shadd16 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1860         return( result );
1861     }
1862 
__UADD16(uint32_t op1,uint32_t op2)1863     __STATIC_FORCEINLINE uint32_t __UADD16( uint32_t op1,
1864                                             uint32_t op2 )
1865     {
1866         uint32_t result;
1867 
1868         __ASM volatile ( "uadd16 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1869 
1870         return( result );
1871     }
1872 
__UQADD16(uint32_t op1,uint32_t op2)1873     __STATIC_FORCEINLINE uint32_t __UQADD16( uint32_t op1,
1874                                              uint32_t op2 )
1875     {
1876         uint32_t result;
1877 
1878         __ASM( "uqadd16 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1879         return( result );
1880     }
1881 
__UHADD16(uint32_t op1,uint32_t op2)1882     __STATIC_FORCEINLINE uint32_t __UHADD16( uint32_t op1,
1883                                              uint32_t op2 )
1884     {
1885         uint32_t result;
1886 
1887         __ASM( "uhadd16 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1888         return( result );
1889     }
1890 
__SSUB16(uint32_t op1,uint32_t op2)1891     __STATIC_FORCEINLINE uint32_t __SSUB16( uint32_t op1,
1892                                             uint32_t op2 )
1893     {
1894         uint32_t result;
1895 
1896         __ASM volatile ( "ssub16 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1897 
1898         return( result );
1899     }
1900 
__QSUB16(uint32_t op1,uint32_t op2)1901     __STATIC_FORCEINLINE uint32_t __QSUB16( uint32_t op1,
1902                                             uint32_t op2 )
1903     {
1904         uint32_t result;
1905 
1906         __ASM( "qsub16 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1907         return( result );
1908     }
1909 
__SHSUB16(uint32_t op1,uint32_t op2)1910     __STATIC_FORCEINLINE uint32_t __SHSUB16( uint32_t op1,
1911                                              uint32_t op2 )
1912     {
1913         uint32_t result;
1914 
1915         __ASM( "shsub16 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1916         return( result );
1917     }
1918 
__USUB16(uint32_t op1,uint32_t op2)1919     __STATIC_FORCEINLINE uint32_t __USUB16( uint32_t op1,
1920                                             uint32_t op2 )
1921     {
1922         uint32_t result;
1923 
1924         __ASM volatile ( "usub16 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1925 
1926         return( result );
1927     }
1928 
__UQSUB16(uint32_t op1,uint32_t op2)1929     __STATIC_FORCEINLINE uint32_t __UQSUB16( uint32_t op1,
1930                                              uint32_t op2 )
1931     {
1932         uint32_t result;
1933 
1934         __ASM( "uqsub16 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1935         return( result );
1936     }
1937 
__UHSUB16(uint32_t op1,uint32_t op2)1938     __STATIC_FORCEINLINE uint32_t __UHSUB16( uint32_t op1,
1939                                              uint32_t op2 )
1940     {
1941         uint32_t result;
1942 
1943         __ASM( "uhsub16 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1944         return( result );
1945     }
1946 
__SASX(uint32_t op1,uint32_t op2)1947     __STATIC_FORCEINLINE uint32_t __SASX( uint32_t op1,
1948                                           uint32_t op2 )
1949     {
1950         uint32_t result;
1951 
1952         __ASM volatile ( "sasx %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1953 
1954         return( result );
1955     }
1956 
__QASX(uint32_t op1,uint32_t op2)1957     __STATIC_FORCEINLINE uint32_t __QASX( uint32_t op1,
1958                                           uint32_t op2 )
1959     {
1960         uint32_t result;
1961 
1962         __ASM( "qasx %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1963         return( result );
1964     }
1965 
__SHASX(uint32_t op1,uint32_t op2)1966     __STATIC_FORCEINLINE uint32_t __SHASX( uint32_t op1,
1967                                            uint32_t op2 )
1968     {
1969         uint32_t result;
1970 
1971         __ASM( "shasx %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1972         return( result );
1973     }
1974 
__UASX(uint32_t op1,uint32_t op2)1975     __STATIC_FORCEINLINE uint32_t __UASX( uint32_t op1,
1976                                           uint32_t op2 )
1977     {
1978         uint32_t result;
1979 
1980         __ASM volatile ( "uasx %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1981 
1982         return( result );
1983     }
1984 
__UQASX(uint32_t op1,uint32_t op2)1985     __STATIC_FORCEINLINE uint32_t __UQASX( uint32_t op1,
1986                                            uint32_t op2 )
1987     {
1988         uint32_t result;
1989 
1990         __ASM( "uqasx %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
1991         return( result );
1992     }
1993 
__UHASX(uint32_t op1,uint32_t op2)1994     __STATIC_FORCEINLINE uint32_t __UHASX( uint32_t op1,
1995                                            uint32_t op2 )
1996     {
1997         uint32_t result;
1998 
1999         __ASM( "uhasx %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
2000         return( result );
2001     }
2002 
__SSAX(uint32_t op1,uint32_t op2)2003     __STATIC_FORCEINLINE uint32_t __SSAX( uint32_t op1,
2004                                           uint32_t op2 )
2005     {
2006         uint32_t result;
2007 
2008         __ASM volatile ( "ssax %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
2009 
2010         return( result );
2011     }
2012 
__QSAX(uint32_t op1,uint32_t op2)2013     __STATIC_FORCEINLINE uint32_t __QSAX( uint32_t op1,
2014                                           uint32_t op2 )
2015     {
2016         uint32_t result;
2017 
2018         __ASM( "qsax %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
2019         return( result );
2020     }
2021 
__SHSAX(uint32_t op1,uint32_t op2)2022     __STATIC_FORCEINLINE uint32_t __SHSAX( uint32_t op1,
2023                                            uint32_t op2 )
2024     {
2025         uint32_t result;
2026 
2027         __ASM( "shsax %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
2028         return( result );
2029     }
2030 
__USAX(uint32_t op1,uint32_t op2)2031     __STATIC_FORCEINLINE uint32_t __USAX( uint32_t op1,
2032                                           uint32_t op2 )
2033     {
2034         uint32_t result;
2035 
2036         __ASM volatile ( "usax %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
2037 
2038         return( result );
2039     }
2040 
__UQSAX(uint32_t op1,uint32_t op2)2041     __STATIC_FORCEINLINE uint32_t __UQSAX( uint32_t op1,
2042                                            uint32_t op2 )
2043     {
2044         uint32_t result;
2045 
2046         __ASM( "uqsax %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
2047         return( result );
2048     }
2049 
__UHSAX(uint32_t op1,uint32_t op2)2050     __STATIC_FORCEINLINE uint32_t __UHSAX( uint32_t op1,
2051                                            uint32_t op2 )
2052     {
2053         uint32_t result;
2054 
2055         __ASM( "uhsax %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
2056         return( result );
2057     }
2058 
__USAD8(uint32_t op1,uint32_t op2)2059     __STATIC_FORCEINLINE uint32_t __USAD8( uint32_t op1,
2060                                            uint32_t op2 )
2061     {
2062         uint32_t result;
2063 
2064         __ASM( "usad8 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
2065         return( result );
2066     }
2067 
__USADA8(uint32_t op1,uint32_t op2,uint32_t op3)2068     __STATIC_FORCEINLINE uint32_t __USADA8( uint32_t op1,
2069                                             uint32_t op2,
2070                                             uint32_t op3 )
2071     {
2072         uint32_t result;
2073 
2074         __ASM( "usada8 %0, %1, %2, %3" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ), "r" ( op3 ) );
2075         return( result );
2076     }
2077 
2078     #define __SSAT16( ARG1, ARG2 )                                                                      \
2079     ( {                                                                                                 \
2080         int32_t __RES, __ARG1 = ( ARG1 );                                                               \
2081         __ASM volatile ( "ssat16 %0, %1, %2" : "=r" ( __RES ) :  "I" ( ARG2 ), "r" ( __ARG1 ) : "cc" ); \
2082         __RES;                                                                                          \
2083     } )
2084 
2085     #define __USAT16( ARG1, ARG2 )                                                                      \
2086     ( {                                                                                                 \
2087         uint32_t __RES, __ARG1 = ( ARG1 );                                                              \
2088         __ASM volatile ( "usat16 %0, %1, %2" : "=r" ( __RES ) :  "I" ( ARG2 ), "r" ( __ARG1 ) : "cc" ); \
2089         __RES;                                                                                          \
2090     } )
2091 
__UXTB16(uint32_t op1)2092     __STATIC_FORCEINLINE uint32_t __UXTB16( uint32_t op1 )
2093     {
2094         uint32_t result;
2095 
2096         __ASM( "uxtb16 %0, %1" : "=r" ( result ) : "r" ( op1 ) );
2097         return( result );
2098     }
2099 
__UXTAB16(uint32_t op1,uint32_t op2)2100     __STATIC_FORCEINLINE uint32_t __UXTAB16( uint32_t op1,
2101                                              uint32_t op2 )
2102     {
2103         uint32_t result;
2104 
2105         __ASM( "uxtab16 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
2106         return( result );
2107     }
2108 
__SXTB16(uint32_t op1)2109     __STATIC_FORCEINLINE uint32_t __SXTB16( uint32_t op1 )
2110     {
2111         uint32_t result;
2112 
2113         __ASM( "sxtb16 %0, %1" : "=r" ( result ) : "r" ( op1 ) );
2114         return( result );
2115     }
2116 
__SXTB16_RORn(uint32_t op1,uint32_t rotate)2117     __STATIC_FORCEINLINE uint32_t __SXTB16_RORn( uint32_t op1,
2118                                                  uint32_t rotate )
2119     {
2120         uint32_t result;
2121 
2122         if( __builtin_constant_p( rotate ) && ( ( rotate == 8U ) || ( rotate == 16U ) || ( rotate == 24U ) ) )
2123         {
2124             __ASM volatile ( "sxtb16 %0, %1, ROR %2" : "=r" ( result ) : "r" ( op1 ), "i" ( rotate ) );
2125         }
2126         else
2127         {
2128             result = __SXTB16( __ROR( op1, rotate ) );
2129         }
2130 
2131         return result;
2132     }
2133 
__SXTAB16(uint32_t op1,uint32_t op2)2134     __STATIC_FORCEINLINE uint32_t __SXTAB16( uint32_t op1,
2135                                              uint32_t op2 )
2136     {
2137         uint32_t result;
2138 
2139         __ASM( "sxtab16 %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
2140         return( result );
2141     }
2142 
__SMUAD(uint32_t op1,uint32_t op2)2143     __STATIC_FORCEINLINE uint32_t __SMUAD( uint32_t op1,
2144                                            uint32_t op2 )
2145     {
2146         uint32_t result;
2147 
2148         __ASM volatile ( "smuad %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
2149 
2150         return( result );
2151     }
2152 
__SMUADX(uint32_t op1,uint32_t op2)2153     __STATIC_FORCEINLINE uint32_t __SMUADX( uint32_t op1,
2154                                             uint32_t op2 )
2155     {
2156         uint32_t result;
2157 
2158         __ASM volatile ( "smuadx %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
2159 
2160         return( result );
2161     }
2162 
__SMLAD(uint32_t op1,uint32_t op2,uint32_t op3)2163     __STATIC_FORCEINLINE uint32_t __SMLAD( uint32_t op1,
2164                                            uint32_t op2,
2165                                            uint32_t op3 )
2166     {
2167         uint32_t result;
2168 
2169         __ASM volatile ( "smlad %0, %1, %2, %3" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ), "r" ( op3 ) );
2170 
2171         return( result );
2172     }
2173 
__SMLADX(uint32_t op1,uint32_t op2,uint32_t op3)2174     __STATIC_FORCEINLINE uint32_t __SMLADX( uint32_t op1,
2175                                             uint32_t op2,
2176                                             uint32_t op3 )
2177     {
2178         uint32_t result;
2179 
2180         __ASM volatile ( "smladx %0, %1, %2, %3" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ), "r" ( op3 ) );
2181 
2182         return( result );
2183     }
2184 
__SMLALD(uint32_t op1,uint32_t op2,uint64_t acc)2185     __STATIC_FORCEINLINE uint64_t __SMLALD( uint32_t op1,
2186                                             uint32_t op2,
2187                                             uint64_t acc )
2188     {
2189         union llreg_u
2190         {
2191             uint32_t w32[ 2 ];
2192             uint64_t w64;
2193         }
2194         llr;
2195 
2196         llr.w64 = acc;
2197 
2198         #ifndef __ARMEB__ /* Little endian */
2199             __ASM volatile ( "smlald %0, %1, %2, %3" : "=r" ( llr.w32[ 0 ] ), "=r" ( llr.w32[ 1 ] ) : "r" ( op1 ), "r" ( op2 ), "0" ( llr.w32[ 0 ] ), "1" ( llr.w32[ 1 ] ) );
2200         #else /* Big endian */
2201             __ASM volatile ( "smlald %0, %1, %2, %3" : "=r" ( llr.w32[ 1 ] ), "=r" ( llr.w32[ 0 ] ) : "r" ( op1 ), "r" ( op2 ), "0" ( llr.w32[ 1 ] ), "1" ( llr.w32[ 0 ] ) );
2202         #endif
2203 
2204         return( llr.w64 );
2205     }
2206 
__SMLALDX(uint32_t op1,uint32_t op2,uint64_t acc)2207     __STATIC_FORCEINLINE uint64_t __SMLALDX( uint32_t op1,
2208                                              uint32_t op2,
2209                                              uint64_t acc )
2210     {
2211         union llreg_u
2212         {
2213             uint32_t w32[ 2 ];
2214             uint64_t w64;
2215         }
2216         llr;
2217 
2218         llr.w64 = acc;
2219 
2220         #ifndef __ARMEB__ /* Little endian */
2221             __ASM volatile ( "smlaldx %0, %1, %2, %3" : "=r" ( llr.w32[ 0 ] ), "=r" ( llr.w32[ 1 ] ) : "r" ( op1 ), "r" ( op2 ), "0" ( llr.w32[ 0 ] ), "1" ( llr.w32[ 1 ] ) );
2222         #else /* Big endian */
2223             __ASM volatile ( "smlaldx %0, %1, %2, %3" : "=r" ( llr.w32[ 1 ] ), "=r" ( llr.w32[ 0 ] ) : "r" ( op1 ), "r" ( op2 ), "0" ( llr.w32[ 1 ] ), "1" ( llr.w32[ 0 ] ) );
2224         #endif
2225 
2226         return( llr.w64 );
2227     }
2228 
__SMUSD(uint32_t op1,uint32_t op2)2229     __STATIC_FORCEINLINE uint32_t __SMUSD( uint32_t op1,
2230                                            uint32_t op2 )
2231     {
2232         uint32_t result;
2233 
2234         __ASM volatile ( "smusd %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
2235 
2236         return( result );
2237     }
2238 
__SMUSDX(uint32_t op1,uint32_t op2)2239     __STATIC_FORCEINLINE uint32_t __SMUSDX( uint32_t op1,
2240                                             uint32_t op2 )
2241     {
2242         uint32_t result;
2243 
2244         __ASM volatile ( "smusdx %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
2245 
2246         return( result );
2247     }
2248 
__SMLSD(uint32_t op1,uint32_t op2,uint32_t op3)2249     __STATIC_FORCEINLINE uint32_t __SMLSD( uint32_t op1,
2250                                            uint32_t op2,
2251                                            uint32_t op3 )
2252     {
2253         uint32_t result;
2254 
2255         __ASM volatile ( "smlsd %0, %1, %2, %3" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ), "r" ( op3 ) );
2256 
2257         return( result );
2258     }
2259 
__SMLSDX(uint32_t op1,uint32_t op2,uint32_t op3)2260     __STATIC_FORCEINLINE uint32_t __SMLSDX( uint32_t op1,
2261                                             uint32_t op2,
2262                                             uint32_t op3 )
2263     {
2264         uint32_t result;
2265 
2266         __ASM volatile ( "smlsdx %0, %1, %2, %3" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ), "r" ( op3 ) );
2267 
2268         return( result );
2269     }
2270 
__SMLSLD(uint32_t op1,uint32_t op2,uint64_t acc)2271     __STATIC_FORCEINLINE uint64_t __SMLSLD( uint32_t op1,
2272                                             uint32_t op2,
2273                                             uint64_t acc )
2274     {
2275         union llreg_u
2276         {
2277             uint32_t w32[ 2 ];
2278             uint64_t w64;
2279         }
2280         llr;
2281 
2282         llr.w64 = acc;
2283 
2284         #ifndef __ARMEB__ /* Little endian */
2285             __ASM volatile ( "smlsld %0, %1, %2, %3" : "=r" ( llr.w32[ 0 ] ), "=r" ( llr.w32[ 1 ] ) : "r" ( op1 ), "r" ( op2 ), "0" ( llr.w32[ 0 ] ), "1" ( llr.w32[ 1 ] ) );
2286         #else /* Big endian */
2287             __ASM volatile ( "smlsld %0, %1, %2, %3" : "=r" ( llr.w32[ 1 ] ), "=r" ( llr.w32[ 0 ] ) : "r" ( op1 ), "r" ( op2 ), "0" ( llr.w32[ 1 ] ), "1" ( llr.w32[ 0 ] ) );
2288         #endif
2289 
2290         return( llr.w64 );
2291     }
2292 
__SMLSLDX(uint32_t op1,uint32_t op2,uint64_t acc)2293     __STATIC_FORCEINLINE uint64_t __SMLSLDX( uint32_t op1,
2294                                              uint32_t op2,
2295                                              uint64_t acc )
2296     {
2297         union llreg_u
2298         {
2299             uint32_t w32[ 2 ];
2300             uint64_t w64;
2301         }
2302         llr;
2303 
2304         llr.w64 = acc;
2305 
2306         #ifndef __ARMEB__ /* Little endian */
2307             __ASM volatile ( "smlsldx %0, %1, %2, %3" : "=r" ( llr.w32[ 0 ] ), "=r" ( llr.w32[ 1 ] ) : "r" ( op1 ), "r" ( op2 ), "0" ( llr.w32[ 0 ] ), "1" ( llr.w32[ 1 ] ) );
2308         #else /* Big endian */
2309             __ASM volatile ( "smlsldx %0, %1, %2, %3" : "=r" ( llr.w32[ 1 ] ), "=r" ( llr.w32[ 0 ] ) : "r" ( op1 ), "r" ( op2 ), "0" ( llr.w32[ 1 ] ), "1" ( llr.w32[ 0 ] ) );
2310         #endif
2311 
2312         return( llr.w64 );
2313     }
2314 
__SEL(uint32_t op1,uint32_t op2)2315     __STATIC_FORCEINLINE uint32_t __SEL( uint32_t op1,
2316                                          uint32_t op2 )
2317     {
2318         uint32_t result;
2319 
2320         __ASM volatile ( "sel %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
2321 
2322         return( result );
2323     }
2324 
__QADD(int32_t op1,int32_t op2)2325     __STATIC_FORCEINLINE int32_t __QADD( int32_t op1,
2326                                          int32_t op2 )
2327     {
2328         int32_t result;
2329 
2330         __ASM volatile ( "qadd %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
2331 
2332         return( result );
2333     }
2334 
__QSUB(int32_t op1,int32_t op2)2335     __STATIC_FORCEINLINE int32_t __QSUB( int32_t op1,
2336                                          int32_t op2 )
2337     {
2338         int32_t result;
2339 
2340         __ASM volatile ( "qsub %0, %1, %2" : "=r" ( result ) : "r" ( op1 ), "r" ( op2 ) );
2341 
2342         return( result );
2343     }
2344 
2345 
2346     #define __PKHBT( ARG1, ARG2, ARG3 )                                                                       \
2347     ( {                                                                                                       \
2348         uint32_t __RES, __ARG1 = ( ARG1 ), __ARG2 = ( ARG2 );                                                 \
2349         __ASM( "pkhbt %0, %1, %2, lsl %3" : "=r" ( __RES ) :  "r" ( __ARG1 ), "r" ( __ARG2 ), "I" ( ARG3 ) ); \
2350         __RES;                                                                                                \
2351     } )
2352 
2353     #define __PKHTB( ARG1, ARG2, ARG3 )                                                                       \
2354     ( {                                                                                                       \
2355         uint32_t __RES, __ARG1 = ( ARG1 ), __ARG2 = ( ARG2 );                                                 \
2356         if( ARG3 == 0 )                                                                                       \
2357         __ASM( "pkhtb %0, %1, %2" : "=r" ( __RES ) :  "r" ( __ARG1 ), "r" ( __ARG2 ) );                       \
2358         else                                                                                                  \
2359         __ASM( "pkhtb %0, %1, %2, asr %3" : "=r" ( __RES ) :  "r" ( __ARG1 ), "r" ( __ARG2 ), "I" ( ARG3 ) ); \
2360         __RES;                                                                                                \
2361     } )
2362 
2363 
__SMMLA(int32_t op1,int32_t op2,int32_t op3)2364     __STATIC_FORCEINLINE int32_t __SMMLA( int32_t op1,
2365                                           int32_t op2,
2366                                           int32_t op3 )
2367     {
2368         int32_t result;
2369 
2370         __ASM( "smmla %0, %1, %2, %3" : "=r" ( result ) : "r"  ( op1 ), "r" ( op2 ), "r" ( op3 ) );
2371         return( result );
2372     }
2373 
2374 #endif /* (__ARM_FEATURE_DSP == 1) */
2375 /*@} end of group CMSIS_SIMD_intrinsics */
2376 
2377 
2378 #pragma GCC diagnostic pop
2379 
2380 #endif /* __CMSIS_GCC_H */
2381