openMMC
Open Source Modular MMC for AMCs
cmsis_gcc.h
1/**************************************************************************/
7/*
8 * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
9 *
10 * SPDX-License-Identifier: Apache-2.0
11 *
12 * Licensed under the Apache License, Version 2.0 (the License); you may
13 * not use this file except in compliance with the License.
14 * You may obtain a copy of the License at
15 *
16 * www.apache.org/licenses/LICENSE-2.0
17 *
18 * Unless required by applicable law or agreed to in writing, software
19 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21 * See the License for the specific language governing permissions and
22 * limitations under the License.
23 */
24
25#ifndef __CMSIS_GCC_H
26#define __CMSIS_GCC_H
27
28/* ignore some GCC warnings */
29#pragma GCC diagnostic push
30#pragma GCC diagnostic ignored "-Wsign-conversion"
31#pragma GCC diagnostic ignored "-Wconversion"
32#pragma GCC diagnostic ignored "-Wunused-parameter"
33
34/* Fallback for __has_builtin */
35#ifndef __has_builtin
36 #define __has_builtin(x) (0)
37#endif
38
39/* CMSIS compiler specific defines */
40#ifndef __ASM
41 #define __ASM __asm
42#endif
43#ifndef __INLINE
44 #define __INLINE inline
45#endif
46#ifndef __STATIC_INLINE
47 #define __STATIC_INLINE static inline
48#endif
49#ifndef __STATIC_FORCEINLINE
50 #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline
51#endif
52#ifndef __NO_RETURN
53 #define __NO_RETURN __attribute__((__noreturn__))
54#endif
55#ifndef __USED
56 #define __USED __attribute__((used))
57#endif
58#ifndef __WEAK
59 #define __WEAK __attribute__((weak))
60#endif
61#ifndef __PACKED
62 #define __PACKED __attribute__((packed, aligned(1)))
63#endif
64#ifndef __PACKED_STRUCT
65 #define __PACKED_STRUCT struct __attribute__((packed, aligned(1)))
66#endif
67#ifndef __PACKED_UNION
68 #define __PACKED_UNION union __attribute__((packed, aligned(1)))
69#endif
70#ifndef __UNALIGNED_UINT32 /* deprecated */
71 #pragma GCC diagnostic push
72 #pragma GCC diagnostic ignored "-Wpacked"
73 #pragma GCC diagnostic ignored "-Wattributes"
74 struct __attribute__((packed)) T_UINT32 { uint32_t v; };
75 #pragma GCC diagnostic pop
76 #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
77#endif
78#ifndef __UNALIGNED_UINT16_WRITE
79 #pragma GCC diagnostic push
80 #pragma GCC diagnostic ignored "-Wpacked"
81 #pragma GCC diagnostic ignored "-Wattributes"
 82 __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
 83 #pragma GCC diagnostic pop
84 #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
85#endif
86#ifndef __UNALIGNED_UINT16_READ
87 #pragma GCC diagnostic push
88 #pragma GCC diagnostic ignored "-Wpacked"
89 #pragma GCC diagnostic ignored "-Wattributes"
 90 __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
 91 #pragma GCC diagnostic pop
92 #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v)
93#endif
94#ifndef __UNALIGNED_UINT32_WRITE
95 #pragma GCC diagnostic push
96 #pragma GCC diagnostic ignored "-Wpacked"
97 #pragma GCC diagnostic ignored "-Wattributes"
 98 __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
 99 #pragma GCC diagnostic pop
100 #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
101#endif
102#ifndef __UNALIGNED_UINT32_READ
103 #pragma GCC diagnostic push
104 #pragma GCC diagnostic ignored "-Wpacked"
105 #pragma GCC diagnostic ignored "-Wattributes"
 106 __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
 107 #pragma GCC diagnostic pop
108 #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v)
109#endif
110#ifndef __ALIGNED
111 #define __ALIGNED(x) __attribute__((aligned(x)))
112#endif
113#ifndef __RESTRICT
114 #define __RESTRICT __restrict
115#endif
116#ifndef __COMPILER_BARRIER
117 #define __COMPILER_BARRIER() __ASM volatile("":::"memory")
118#endif
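/* Usage sketch (illustrative, not part of this header): reading and writing
 * possibly misaligned little-endian fields in a raw byte buffer with the
 * helpers above; the buffer name and offsets are arbitrary examples.
 *
 *   uint8_t frame[8] = {0};
 *   __UNALIGNED_UINT16_WRITE(&frame[1], 0xBEEFU);       // odd address is fine
 *   uint16_t tag  = __UNALIGNED_UINT16_READ(&frame[1]);
 *   uint32_t word = __UNALIGNED_UINT32_READ(&frame[3]);
 *   __COMPILER_BARRIER();  // stop the compiler reordering across this point
 */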
119
120/* ######################### Startup and Lowlevel Init ######################## */
121
122#ifndef __PROGRAM_START
123
131__STATIC_FORCEINLINE __NO_RETURN void __cmsis_start(void)
132{
133 extern void _start(void) __NO_RETURN;
134
135 typedef struct {
136 uint32_t const* src;
137 uint32_t* dest;
138 uint32_t wlen;
139 } __copy_table_t;
140
141 typedef struct {
142 uint32_t* dest;
143 uint32_t wlen;
144 } __zero_table_t;
145
146 extern const __copy_table_t __copy_table_start__;
147 extern const __copy_table_t __copy_table_end__;
148 extern const __zero_table_t __zero_table_start__;
149 extern const __zero_table_t __zero_table_end__;
150
151 for (__copy_table_t const* pTable = &__copy_table_start__; pTable < &__copy_table_end__; ++pTable) {
152 for(uint32_t i=0u; i<pTable->wlen; ++i) {
153 pTable->dest[i] = pTable->src[i];
154 }
155 }
156
157 for (__zero_table_t const* pTable = &__zero_table_start__; pTable < &__zero_table_end__; ++pTable) {
158 for(uint32_t i=0u; i<pTable->wlen; ++i) {
159 pTable->dest[i] = 0u;
160 }
161 }
162
163 _start();
164}
165
166#define __PROGRAM_START __cmsis_start
167#endif
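/* Sketch of the linker-script side that __cmsis_start relies on, in GNU ld
 * syntax. The flash/data symbol names (__etext, __data_start__, __bss_start__,
 * ...) are assumptions taken from typical CMSIS reference scripts, not from
 * this header; only the table symbols themselves are required by the code
 * above.
 *
 *   .copy.table : {
 *     __copy_table_start__ = .;
 *     LONG (__etext); LONG (__data_start__);
 *     LONG ((__data_end__ - __data_start__) / 4);   (wlen counts 32-bit words)
 *     __copy_table_end__ = .;
 *   } > FLASH
 *   .zero.table : {
 *     __zero_table_start__ = .;
 *     LONG (__bss_start__); LONG ((__bss_end__ - __bss_start__) / 4);
 *     __zero_table_end__ = .;
 *   } > FLASH
 */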
168
169#ifndef __INITIAL_SP
170#define __INITIAL_SP __StackTop
171#endif
172
173#ifndef __STACK_LIMIT
174#define __STACK_LIMIT __StackLimit
175#endif
176
177#ifndef __VECTOR_TABLE
178#define __VECTOR_TABLE __Vectors
179#endif
180
181#ifndef __VECTOR_TABLE_ATTRIBUTE
182#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section(".vectors")))
183#endif
184
185/* ########################### Core Function Access ########################### */
196__STATIC_FORCEINLINE void __enable_irq(void)
197{
198 __ASM volatile ("cpsie i" : : : "memory");
199}
200
201
207__STATIC_FORCEINLINE void __disable_irq(void)
208{
209 __ASM volatile ("cpsid i" : : : "memory");
210}
211
212
218__STATIC_FORCEINLINE uint32_t __get_CONTROL(void)
219{
220 uint32_t result;
221
222 __ASM volatile ("MRS %0, control" : "=r" (result) );
223 return(result);
224}
225
226
227#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
233__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void)
234{
235 uint32_t result;
236
237 __ASM volatile ("MRS %0, control_ns" : "=r" (result) );
238 return(result);
239}
240#endif
241
242
248__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control)
249{
250 __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
251}
252
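/* Usage sketch: the classic CONTROL sequence for moving Thread mode onto the
 * process stack and dropping to unprivileged execution. The stack symbol is
 * an assumption; the trailing ISB after writing CONTROL is architecturally
 * required.
 *
 *   extern uint32_t __process_stack_top[];
 *   __set_PSP((uint32_t)__process_stack_top);  // PSP must be valid first
 *   __set_CONTROL(__get_CONTROL() | 0x3U);     // SPSEL = 1 (PSP), nPRIV = 1
 *   __ISB();                                   // flush pipeline after MSR
 */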
253
254#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
260__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control)
261{
262 __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory");
263}
264#endif
265
266
272__STATIC_FORCEINLINE uint32_t __get_IPSR(void)
273{
274 uint32_t result;
275
276 __ASM volatile ("MRS %0, ipsr" : "=r" (result) );
277 return(result);
278}
279
280
286__STATIC_FORCEINLINE uint32_t __get_APSR(void)
287{
288 uint32_t result;
289
290 __ASM volatile ("MRS %0, apsr" : "=r" (result) );
291 return(result);
292}
293
294
300__STATIC_FORCEINLINE uint32_t __get_xPSR(void)
301{
302 uint32_t result;
303
304 __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
305 return(result);
306}
307
308
314__STATIC_FORCEINLINE uint32_t __get_PSP(void)
315{
316 uint32_t result;
317
318 __ASM volatile ("MRS %0, psp" : "=r" (result) );
319 return(result);
320}
321
322
323#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
329__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void)
330{
331 uint32_t result;
332
333 __ASM volatile ("MRS %0, psp_ns" : "=r" (result) );
334 return(result);
335}
336#endif
337
338
344__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack)
345{
346 __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : );
347}
348
349
350#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
356__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack)
357{
358 __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : );
359}
360#endif
361
362
368__STATIC_FORCEINLINE uint32_t __get_MSP(void)
369{
370 uint32_t result;
371
372 __ASM volatile ("MRS %0, msp" : "=r" (result) );
373 return(result);
374}
375
376
377#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
383__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void)
384{
385 uint32_t result;
386
387 __ASM volatile ("MRS %0, msp_ns" : "=r" (result) );
388 return(result);
389}
390#endif
391
392
398__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack)
399{
400 __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : );
401}
402
403
404#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
410__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack)
411{
412 __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : );
413}
414#endif
415
416
417#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
423__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void)
424{
425 uint32_t result;
426
427 __ASM volatile ("MRS %0, sp_ns" : "=r" (result) );
428 return(result);
429}
430
431
437__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack)
438{
439 __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : );
440}
441#endif
442
443
449__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void)
450{
451 uint32_t result;
452
453 __ASM volatile ("MRS %0, primask" : "=r" (result) );
454 return(result);
455}
456
457
458#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
464__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void)
465{
466 uint32_t result;
467
468 __ASM volatile ("MRS %0, primask_ns" : "=r" (result) );
469 return(result);
470}
471#endif
472
473
479__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask)
480{
481 __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
482}
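/* Usage sketch: a nestable critical section built from the PRIMASK accessors
 * above; saving the previous mask keeps a nested section from re-enabling
 * interrupts too early.
 *
 *   uint32_t primask = __get_PRIMASK();  // remember current state
 *   __disable_irq();                     // PRIMASK = 1, IRQs masked
 *   ...                                  // code that must not be preempted
 *   __set_PRIMASK(primask);              // re-enables only if it was enabled
 */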
483
484
485#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
491__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask)
492{
493 __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory");
494}
495#endif
496
497
498#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
499 (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
500 (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) )
506__STATIC_FORCEINLINE void __enable_fault_irq(void)
507{
508 __ASM volatile ("cpsie f" : : : "memory");
509}
510
511
517__STATIC_FORCEINLINE void __disable_fault_irq(void)
518{
519 __ASM volatile ("cpsid f" : : : "memory");
520}
521
522
528__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void)
529{
530 uint32_t result;
531
532 __ASM volatile ("MRS %0, basepri" : "=r" (result) );
533 return(result);
534}
535
536
537#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
543__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void)
544{
545 uint32_t result;
546
547 __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) );
548 return(result);
549}
550#endif
551
552
558__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri)
559{
560 __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory");
561}
562
563
564#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
570__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri)
571{
572 __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory");
573}
574#endif
575
576
583__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri)
584{
585 __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory");
586}
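/* Usage sketch: masking only interrupts at or below a given priority with
 * BASEPRI_MAX, so higher-priority IRQs stay live. The value 0x40U is an
 * arbitrary example; its meaning depends on the device's __NVIC_PRIO_BITS.
 *
 *   uint32_t old = __get_BASEPRI();
 *   __set_BASEPRI_MAX(0x40U);  // raises the mask only if more restrictive
 *   ...                        // protected against priorities >= 0x40
 *   __set_BASEPRI(old);
 */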
587
588
594__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void)
595{
596 uint32_t result;
597
598 __ASM volatile ("MRS %0, faultmask" : "=r" (result) );
599 return(result);
600}
601
602
603#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
609__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void)
610{
611 uint32_t result;
612
613 __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) );
614 return(result);
615}
616#endif
617
618
624__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask)
625{
626 __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
627}
628
629
630#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
636__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask)
637{
638 __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory");
639}
640#endif
641
642#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
643 (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
644 (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */
645
646
647#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
648 (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) )
649
659__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void)
660{
661#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
662 (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
663 // without main extensions, the non-secure PSPLIM is RAZ/WI
664 return 0U;
665#else
666 uint32_t result;
667 __ASM volatile ("MRS %0, psplim" : "=r" (result) );
668 return result;
669#endif
670}
671
672#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3))
681__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void)
682{
683#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
684 // without main extensions, the non-secure PSPLIM is RAZ/WI
685 return 0U;
686#else
687 uint32_t result;
688 __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) );
689 return result;
690#endif
691}
692#endif
693
694
704__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit)
705{
706#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
707 (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
708 // without main extensions, the non-secure PSPLIM is RAZ/WI
709 (void)ProcStackPtrLimit;
710#else
711 __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit));
712#endif
713}
714
715
716#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
725__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit)
726{
727#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
728 // without main extensions, the non-secure PSPLIM is RAZ/WI
729 (void)ProcStackPtrLimit;
730#else
731 __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit));
732#endif
733}
734#endif
735
736
746__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void)
747{
748#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
749 (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
750 // without main extensions, the non-secure MSPLIM is RAZ/WI
751 return 0U;
752#else
753 uint32_t result;
754 __ASM volatile ("MRS %0, msplim" : "=r" (result) );
755 return result;
756#endif
757}
758
759
760#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
769__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void)
770{
771#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
772 // without main extensions, the non-secure MSPLIM is RAZ/WI
773 return 0U;
774#else
775 uint32_t result;
776 __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) );
777 return result;
778#endif
779}
780#endif
781
782
792__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit)
793{
794#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
795 (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
796 // without main extensions, the non-secure MSPLIM is RAZ/WI
797 (void)MainStackPtrLimit;
798#else
799 __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit));
800#endif
801}
802
803
804#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
813__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit)
814{
815#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
816 // without main extensions, the non-secure MSPLIM is RAZ/WI
817 (void)MainStackPtrLimit;
818#else
819 __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit));
820#endif
821}
822#endif
823
824#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
825 (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */
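/* Usage sketch (Armv8-M only): arming the main-stack limit so an MSP
 * overflow faults immediately instead of silently corrupting memory.
 * __StackLimit is the same linker symbol __STACK_LIMIT defaults to above.
 *
 *   extern uint32_t __StackLimit;
 *   __set_MSPLIM((uint32_t)&__StackLimit);
 */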
826
827
833__STATIC_FORCEINLINE uint32_t __get_FPSCR(void)
834{
835#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
836 (defined (__FPU_USED ) && (__FPU_USED == 1U)) )
837#if __has_builtin(__builtin_arm_get_fpscr)
838// Re-enable using built-in when GCC has been fixed
839// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
840 /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
841 return __builtin_arm_get_fpscr();
842#else
843 uint32_t result;
844
845 __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
846 return(result);
847#endif
848#else
849 return(0U);
850#endif
851}
852
853
859__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr)
860{
861#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
862 (defined (__FPU_USED ) && (__FPU_USED == 1U)) )
863#if __has_builtin(__builtin_arm_set_fpscr)
864// Re-enable using built-in when GCC has been fixed
865// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
866 /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
867 __builtin_arm_set_fpscr(fpscr);
868#else
869 __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory");
870#endif
871#else
872 (void)fpscr;
873#endif
874}
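/* Usage sketch: polling and clearing the sticky FP exception flags with the
 * accessor pair above. The 0x9FU mask (IOC/DZC/OFC/UFC/IXC in bits 4:0 and
 * IDC in bit 7) comes from the Armv7-M ARM, not from this header.
 *
 *   uint32_t fpscr = __get_FPSCR();
 *   if ((fpscr & 0x9FU) != 0U) {
 *       __set_FPSCR(fpscr & ~0x9FU);  // clear the cumulative flags
 *   }
 */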
875
876
880/* ########################## Core Instruction Access ######################### */
886/* Define macros for porting to both thumb1 and thumb2.
887 * For thumb1, use low register (r0-r7), specified by constraint "l"
888 * Otherwise, use general registers, specified by constraint "r" */
889#if defined (__thumb__) && !defined (__thumb2__)
890#define __CMSIS_GCC_OUT_REG(r) "=l" (r)
891#define __CMSIS_GCC_RW_REG(r) "+l" (r)
892#define __CMSIS_GCC_USE_REG(r) "l" (r)
893#else
894#define __CMSIS_GCC_OUT_REG(r) "=r" (r)
895#define __CMSIS_GCC_RW_REG(r) "+r" (r)
896#define __CMSIS_GCC_USE_REG(r) "r" (r)
897#endif
898
903#define __NOP() __ASM volatile ("nop")
904
909#define __WFI() __ASM volatile ("wfi":::"memory")
910
911
917#define __WFE() __ASM volatile ("wfe":::"memory")
918
919
924#define __SEV() __ASM volatile ("sev")
925
926
933__STATIC_FORCEINLINE void __ISB(void)
934{
935 __ASM volatile ("isb 0xF":::"memory");
936}
937
938
944__STATIC_FORCEINLINE void __DSB(void)
945{
946 __ASM volatile ("dsb 0xF":::"memory");
947}
948
949
955__STATIC_FORCEINLINE void __DMB(void)
956{
957 __ASM volatile ("dmb 0xF":::"memory");
958}
959
960
967__STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
968{
969#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
970 return __builtin_bswap32(value);
971#else
972 uint32_t result;
973
974 __ASM ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
975 return result;
976#endif
977}
978
979
986__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value)
987{
988 uint32_t result;
989
990 __ASM ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
991 return result;
992}
993
994
1001__STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
1002{
1003#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
1004 return (int16_t)__builtin_bswap16(value);
1005#else
1006 int16_t result;
1007
1008 __ASM ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
1009 return result;
1010#endif
1011}
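/* Usage sketch: typical byte-order conversions on a little-endian Cortex-M,
 * e.g. for big-endian network fields; the constants are illustrative.
 *
 *   uint32_t a = __REV(0x11223344U);        // 0x44332211
 *   uint32_t b = __REV16(0xAABBCCDDU);      // 0xBBAADDCC, swap per halfword
 *   int16_t  c = __REVSH((int16_t)0x0080);  // bytes swapped, sign-extended
 */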
1012
1013
1021__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
1022{
1023 op2 %= 32U;
1024 if (op2 == 0U)
1025 {
1026 return op1;
1027 }
1028 return (op1 >> op2) | (op1 << (32U - op2));
1029}
1030
1031
1039#define __BKPT(value) __ASM volatile ("bkpt "#value)
1040
1041
1048__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
1049{
1050 uint32_t result;
1051
1052#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
1053 (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
1054 (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) )
1055 __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) );
1056#else
1057 uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */
1058
1059 result = value; /* r will be reversed bits of v; first get LSB of v */
1060 for (value >>= 1U; value != 0U; value >>= 1U)
1061 {
1062 result <<= 1U;
1063 result |= value & 1U;
1064 s--;
1065 }
1066 result <<= s; /* shift when v's highest bits are zero */
1067#endif
1068 return result;
1069}
1070
1071
1078__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
1079{
1080 /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
1081 __builtin_clz(0) is undefined behaviour, so handle this case specially.
1082 This guarantees ARM-compatible results if happening to compile on a non-ARM
1083 target, and ensures the compiler doesn't decide to activate any
1084 optimisations using the logic "value was passed to __builtin_clz, so it
1085 is non-zero".
1086 ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a
1087 single CLZ instruction.
1088 */
1089 if (value == 0U)
1090 {
1091 return 32U;
1092 }
1093 return __builtin_clz(value);
1094}
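/* Usage sketch: rounding up to the next power of two, a common __CLZ idiom;
 * the helper name is invented here and the result is valid for x <= 2^31.
 *
 *   static inline uint32_t next_pow2(uint32_t x)
 *   {
 *       return (x <= 1U) ? 1U : (1UL << (32U - __CLZ(x - 1U)));
 *   }
 *   // next_pow2(100) == 128, next_pow2(64) == 64
 */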
1095
1096
1097#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
1098 (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
1099 (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
1100 (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) )
1107__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr)
1108{
1109 uint32_t result;
1110
1111#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
1112 __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
1113#else
1114 /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not
1115 accepted by the assembler, so the following less efficient pattern is used.
1116 */
1117 __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
1118#endif
1119 return ((uint8_t) result); /* Add explicit type cast here */
1120}
1121
1122
1129__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr)
1130{
1131 uint32_t result;
1132
1133#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
1134 __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
1135#else
1136 /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not
1137 accepted by the assembler, so the following less efficient pattern is used.
1138 */
1139 __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
1140#endif
1141 return ((uint16_t) result); /* Add explicit type cast here */
1142}
1143
1144
1151__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr)
1152{
1153 uint32_t result;
1154
1155 __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
1156 return(result);
1157}
1158
1159
1168__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
1169{
1170 uint32_t result;
1171
1172 __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
1173 return(result);
1174}
1175
1176
1185__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
1186{
1187 uint32_t result;
1188
1189 __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
1190 return(result);
1191}
1192
1193
1202__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
1203{
1204 uint32_t result;
1205
1206 __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
1207 return(result);
1208}
1209
1210
1215__STATIC_FORCEINLINE void __CLREX(void)
1216{
1217 __ASM volatile ("clrex" ::: "memory");
1218}
1219
1220#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
1221 (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
1222 (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
1223 (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */
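/* Usage sketch: the canonical load/store-exclusive retry loop the intrinsics
 * above exist for; a minimal atomic add under the same architecture guard.
 * The helper name is illustrative.
 *
 *   static inline uint32_t atomic_add_u32(volatile uint32_t *p, uint32_t inc)
 *   {
 *       uint32_t v;
 *       do {
 *           v = __LDREXW(p) + inc;       // load-exclusive and compute
 *       } while (__STREXW(v, p) != 0U);  // 0 == store succeeded; else retry
 *       __DMB();                         // order against subsequent accesses
 *       return v;
 *   }
 */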
1224
1225
1226#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
1227 (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
1228 (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) )
1236#define __SSAT(ARG1, ARG2) \
1237__extension__ \
1238({ \
1239 int32_t __RES, __ARG1 = (ARG1); \
1240 __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \
1241 __RES; \
1242 })
1243
1244
1252#define __USAT(ARG1, ARG2) \
1253 __extension__ \
1254({ \
1255 uint32_t __RES, __ARG1 = (ARG1); \
1256 __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \
1257 __RES; \
1258 })
1259
1260
1268__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value)
1269{
1270 uint32_t result;
1271
1272 __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
1273 return(result);
1274}
1275
1276
1283__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr)
1284{
1285 uint32_t result;
1286
1287#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
1288 __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) );
1289#else
1290 /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not
1291 accepted by the assembler, so the following less efficient pattern is used.
1292 */
1293 __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" );
1294#endif
1295 return ((uint8_t) result); /* Add explicit type cast here */
1296}
1297
1298
1305__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr)
1306{
1307 uint32_t result;
1308
1309#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
1310 __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) );
1311#else
1312 /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not
1313 accepted by the assembler, so the following less efficient pattern is used.
1314 */
1315 __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" );
1316#endif
1317 return ((uint16_t) result); /* Add explicit type cast here */
1318}
1319
1320
1327__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr)
1328{
1329 uint32_t result;
1330
1331 __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) );
1332 return(result);
1333}
1334
1335
1342__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr)
1343{
1344 __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
1345}
1346
1347
1354__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr)
1355{
1356 __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
1357}
1358
1359
1366__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr)
1367{
1368 __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) );
1369}
1370
1371#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
1372 (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
1373 (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */
1374
1382__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat)
1383{
1384 if ((sat >= 1U) && (sat <= 32U))
1385 {
1386 const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U);
1387 const int32_t min = -1 - max ;
1388 if (val > max)
1389 {
1390 return max;
1391 }
1392 else if (val < min)
1393 {
1394 return min;
1395 }
1396 }
1397 return val;
1398}
1399
1407__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat)
1408{
1409 if (sat <= 31U)
1410 {
1411 const uint32_t max = ((1U << sat) - 1U);
1412 if (val > (int32_t)max)
1413 {
1414 return max;
1415 }
1416 else if (val < 0)
1417 {
1418 return 0U;
1419 }
1420 }
1421 return (uint32_t)val;
1422}
1423
1424#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
1425 (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
1426 (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */
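/* Usage sketch: clamping an accumulator into Q15 range with __SSAT; the
 * behaviour is the same whether the ssat instruction or the C fallback
 * above is used. Variable names are illustrative.
 *
 *   int32_t acc = (x * y) >> 15;              // e.g. a Q15 multiply result
 *   int16_t out = (int16_t)__SSAT(acc, 16U);  // clamp to [-32768, 32767]
 */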
1427
1428
1429#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
1430 (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) )
1437__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr)
1438{
1439 uint32_t result;
1440
1441 __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
1442 return ((uint8_t) result);
1443}
1444
1445
1452__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr)
1453{
1454 uint32_t result;
1455
1456 __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
1457 return ((uint16_t) result);
1458}
1459
1460
1467__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr)
1468{
1469 uint32_t result;
1470
1471 __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
1472 return(result);
1473}
1474
1475
1482__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr)
1483{
1484 __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
1485}
1486
1487
1494__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr)
1495{
1496 __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
1497}
1498
1499
1506__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr)
1507{
1508 __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
1509}
1510
1511
1518__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr)
1519{
1520 uint32_t result;
1521
1522 __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
1523 return ((uint8_t) result);
1524}
1525
1526
1533__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr)
1534{
1535 uint32_t result;
1536
1537 __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
1538 return ((uint16_t) result);
1539}
1540
1541
1548__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr)
1549{
1550 uint32_t result;
1551
1552 __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
1553 return(result);
1554}
1555
1556
1565__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr)
1566{
1567 uint32_t result;
1568
1569 __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
1570 return(result);
1571}
1572
1573
1582__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr)
1583{
1584 uint32_t result;
1585
1586 __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
1587 return(result);
1588}
1589
1590
1599__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr)
1600{
1601 uint32_t result;
1602
1603 __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
1604 return(result);
1605}
1606
1607#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
1608 (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */
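/* Usage sketch: a minimal spinlock built on the acquire/release exclusives
 * above (Armv8-M). Their ordering semantics make the explicit barriers of
 * the plain LDREX/STREX pattern unnecessary; names are illustrative.
 *
 *   static inline void spin_lock(volatile uint32_t *l)
 *   {
 *       while ((__LDAEX(l) != 0U) || (__STLEX(1U, l) != 0U)) { }  // retry
 *   }
 *   static inline void spin_unlock(volatile uint32_t *l)
 *   {
 *       __STL(0U, l);  // store-release publishes the critical section
 *   }
 */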
1609 /* end of group CMSIS_Core_InstructionInterface */
1611
1612
1613/* ################### Compiler specific Intrinsics ########################### */
1619#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1))
1620
1621__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
1622{
1623 uint32_t result;
1624
1625 __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1626 return(result);
1627}
1628
1629__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
1630{
1631 uint32_t result;
1632
1633 __ASM ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1634 return(result);
1635}
1636
1637__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
1638{
1639 uint32_t result;
1640
1641 __ASM ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1642 return(result);
1643}
1644
1645__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
1646{
1647 uint32_t result;
1648
1649 __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1650 return(result);
1651}
1652
1653__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
1654{
1655 uint32_t result;
1656
1657 __ASM ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1658 return(result);
1659}
1660
1661__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
1662{
1663 uint32_t result;
1664
1665 __ASM ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1666 return(result);
1667}
1668
1669
1670__STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
1671{
1672 uint32_t result;
1673
1674 __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1675 return(result);
1676}
1677
1678__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
1679{
1680 uint32_t result;
1681
1682 __ASM ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1683 return(result);
1684}
1685
1686__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
1687{
1688 uint32_t result;
1689
1690 __ASM ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1691 return(result);
1692}
1693
1694__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
1695{
1696 uint32_t result;
1697
1698 __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1699 return(result);
1700}
1701
1702__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
1703{
1704 uint32_t result;
1705
1706 __ASM ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1707 return(result);
1708}
1709
1710__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
1711{
1712 uint32_t result;
1713
1714 __ASM ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1715 return(result);
1716}
1717
1718
1719__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
1720{
1721 uint32_t result;
1722
1723 __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1724 return(result);
1725}
1726
1727__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
1728{
1729 uint32_t result;
1730
1731 __ASM ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1732 return(result);
1733}
1734
1735__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
1736{
1737 uint32_t result;
1738
1739 __ASM ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1740 return(result);
1741}
1742
1743__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
1744{
1745 uint32_t result;
1746
1747 __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1748 return(result);
1749}
1750
1751__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
1752{
1753 uint32_t result;
1754
1755 __ASM ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1756 return(result);
1757}
1758
1759__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
1760{
1761 uint32_t result;
1762
1763 __ASM ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1764 return(result);
1765}
1766
1767__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
1768{
1769 uint32_t result;
1770
1771 __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1772 return(result);
1773}
1774
1775__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
1776{
1777 uint32_t result;
1778
1779 __ASM ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1780 return(result);
1781}
1782
1783__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
1784{
1785 uint32_t result;
1786
1787 __ASM ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1788 return(result);
1789}
1790
1791__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
1792{
1793 uint32_t result;
1794
1795 __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1796 return(result);
1797}
1798
1799__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
1800{
1801 uint32_t result;
1802
1803 __ASM ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1804 return(result);
1805}
1806
1807__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
1808{
1809 uint32_t result;
1810
1811 __ASM ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1812 return(result);
1813}
1814
1815__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
1816{
1817 uint32_t result;
1818
1819 __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1820 return(result);
1821}
1822
1823__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
1824{
1825 uint32_t result;
1826
1827 __ASM ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1828 return(result);
1829}
1830
1831__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
1832{
1833 uint32_t result;
1834
1835 __ASM ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1836 return(result);
1837}
1838
1839__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
1840{
1841 uint32_t result;
1842
1843 __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1844 return(result);
1845}
1846
1847__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
1848{
1849 uint32_t result;
1850
1851 __ASM ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1852 return(result);
1853}
1854
1855__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
1856{
1857 uint32_t result;
1858
1859 __ASM ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1860 return(result);
1861}
1862
1863__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
1864{
1865 uint32_t result;
1866
1867 __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1868 return(result);
1869}
1870
1871__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
1872{
1873 uint32_t result;
1874
1875 __ASM ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1876 return(result);
1877}
1878
1879__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
1880{
1881 uint32_t result;
1882
1883 __ASM ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1884 return(result);
1885}
1886
1887__STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
1888{
1889 uint32_t result;
1890
1891 __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1892 return(result);
1893}
1894
1895__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
1896{
1897 uint32_t result;
1898
1899 __ASM ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1900 return(result);
1901}
1902
1903__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
1904{
1905 uint32_t result;
1906
1907 __ASM ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1908 return(result);
1909}
1910
1911__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
1912{
1913 uint32_t result;
1914
1915 __ASM ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1916 return(result);
1917}
1918
1919__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
1920{
1921 uint32_t result;
1922
1923 __ASM ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1924 return(result);
1925}
1926
1927#define __SSAT16(ARG1, ARG2) \
1928({ \
1929 int32_t __RES, __ARG1 = (ARG1); \
1930 __ASM volatile ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \
1931 __RES; \
1932 })
1933
1934#define __USAT16(ARG1, ARG2) \
1935({ \
1936 uint32_t __RES, __ARG1 = (ARG1); \
1937 __ASM volatile ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \
1938 __RES; \
1939 })
1940
1941__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1)
1942{
1943 uint32_t result;
1944
1945 __ASM ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
1946 return(result);
1947}
1948
1949__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
1950{
1951 uint32_t result;
1952
1953 __ASM ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1954 return(result);
1955}
1956
1957__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1)
1958{
1959 uint32_t result;
1960
1961 __ASM ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
1962 return(result);
1963}
1964
1965__STATIC_FORCEINLINE uint32_t __SXTB16_RORn(uint32_t op1, uint32_t rotate)
1966{
1967 uint32_t result;
1968
1969 __ASM ("sxtb16 %0, %1, ROR %2" : "=r" (result) : "r" (op1), "i" (rotate) );
1970
1971 return result;
1972}
1973
1974__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
1975{
1976 uint32_t result;
1977
1978 __ASM ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1979 return(result);
1980}
1981
1982__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
1983{
1984 uint32_t result;
1985
1986 __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1987 return(result);
1988}
1989
1990__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
1991{
1992 uint32_t result;
1993
1994 __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1995 return(result);
1996}
1997
1998__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
1999{
2000 uint32_t result;
2001
2002 __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
2003 return(result);
2004}
2005
2006__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
2007{
2008 uint32_t result;
2009
2010 __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
2011 return(result);
2012}
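/* Usage sketch: the packed-Q15 dot product the dual-MAC intrinsics above are
 * built for; two samples per 32-bit word, two multiply-accumulates per
 * __SMLAD. Function and array names are illustrative; n is assumed even.
 *
 *   int32_t dot_q15(const int16_t *a, const int16_t *b, uint32_t n)
 *   {
 *       int32_t acc = 0;
 *       for (uint32_t i = 0U; i < (n / 2U); i++) {
 *           uint32_t va = __UNALIGNED_UINT32_READ(&a[2U * i]);
 *           uint32_t vb = __UNALIGNED_UINT32_READ(&b[2U * i]);
 *           acc = (int32_t)__SMLAD(va, vb, (uint32_t)acc);
 *       }
 *       return acc;
 *   }
 */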
2013
2014__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
2015{
2016 union llreg_u{
2017 uint32_t w32[2];
2018 uint64_t w64;
2019 } llr;
2020 llr.w64 = acc;
2021
2022#ifndef __ARMEB__ /* Little endian */
2023 __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
2024#else /* Big endian */
2025 __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
2026#endif
2027
2028 return(llr.w64);
2029}
2030
2031__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
2032{
2033 union llreg_u{
2034 uint32_t w32[2];
2035 uint64_t w64;
2036 } llr;
2037 llr.w64 = acc;
2038
2039#ifndef __ARMEB__ /* Little endian */
2040 __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
2041#else /* Big endian */
2042 __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
2043#endif
2044
2045 return(llr.w64);
2046}
2047
2048__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
2049{
2050 uint32_t result;
2051
2052 __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
2053 return(result);
2054}
2055
2056__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
2057{
2058 uint32_t result;
2059
2060 __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
2061 return(result);
2062}
2063
2064__STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
2065{
2066 uint32_t result;
2067
2068 __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
2069 return(result);
2070}
2071
2072__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
2073{
2074 uint32_t result;
2075
2076 __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
2077 return(result);
2078}
2079
2080__STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
2081{
2082 union llreg_u{
2083 uint32_t w32[2];
2084 uint64_t w64;
2085 } llr;
2086 llr.w64 = acc;
2087
2088#ifndef __ARMEB__ /* Little endian */
2089 __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
2090#else /* Big endian */
2091 __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
2092#endif
2093
2094 return(llr.w64);
2095}
2096
2097__STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
2098{
2099 union llreg_u{
2100 uint32_t w32[2];
2101 uint64_t w64;
2102 } llr;
2103 llr.w64 = acc;
2104
2105#ifndef __ARMEB__ /* Little endian */
2106 __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
2107#else /* Big endian */
2108 __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
2109#endif
2110
2111 return(llr.w64);
2112}
2113
2114__STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
2115{
2116 uint32_t result;
2117
2118 __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
2119 return(result);
2120}
2121
2122__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2)
2123{
2124 int32_t result;
2125
2126 __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
2127 return(result);
2128}
2129
2130__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2)
2131{
2132 int32_t result;
2133
2134 __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
2135 return(result);
2136}
2137
2138#if 0
2139#define __PKHBT(ARG1,ARG2,ARG3) \
2140({ \
2141 uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
2142 __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
2143 __RES; \
2144 })
2145
2146#define __PKHTB(ARG1,ARG2,ARG3) \
2147({ \
2148 uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
2149 if (ARG3 == 0) \
2150 __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
2151 else \
2152 __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
2153 __RES; \
2154 })
2155#endif
2156
2157#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \
2158 ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )
2159
2160#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \
2161 ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) )
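/* Usage sketch: packing two halfwords with __PKHBT, e.g. when interleaving
 * Q15 samples; constants are illustrative.
 *
 *   uint32_t lo = 0x00001111U, hi = 0x00002222U;
 *   uint32_t packed = __PKHBT(lo, hi, 16);  // 0x22221111
 */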
2162
2163__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
2164{
2165 int32_t result;
2166
2167 __ASM ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
2168 return(result);
2169}
2170
2171#endif /* (__ARM_FEATURE_DSP == 1) */
2175#pragma GCC diagnostic pop
2176
2177#endif /* __CMSIS_GCC_H */