Diffstat (limited to 'Libraries')
18 files changed, 2364 insertions, 0 deletions
diff --git a/Libraries/CMSIS/Device/Include/cmsis_gcc.h b/Libraries/CMSIS/Device/Include/cmsis_gcc.h
new file mode 100644
index 0000000..d868f2e
--- /dev/null
+++ b/Libraries/CMSIS/Device/Include/cmsis_gcc.h
@@ -0,0 +1,1373 @@
+/**************************************************************************//**
+ * @file cmsis_gcc.h
+ * @brief CMSIS Cortex-M Core Function/Instruction Header File
+ * @version V4.30
+ * @date 20. October 2015
+ ******************************************************************************/
+/* Copyright (c) 2009 - 2015 ARM LIMITED
+
+ All rights reserved.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ - Neither the name of ARM nor the names of its contributors may be used
+ to endorse or promote products derived from this software without
+ specific prior written permission.
+ *
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+ ---------------------------------------------------------------------------*/
+
+
+#ifndef __CMSIS_GCC_H
+#define __CMSIS_GCC_H
+
+/* ignore some GCC warnings */
+#if defined ( __GNUC__ )
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wsign-conversion"
+#pragma GCC diagnostic ignored "-Wconversion"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+
+/* ########################### Core Function Access ########################### */
+/** \ingroup CMSIS_Core_FunctionInterface
+ \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
+ @{
+ */
+
+/**
+ \brief Enable IRQ Interrupts
+ \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
+ Can only be executed in Privileged modes.
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void)
+{
+ __ASM volatile ("cpsie i" : : : "memory");
+}
+
+
+/**
+ \brief Disable IRQ Interrupts
+ \details Disables IRQ interrupts by setting the I-bit in the CPSR.
+ Can only be executed in Privileged modes.
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_irq(void)
+{
+ __ASM volatile ("cpsid i" : : : "memory");
+}
+
+
+/**
+ \brief Get Control Register
+ \details Returns the content of the Control Register.
+ \return Control Register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CONTROL(void)
+{
+ uint32_t result;
+
+ __ASM volatile ("MRS %0, control" : "=r" (result) );
+ return(result);
+}
+
+
+/**
+ \brief Set Control Register
+ \details Writes the given value to the Control Register.
+ \param [in] control Control Register value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CONTROL(uint32_t control)
+{
+ __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
+}
+
+
+/**
+ \brief Get IPSR Register
+ \details Returns the content of the IPSR Register.
+ \return IPSR Register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_IPSR(void)
+{
+ uint32_t result;
+
+ __ASM volatile ("MRS %0, ipsr" : "=r" (result) );
+ return(result);
+}
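+/*
+   Usage sketch (illustrative, not part of the CMSIS API): IPSR reads 0 in
+   Thread mode and holds the active exception number in Handler mode, so a
+   hypothetical helper for "is this code running in an ISR?" could be:
+
+     static inline uint32_t in_handler_mode(void)
+     {
+       return (__get_IPSR() != 0U);   // non-zero exception number => ISR context
+     }
+*/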
+
+
+/**
+ \brief Get APSR Register
+ \details Returns the content of the APSR Register.
+ \return APSR Register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_APSR(void)
+{
+ uint32_t result;
+
+ __ASM volatile ("MRS %0, apsr" : "=r" (result) );
+ return(result);
+}
+
+
+/**
+ \brief Get xPSR Register
+ \details Returns the content of the xPSR Register.
+ \return xPSR Register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_xPSR(void)
+{
+ uint32_t result;
+
+ __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
+ return(result);
+}
+
+
+/**
+ \brief Get Process Stack Pointer
+ \details Returns the current value of the Process Stack Pointer (PSP).
+ \return PSP Register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PSP(void)
+{
+ register uint32_t result;
+
+ __ASM volatile ("MRS %0, psp\n" : "=r" (result) );
+ return(result);
+}
+
+
+/**
+ \brief Set Process Stack Pointer
+ \details Assigns the given value to the Process Stack Pointer (PSP).
+ \param [in] topOfProcStack Process Stack Pointer value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
+{
+ __ASM volatile ("MSR psp, %0\n" : : "r" (topOfProcStack) : "sp");
+}
+
+
+/**
+ \brief Get Main Stack Pointer
+ \details Returns the current value of the Main Stack Pointer (MSP).
+ \return MSP Register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_MSP(void)
+{
+ register uint32_t result;
+
+ __ASM volatile ("MRS %0, msp\n" : "=r" (result) );
+ return(result);
+}
+
+
+/**
+ \brief Set Main Stack Pointer
+ \details Assigns the given value to the Main Stack Pointer (MSP).
+ \param [in] topOfMainStack Main Stack Pointer value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
+{
+ __ASM volatile ("MSR msp, %0\n" : : "r" (topOfMainStack) : "sp");
+}
+
+
+/**
+ \brief Get Priority Mask
+ \details Returns the current state of the priority mask bit from the Priority Mask Register.
+ \return Priority Mask value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PRIMASK(void)
+{
+ uint32_t result;
+
+ __ASM volatile ("MRS %0, primask" : "=r" (result) );
+ return(result);
+}
+
+
+/**
+ \brief Set Priority Mask
+ \details Assigns the given value to the Priority Mask Register.
+ \param [in] priMask Priority Mask
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
+{
+ __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
+}
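+/*
+   Usage sketch (illustrative): saving and restoring PRIMASK makes a critical
+   section safe to nest, i.e. it works whether or not interrupts were already
+   disabled on entry:
+
+     uint32_t primask = __get_PRIMASK();   // remember current mask state
+     __disable_irq();                      // PRIMASK = 1, block all IRQs
+     ...access data shared with ISRs...    // protected section
+     __set_PRIMASK(primask);               // restore previous mask state
+*/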
+
+
+#if (__CORTEX_M >= 0x03U)
+
+/**
+ \brief Enable FIQ
+ \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
+ Can only be executed in Privileged modes.
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_fault_irq(void)
+{
+ __ASM volatile ("cpsie f" : : : "memory");
+}
+
+
+/**
+ \brief Disable FIQ
+ \details Disables FIQ interrupts by setting the F-bit in the CPSR.
+ Can only be executed in Privileged modes.
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_fault_irq(void)
+{
+ __ASM volatile ("cpsid f" : : : "memory");
+}
+
+
+/**
+ \brief Get Base Priority
+ \details Returns the current value of the Base Priority register.
+ \return Base Priority register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_BASEPRI(void)
+{
+ uint32_t result;
+
+ __ASM volatile ("MRS %0, basepri" : "=r" (result) );
+ return(result);
+}
+
+
+/**
+ \brief Set Base Priority
+ \details Assigns the given value to the Base Priority register.
+ \param [in] value Base Priority value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI(uint32_t value)
+{
+ __ASM volatile ("MSR basepri, %0" : : "r" (value) : "memory");
+}
+
+
+/**
+ \brief Set Base Priority with condition
+ \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
+ or the new value increases the BASEPRI priority level.
+ \param [in] value Base Priority value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI_MAX(uint32_t value)
+{
+ __ASM volatile ("MSR basepri_max, %0" : : "r" (value) : "memory");
+}
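+/*
+   Usage sketch (illustrative, assuming a device with 4 NVIC priority bits):
+   BASEPRI masks every interrupt whose priority value is numerically greater
+   than or equal to the written, left-aligned level; 0 disables masking.
+
+     uint32_t old = __get_BASEPRI();
+     __set_BASEPRI_MAX(2U << (8U - 4U));   // mask priorities 2..15; never lowers the level
+     ...section protected from low-priority IRQs...
+     __set_BASEPRI(old);                   // restore previous level
+*/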
+
+
+/**
+ \brief Get Fault Mask
+ \details Returns the current value of the Fault Mask register.
+ \return Fault Mask register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FAULTMASK(void)
+{
+ uint32_t result;
+
+ __ASM volatile ("MRS %0, faultmask" : "=r" (result) );
+ return(result);
+}
+
+
+/**
+ \brief Set Fault Mask
+ \details Assigns the given value to the Fault Mask register.
+ \param [in] faultMask Fault Mask value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
+{
+ __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
+}
+
+#endif /* (__CORTEX_M >= 0x03U) */
+
+
+#if (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U)
+
+/**
+ \brief Get FPSCR
+ \details Returns the current value of the Floating Point Status/Control register.
+ \return Floating Point Status/Control register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void)
+{
+#if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
+ uint32_t result;
+
+ /* Empty asm statement works as a scheduling barrier */
+ __ASM volatile ("");
+ __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
+ __ASM volatile ("");
+ return(result);
+#else
+ return(0);
+#endif
+}
+
+
+/**
+ \brief Set FPSCR
+ \details Assigns the given value to the Floating Point Status/Control register.
+ \param [in] fpscr Floating Point Status/Control value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
+{
+#if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
+ /* Empty asm statement works as a scheduling barrier */
+ __ASM volatile ("");
+ __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc");
+ __ASM volatile ("");
+#endif
+}
+
+#endif /* (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U) */
+
+
+
+/*@} end of CMSIS_Core_RegAccFunctions */
+
+
+/* ########################## Core Instruction Access ######################### */
+/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
+ Access to dedicated instructions
+ @{
+*/
+
+/* Define macros for porting to both thumb1 and thumb2.
+ * For thumb1, use low register (r0-r7), specified by constraint "l"
+ * Otherwise, use general registers, specified by constraint "r" */
+#if defined (__thumb__) && !defined (__thumb2__)
+#define __CMSIS_GCC_OUT_REG(r) "=l" (r)
+#define __CMSIS_GCC_USE_REG(r) "l" (r)
+#else
+#define __CMSIS_GCC_OUT_REG(r) "=r" (r)
+#define __CMSIS_GCC_USE_REG(r) "r" (r)
+#endif
+
+/**
+ \brief No Operation
+ \details No Operation does nothing. This instruction can be used for code alignment purposes.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __NOP(void)
+{
+ __ASM volatile ("nop");
+}
+
+
+/**
+ \brief Wait For Interrupt
+ \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __WFI(void)
+{
+ __ASM volatile ("wfi");
+}
+
+
+/**
+ \brief Wait For Event
+ \details Wait For Event is a hint instruction that permits the processor to enter
+ a low-power state until one of a number of events occurs.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __WFE(void)
+{
+ __ASM volatile ("wfe");
+}
+
+
+/**
+ \brief Send Event
+ \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __SEV(void)
+{
+ __ASM volatile ("sev");
+}
+
+
+/**
+ \brief Instruction Synchronization Barrier
+ \details Instruction Synchronization Barrier flushes the pipeline in the processor,
+ so that all instructions following the ISB are fetched from cache or memory,
+ after the instruction has been completed.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __ISB(void)
+{
+ __ASM volatile ("isb 0xF":::"memory");
+}
+
+
+/**
+ \brief Data Synchronization Barrier
+ \details Acts as a special kind of Data Memory Barrier.
+ It completes when all explicit memory accesses before this instruction complete.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __DSB(void)
+{
+ __ASM volatile ("dsb 0xF":::"memory");
+}
+
+
+/**
+ \brief Data Memory Barrier
+ \details Ensures the apparent order of the explicit memory operations before
+ and after the instruction, without ensuring their completion.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __DMB(void)
+{
+ __ASM volatile ("dmb 0xF":::"memory");
+}
+
+
+/**
+ \brief Reverse byte order (32 bit)
+ \details Reverses the byte order of a 32-bit integer value.
+ \param [in] value Value to reverse
+ \return Reversed value
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV(uint32_t value)
+{
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+ return __builtin_bswap32(value);
+#else
+ uint32_t result;
+
+ __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+ return(result);
+#endif
+}
+
+
+/**
+ \brief Reverse byte order (16 bit)
+ \details Reverses the byte order within each halfword of a 32-bit value.
+ \param [in] value Value to reverse
+ \return Reversed value
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV16(uint32_t value)
+{
+ uint32_t result;
+
+ __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+ return(result);
+}
+
+
+/**
+ \brief Reverse byte order in signed short value
+ \details Reverses the byte order in a signed short value with sign extension to integer.
+ \param [in] value Value to reverse
+ \return Reversed value
+ */
+__attribute__((always_inline)) __STATIC_INLINE int32_t __REVSH(int32_t value)
+{
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ return (short)__builtin_bswap16(value);
+#else
+ int32_t result;
+
+ __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+ return(result);
+#endif
+}
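+/*
+   Usage sketch (illustrative): on a little-endian Cortex-M core, __REV
+   converts a 32-bit word between host and big-endian (network) byte order:
+
+     uint32_t be_word = 0x12345678U;       // value as read from a big-endian source
+     uint32_t host    = __REV(be_word);    // 0x78563412
+*/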
+
+
+/**
+ \brief Rotate Right in unsigned value (32 bit)
+ \details Rotate Right provides the value of the contents of a register rotated by a variable number of bits.
+ \param [in] op1 Value to rotate
+ \param [in] op2 Number of bits to rotate
+ \return Rotated value
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
+{
+  op2 %= 32U;
+  if (op2 == 0U)
+  {
+    return op1;                            /* avoid undefined shift by 32 */
+  }
+  return (op1 >> op2) | (op1 << (32U - op2));
+}
+
+
+/**
+ \brief Breakpoint
+ \details Causes the processor to enter Debug state.
+ Debug tools can use this to investigate system state when the instruction at a particular address is reached.
+ \param [in] value is ignored by the processor.
+ If required, a debugger can use it to store additional information about the breakpoint.
+ */
+#define __BKPT(value) __ASM volatile ("bkpt "#value)
+
+
+/**
+ \brief Reverse bit order of value
+ \details Reverses the bit order of the given value.
+ \param [in] value Value to reverse
+ \return Reversed value
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
+{
+ uint32_t result;
+
+#if (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)
+ __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
+#else
+ int32_t s = 4 /*sizeof(v)*/ * 8 - 1; /* extra shift needed at end */
+
+ result = value; /* r will be reversed bits of v; first get LSB of v */
+ for (value >>= 1U; value; value >>= 1U)
+ {
+ result <<= 1U;
+ result |= value & 1U;
+ s--;
+ }
+ result <<= s; /* shift when v's highest bits are zero */
+#endif
+ return(result);
+}
+
+
+/**
+ \brief Count leading zeros
+ \details Counts the number of leading zeros of a data value.
+ \param [in] value Value to count the leading zeros
+ \return number of leading zeros in value
+ */
+#define __CLZ __builtin_clz
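+/* Note: unlike the CLZ instruction, which returns 32 for an input of 0, __builtin_clz has an undefined result for 0. */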
+
+
+#if (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)
+
+/**
+ \brief LDR Exclusive (8 bit)
+ \details Executes an exclusive LDR instruction for an 8-bit value.
+ \param [in] addr Pointer to data
+ \return value of type uint8_t at (*addr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDREXB(volatile uint8_t *addr)
+{
+ uint32_t result;
+
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
+#else
+  /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not accepted
+     by the assembler, so the following less efficient pattern has to be
+     used. */
+ __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
+#endif
+ return ((uint8_t) result); /* Add explicit type cast here */
+}
+
+
+/**
+ \brief LDR Exclusive (16 bit)
+ \details Executes an exclusive LDR instruction for a 16-bit value.
+ \param [in] addr Pointer to data
+ \return value of type uint16_t at (*addr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDREXH(volatile uint16_t *addr)
+{
+ uint32_t result;
+
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
+#else
+  /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not accepted
+     by the assembler, so the following less efficient pattern has to be
+     used. */
+ __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
+#endif
+ return ((uint16_t) result); /* Add explicit type cast here */
+}
+
+
+/**
+ \brief LDR Exclusive (32 bit)
+ \details Executes an exclusive LDR instruction for a 32-bit value.
+ \param [in] addr Pointer to data
+ \return value of type uint32_t at (*addr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDREXW(volatile uint32_t *addr)
+{
+ uint32_t result;
+
+ __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
+ return(result);
+}
+
+
+/**
+ \brief STR Exclusive (8 bit)
+ \details Executes an exclusive STR instruction for an 8-bit value.
+ \param [in] value Value to store
+ \param [in] addr Pointer to location
+ \return 0 Function succeeded
+ \return 1 Function failed
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
+{
+ uint32_t result;
+
+ __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
+ return(result);
+}
+
+
+/**
+ \brief STR Exclusive (16 bit)
+ \details Executes an exclusive STR instruction for a 16-bit value.
+ \param [in] value Value to store
+ \param [in] addr Pointer to location
+ \return 0 Function succeeded
+ \return 1 Function failed
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
+{
+ uint32_t result;
+
+ __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
+ return(result);
+}
+
+
+/**
+ \brief STR Exclusive (32 bit)
+ \details Executes an exclusive STR instruction for a 32-bit value.
+ \param [in] value Value to store
+ \param [in] addr Pointer to location
+ \return 0 Function succeeded
+ \return 1 Function failed
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
+{
+ uint32_t result;
+
+ __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
+ return(result);
+}
+
+
+/**
+ \brief Remove the exclusive lock
+ \details Removes the exclusive lock which is created by LDREX.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __CLREX(void)
+{
+ __ASM volatile ("clrex" ::: "memory");
+}
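+/*
+   Usage sketch (illustrative): the canonical load-exclusive/store-exclusive
+   retry loop for a lock-free increment; __STREXW returns 0 on success and 1
+   if the exclusive monitor was lost, in which case the update is retried:
+
+     static void atomic_inc(volatile uint32_t *counter)   // hypothetical helper
+     {
+       uint32_t val;
+       do {
+         val = __LDREXW(counter);               // open exclusive access
+       } while (__STREXW(val + 1U, counter));   // retry on contention
+     }
+*/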
+
+
+/**
+ \brief Signed Saturate
+ \details Saturates a signed value.
+ \param [in] value Value to be saturated
+ \param [in] sat Bit position to saturate to (1..32)
+ \return Saturated value
+ */
+#define __SSAT(ARG1,ARG2) \
+({ \
+ uint32_t __RES, __ARG1 = (ARG1); \
+ __ASM ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
+ __RES; \
+ })
+
+
+/**
+ \brief Unsigned Saturate
+ \details Saturates an unsigned value.
+ \param [in] value Value to be saturated
+ \param [in] sat Bit position to saturate to (0..31)
+ \return Saturated value
+ */
+#define __USAT(ARG1,ARG2) \
+({ \
+ uint32_t __RES, __ARG1 = (ARG1); \
+ __ASM ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
+ __RES; \
+ })
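+/*
+   Usage sketch (illustrative): the saturation width is encoded as an
+   immediate in the instruction, so it must be a compile-time constant:
+
+     int32_t  s = __SSAT(70000, 16);   // clamps to 32767 (signed 16-bit maximum)
+     uint32_t u = __USAT(-5, 8);       // clamps to 0 (unsigned 8-bit minimum)
+*/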
+
+
+/**
+ \brief Rotate Right with Extend (32 bit)
+ \details Moves each bit of a bitstring right by one bit.
+ The carry input is shifted in at the left end of the bitstring.
+ \param [in] value Value to rotate
+ \return Rotated value
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __RRX(uint32_t value)
+{
+ uint32_t result;
+
+ __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+ return(result);
+}
+
+
+/**
+ \brief LDRT Unprivileged (8 bit)
+ \details Executes an unprivileged LDRT instruction for an 8-bit value.
+ \param [in] addr Pointer to data
+ \return value of type uint8_t at (*addr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *addr)
+{
+ uint32_t result;
+
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*addr) );
+#else
+  /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not accepted
+     by the assembler, so the following less efficient pattern has to be
+     used. */
+ __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
+#endif
+ return ((uint8_t) result); /* Add explicit type cast here */
+}
+
+
+/**
+ \brief LDRT Unprivileged (16 bit)
+ \details Executes an unprivileged LDRT instruction for a 16-bit value.
+ \param [in] addr Pointer to data
+ \return value of type uint16_t at (*addr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *addr)
+{
+ uint32_t result;
+
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*addr) );
+#else
+  /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not accepted
+     by the assembler, so the following less efficient pattern has to be
+     used. */
+ __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
+#endif
+ return ((uint16_t) result); /* Add explicit type cast here */
+}
+
+
+/**
+ \brief LDRT Unprivileged (32 bit)
+ \details Executes an unprivileged LDRT instruction for a 32-bit value.
+ \param [in] addr Pointer to data
+ \return value of type uint32_t at (*addr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDRT(volatile uint32_t *addr)
+{
+ uint32_t result;
+
+ __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*addr) );
+ return(result);
+}
+
+
+/**
+ \brief STRT Unprivileged (8 bit)
+ \details Executes an unprivileged STRT instruction for an 8-bit value.
+ \param [in] value Value to store
+ \param [in] addr Pointer to location
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *addr)
+{
+ __ASM volatile ("strbt %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
+}
+
+
+/**
+ \brief STRT Unprivileged (16 bit)
+ \details Executes an unprivileged STRT instruction for a 16-bit value.
+ \param [in] value Value to store
+ \param [in] addr Pointer to location
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *addr)
+{
+ __ASM volatile ("strht %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
+}
+
+
+/**
+ \brief STRT Unprivileged (32 bit)
+ \details Executes an unprivileged STRT instruction for a 32-bit value.
+ \param [in] value Value to store
+ \param [in] addr Pointer to location
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *addr)
+{
+ __ASM volatile ("strt %1, %0" : "=Q" (*addr) : "r" (value) );
+}
+
+#endif /* (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U) */
+
+/*@}*/ /* end of group CMSIS_Core_InstructionInterface */
+
+
+/* ################### Compiler specific Intrinsics ########################### */
+/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
+ Access to dedicated SIMD instructions
+ @{
+*/
+
+#if (__CORTEX_M >= 0x04U) /* only for Cortex-M4 and above */
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
+{
+ uint32_t result;
+
+ __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
+
+#define __SSAT16(ARG1,ARG2) \
+({ \
+ int32_t __RES, __ARG1 = (ARG1); \
+ __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
+ __RES; \
+ })
+
+#define __USAT16(ARG1,ARG2) \
+({ \
+ uint32_t __RES, __ARG1 = (ARG1); \
+ __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
+ __RES; \
+ })
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
+{
+ uint32_t result;
+
+ __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
+{
+ uint32_t result;
+
+ __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+ uint32_t result;
+
+ __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
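+/*
+   Usage sketch (illustrative): __SMLAD multiplies the two signed 16-bit
+   halves of its operands pairwise and adds both products to the accumulator,
+   i.e. one step of a packed q15 dot product. For packed operands a and b:
+
+     acc = __SMLAD(a, b, acc);   // acc += lo16(a)*lo16(b) + hi16(a)*hi16(b)
+*/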
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+ uint32_t result;
+
+ __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
+{
+ union llreg_u{
+ uint32_t w32[2];
+ uint64_t w64;
+ } llr;
+ llr.w64 = acc;
+
+#ifndef __ARMEB__ /* Little endian */
+ __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
+#else /* Big endian */
+ __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
+#endif
+
+ return(llr.w64);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
+{
+ union llreg_u{
+ uint32_t w32[2];
+ uint64_t w64;
+ } llr;
+ llr.w64 = acc;
+
+#ifndef __ARMEB__ /* Little endian */
+ __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
+#else /* Big endian */
+ __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
+#endif
+
+ return(llr.w64);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+ uint32_t result;
+
+ __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+ uint32_t result;
+
+ __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
+{
+ union llreg_u{
+ uint32_t w32[2];
+ uint64_t w64;
+ } llr;
+ llr.w64 = acc;
+
+#ifndef __ARMEB__ /* Little endian */
+ __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
+#else /* Big endian */
+ __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
+#endif
+
+ return(llr.w64);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
+{
+ union llreg_u{
+ uint32_t w32[2];
+ uint64_t w64;
+ } llr;
+ llr.w64 = acc;
+
+#ifndef __ARMEB__ /* Little endian */
+ __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
+#else /* Big endian */
+ __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
+#endif
+
+ return(llr.w64);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __QADD( int32_t op1, int32_t op2)
+{
+ int32_t result;
+
+ __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __QSUB( int32_t op1, int32_t op2)
+{
+ int32_t result;
+
+ __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+#define __PKHBT(ARG1,ARG2,ARG3) \
+({ \
+ uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
+ __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
+ __RES; \
+ })
+
+#define __PKHTB(ARG1,ARG2,ARG3) \
+({ \
+ uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
+ if (ARG3 == 0) \
+ __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
+ else \
+ __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
+ __RES; \
+ })
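+/*
+   Usage sketch (illustrative): __PKHBT packs the bottom halfword of its first
+   argument with the top halfword of its (optionally left-shifted) second:
+
+     uint32_t lo     = 0x00001234U;
+     uint32_t hi     = 0x56780000U;
+     uint32_t packed = __PKHBT(lo, hi, 0);   // 0x56781234
+*/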
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
+{
+ int32_t result;
+
+ __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
+
+#endif /* (__CORTEX_M >= 0x04) */
+/*@} end of group CMSIS_SIMD_intrinsics */
+
+
+#if defined ( __GNUC__ )
+#pragma GCC diagnostic pop
+#endif
+
+#endif /* __CMSIS_GCC_H */
diff --git a/Libraries/CMSIS/Device/Include/core_cm0.h b/Libraries/CMSIS/Device/Include/core_cm0.h
new file mode 100644
index 0000000..fdee521
--- /dev/null
+++ b/Libraries/CMSIS/Device/Include/core_cm0.h
@@ -0,0 +1,798 @@
+/**************************************************************************//**
+ * @file core_cm0.h
+ * @brief CMSIS Cortex-M0 Core Peripheral Access Layer Header File
+ * @version V4.30
+ * @date 20. October 2015
+ ******************************************************************************/
+/* Copyright (c) 2009 - 2015 ARM LIMITED
+
+ All rights reserved.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ - Neither the name of ARM nor the names of its contributors may be used
+ to endorse or promote products derived from this software without
+ specific prior written permission.
+ *
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+ ---------------------------------------------------------------------------*/
+
+
+#if defined ( __ICCARM__ )
+ #pragma system_include /* treat file as system include file for MISRA check */
+#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+ #pragma clang system_header /* treat file as system include file */
+#endif
+
+#ifndef __CORE_CM0_H_GENERIC
+#define __CORE_CM0_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/**
+ \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions
+ CMSIS violates the following MISRA-C:2004 rules:
+
+ \li Required Rule 8.5, object/function definition in header file.<br>
+ Function definitions in header files are used to allow 'inlining'.
+
+ \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>
+ Unions are used for effective representation of core registers.
+
+ \li Advisory Rule 19.7, Function-like macro defined.<br>
+ Function-like macros are used to allow more efficient code.
+ */
+
+
+/*******************************************************************************
+ * CMSIS definitions
+ ******************************************************************************/
+/**
+ \ingroup Cortex_M0
+ @{
+ */
+
+/* CMSIS CM0 definitions */
+#define __CM0_CMSIS_VERSION_MAIN (0x04U) /*!< [31:16] CMSIS HAL main version */
+#define __CM0_CMSIS_VERSION_SUB (0x1EU) /*!< [15:0] CMSIS HAL sub version */
+#define __CM0_CMSIS_VERSION ((__CM0_CMSIS_VERSION_MAIN << 16U) | \
+ __CM0_CMSIS_VERSION_SUB ) /*!< CMSIS HAL version number */
+
+#define __CORTEX_M (0x00U) /*!< Cortex-M Core */
+
+
+#if defined ( __CC_ARM )
+ #define __ASM __asm /*!< asm keyword for ARM Compiler */
+ #define __INLINE __inline /*!< inline keyword for ARM Compiler */
+ #define __STATIC_INLINE static __inline
+
+#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+ #define __ASM __asm /*!< asm keyword for ARM Compiler */
+ #define __INLINE __inline /*!< inline keyword for ARM Compiler */
+ #define __STATIC_INLINE static __inline
+
+#elif defined ( __GNUC__ )
+ #define __ASM __asm /*!< asm keyword for GNU Compiler */
+ #define __INLINE inline /*!< inline keyword for GNU Compiler */
+ #define __STATIC_INLINE static inline
+
+#elif defined ( __ICCARM__ )
+ #define __ASM __asm /*!< asm keyword for IAR Compiler */
+ #define __INLINE inline /*!< inline keyword for IAR Compiler. Only available in High optimization mode! */
+ #define __STATIC_INLINE static inline
+
+#elif defined ( __TMS470__ )
+ #define __ASM __asm /*!< asm keyword for TI CCS Compiler */
+ #define __STATIC_INLINE static inline
+
+#elif defined ( __TASKING__ )
+ #define __ASM __asm /*!< asm keyword for TASKING Compiler */
+ #define __INLINE inline /*!< inline keyword for TASKING Compiler */
+ #define __STATIC_INLINE static inline
+
+#elif defined ( __CSMC__ )
+ #define __packed
+ #define __ASM _asm /*!< asm keyword for COSMIC Compiler */
+ #define __INLINE inline /*!< inline keyword for COSMIC Compiler. Use -pc99 on compile line */
+ #define __STATIC_INLINE static inline
+
+#else
+ #error Unknown compiler
+#endif
+
+/** __FPU_USED indicates whether an FPU is used or not.
+ This core does not support an FPU at all
+*/
+#define __FPU_USED 0U
+
+#if defined ( __CC_ARM )
+ #if defined __TARGET_FPU_VFP
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+ #endif
+
+#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+ #if defined __ARM_PCS_VFP
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+ #endif
+
+#elif defined ( __GNUC__ )
+ #if defined (__VFP_FP__) && !defined(__SOFTFP__)
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+ #endif
+
+#elif defined ( __ICCARM__ )
+ #if defined __ARMVFP__
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+ #endif
+
+#elif defined ( __TMS470__ )
+ #if defined __TI_VFP_SUPPORT__
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+ #endif
+
+#elif defined ( __TASKING__ )
+ #if defined __FPU_VFP__
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+ #endif
+
+#elif defined ( __CSMC__ )
+ #if ( __CSMC__ & 0x400U)
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+ #endif
+
+#endif
+
+#include "core_cmInstr.h" /* Core Instruction Access */
+#include "core_cmFunc.h" /* Core Function Access */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CM0_H_GENERIC */
+
+#ifndef __CMSIS_GENERIC
+
+#ifndef __CORE_CM0_H_DEPENDANT
+#define __CORE_CM0_H_DEPENDANT
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/* check device defines and use defaults */
+#if defined __CHECK_DEVICE_DEFINES
+ #ifndef __CM0_REV
+ #define __CM0_REV 0x0000U
+ #warning "__CM0_REV not defined in device header file; using default!"
+ #endif
+
+ #ifndef __NVIC_PRIO_BITS
+ #define __NVIC_PRIO_BITS 2U
+ #warning "__NVIC_PRIO_BITS not defined in device header file; using default!"
+ #endif
+
+ #ifndef __Vendor_SysTickConfig
+ #define __Vendor_SysTickConfig 0U
+ #warning "__Vendor_SysTickConfig not defined in device header file; using default!"
+ #endif
+#endif
+
+/* IO definitions (access restrictions to peripheral registers) */
+/**
+ \defgroup CMSIS_glob_defs CMSIS Global Defines
+
+ <strong>IO Type Qualifiers</strong> are used
+ \li to specify the access to peripheral variables.
+ \li for automatic generation of peripheral register debug information.
+*/
+#ifdef __cplusplus
+ #define __I volatile /*!< Defines 'read only' permissions */
+#else
+ #define __I volatile const /*!< Defines 'read only' permissions */
+#endif
+#define __O volatile /*!< Defines 'write only' permissions */
+#define __IO volatile /*!< Defines 'read / write' permissions */
+
+/* following defines should be used for structure members */
+#define __IM volatile const /*! Defines 'read only' structure member permissions */
+#define __OM volatile /*! Defines 'write only' structure member permissions */
+#define __IOM volatile /*! Defines 'read / write' structure member permissions */
+
+/*@} end of group Cortex_M0 */
+
+
+
+/*******************************************************************************
+ * Register Abstraction
+  Core registers contain:
+ - Core Register
+ - Core NVIC Register
+ - Core SCB Register
+ - Core SysTick Register
+ ******************************************************************************/
+/**
+ \defgroup CMSIS_core_register Defines and Type Definitions
+ \brief Type definitions and defines for Cortex-M processor based devices.
+*/
+
+/**
+ \ingroup CMSIS_core_register
+ \defgroup CMSIS_CORE Status and Control Registers
+ \brief Core Register type definitions.
+ @{
+ */
+
+/**
+ \brief Union type to access the Application Program Status Register (APSR).
+ */
+typedef union
+{
+ struct
+ {
+ uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */
+ uint32_t V:1; /*!< bit: 28 Overflow condition code flag */
+ uint32_t C:1; /*!< bit: 29 Carry condition code flag */
+ uint32_t Z:1; /*!< bit: 30 Zero condition code flag */
+ uint32_t N:1; /*!< bit: 31 Negative condition code flag */
+ } b; /*!< Structure used for bit access */
+ uint32_t w; /*!< Type used for word access */
+} APSR_Type;
+
+/* APSR Register Definitions */
+#define APSR_N_Pos 31U /*!< APSR: N Position */
+#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */
+
+#define APSR_Z_Pos 30U /*!< APSR: Z Position */
+#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */
+
+#define APSR_C_Pos 29U /*!< APSR: C Position */
+#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */
+
+#define APSR_V_Pos 28U /*!< APSR: V Position */
+#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */
+
+
+/**
+ \brief Union type to access the Interrupt Program Status Register (IPSR).
+ */
+typedef union
+{
+ struct
+ {
+ uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */
+ uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */
+ } b; /*!< Structure used for bit access */
+ uint32_t w; /*!< Type used for word access */
+} IPSR_Type;
+
+/* IPSR Register Definitions */
+#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */
+#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */
+
+
+/**
+ \brief Union type to access the Special-Purpose Program Status Registers (xPSR).
+ */
+typedef union
+{
+ struct
+ {
+ uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */
+ uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */
+ uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */
+ uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */
+ uint32_t V:1; /*!< bit: 28 Overflow condition code flag */
+ uint32_t C:1; /*!< bit: 29 Carry condition code flag */
+ uint32_t Z:1; /*!< bit: 30 Zero condition code flag */
+ uint32_t N:1; /*!< bit: 31 Negative condition code flag */
+ } b; /*!< Structure used for bit access */
+ uint32_t w; /*!< Type used for word access */
+} xPSR_Type;
+
+/* xPSR Register Definitions */
+#define xPSR_N_Pos 31U /*!< xPSR: N Position */
+#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */
+
+#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */
+#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */
+
+#define xPSR_C_Pos 29U /*!< xPSR: C Position */
+#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */
+
+#define xPSR_V_Pos 28U /*!< xPSR: V Position */
+#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */
+
+#define xPSR_T_Pos 24U /*!< xPSR: T Position */
+#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */
+
+#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */
+#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */
+
+
+/**
+ \brief Union type to access the Control Registers (CONTROL).
+ */
+typedef union
+{
+ struct
+ {
+ uint32_t _reserved0:1; /*!< bit: 0 Reserved */
+ uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */
+ uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */
+ } b; /*!< Structure used for bit access */
+ uint32_t w; /*!< Type used for word access */
+} CONTROL_Type;
+
+/* CONTROL Register Definitions */
+#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */
+#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */
+
+/*@} end of group CMSIS_CORE */
+
+
+/**
+ \ingroup CMSIS_core_register
+ \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC)
+ \brief Type definitions for the NVIC Registers
+ @{
+ */
+
+/**
+ \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC).
+ */
+typedef struct
+{
+ __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */
+ uint32_t RESERVED0[31U];
+ __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */
+ uint32_t RESERVED1[31U];
+ __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */
+ uint32_t RESERVED2[31U];
+ __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */
+ uint32_t RESERVED3[31U];
+ uint32_t RESERVED4[64U];
+ __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */
+} NVIC_Type;
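+/*
+   Usage sketch (illustrative, assuming the usual NVIC instance pointer that
+   core_cm0.h defines further down): the Cortex-M0 NVIC has a single 32-bit
+   word per register bank, so interrupt number n (0..31) maps to bit n:
+
+     NVIC->ISER[0U] = (1UL << (n & 0x1FU));   // enable interrupt n
+     NVIC->ICPR[0U] = (1UL << (n & 0x1FU));   // clear its pending flag
+*/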
+
+/*@} end of group CMSIS_NVIC */
+
+
+/**
+ \ingroup CMSIS_core_register
+ \defgroup CMSIS_SCB System Control Block (SCB)
+ \brief Type definitions for the System Control Block Registers
+ @{
+ */
+
+/**
+ \brief Structure type to access the System Control Block (SCB).
+ */
+typedef struct
+{
+ __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */
+ __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */
+ uint32_t RESERVED0;
+ __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */
+ __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */
+ __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */
+ uint32_t RESERVED1;
+ __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */
+ __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */
+} SCB_Type;
+
+/* SCB CPUID Register Definitions */
+#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */
+#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */
+
+#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */
+#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */
+
+#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */
+#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */
+
+#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */
+#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */
+
+#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */
+#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */
+
+/* SCB Interrupt Control State Register Definitions */
+#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */
+#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */
+
+#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */
+#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */
+
+#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */
+#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */
+
+#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */
+#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */
+
+#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */
+#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */
+
+#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */
+#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */
+
+#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */
+#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */
+
+#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */
+#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */
+
+#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */
+#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */
+
+/* SCB Application Interrupt and Reset Control Register Definitions */
+#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */
+#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */
+
+#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */
+#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */
+
+#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */
+#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */
+
+#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */
+#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */
+
+#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */
+#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */
+
+/* SCB System Control Register Definitions */
+#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */
+#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */
+
+#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */
+#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */
+
+#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */
+#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */
+
+/* SCB Configuration Control Register Definitions */
+#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */
+#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */
+
+#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */
+#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */
+
+/* SCB System Handler Control and State Register Definitions */
+#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */
+#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */
+
+/*@} end of group CMSIS_SCB */
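+/* [Editor's usage sketch -- not part of the original ARM header]
+   A minimal illustration of the SCB masks above: pending a PendSV exception
+   (a common context-switch trigger) and reading the active vector number.
+
+     SCB->ICSR = SCB_ICSR_PENDSVSET_Msk;                     // request PendSV
+     uint32_t active = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;  // 0 = Thread mode
+*/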
+
+
+/**
+ \ingroup CMSIS_core_register
+ \defgroup CMSIS_SysTick System Tick Timer (SysTick)
+ \brief Type definitions for the System Timer Registers.
+ @{
+ */
+
+/**
+ \brief Structure type to access the System Timer (SysTick).
+ */
+typedef struct
+{
+ __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */
+ __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */
+ __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */
+ __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */
+} SysTick_Type;
+
+/* SysTick Control / Status Register Definitions */
+#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */
+#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */
+
+#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */
+#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */
+
+#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */
+#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */
+
+#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */
+#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */
+
+/* SysTick Reload Register Definitions */
+#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */
+#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */
+
+/* SysTick Current Register Definitions */
+#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */
+#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */
+
+/* SysTick Calibration Register Definitions */
+#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */
+#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */
+
+#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */
+#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */
+
+#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */
+#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */
+
+/*@} end of group CMSIS_SysTick */
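+/* [Editor's usage sketch -- not part of the original ARM header]
+   Polled one-shot delay built from the SysTick masks above, counting the
+   processor clock for n cycles (n <= 0x1000000) without enabling the IRQ:
+
+     SysTick->LOAD = (n - 1UL) & SysTick_LOAD_RELOAD_Msk;
+     SysTick->VAL  = 0UL;                                    // clear current count
+     SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | SysTick_CTRL_ENABLE_Msk;
+     while ((SysTick->CTRL & SysTick_CTRL_COUNTFLAG_Msk) == 0UL) { }  // one period
+     SysTick->CTRL = 0UL;                                    // stop the timer
+*/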
+
+
+/**
+ \ingroup CMSIS_core_register
+ \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug)
+ \brief Cortex-M0 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over the DAP, not via the processor itself.
+ They are therefore not covered by the Cortex-M0 header file.
+ @{
+ */
+/*@} end of group CMSIS_CoreDebug */
+
+
+/**
+ \ingroup CMSIS_core_register
+ \defgroup CMSIS_core_bitfield Core register bit field macros
+ \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
+ @{
+ */
+
+/**
+ \brief Mask and shift a bit field value for use in a register bit range.
+ \param[in] field Name of the register bit field.
+ \param[in] value Value of the bit field.
+ \return Masked and shifted value.
+*/
+#define _VAL2FLD(field, value) ((value << field ## _Pos) & field ## _Msk)
+
+/**
+ \brief Mask and shift a register value to extract a bit field value.
+ \param[in] field Name of the register bit field.
+ \param[in] value Value of register.
+ \return Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value) ((value & field ## _Msk) >> field ## _Pos)
+
+/*@} end of group CMSIS_core_bitfield */
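+/* [Editor's usage sketch -- not part of the original ARM header]
+   Example against the SCB CPUID fields defined earlier; on a Cortex-M0 the
+   part number field reads back as 0xC20:
+
+     uint32_t partno = _FLD2VAL(SCB_CPUID_PARTNO, SCB->CPUID);  // extract bits 15:4
+     uint32_t field  = _VAL2FLD(SCB_CPUID_PARTNO, 0xC20UL);     // 0xC20 placed at bits 15:4
+*/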
+
+
+/**
+ \ingroup CMSIS_core_register
+ \defgroup CMSIS_core_base Core Definitions
+ \brief Definitions for base addresses, unions, and structures.
+ @{
+ */
+
+/* Memory mapping of Cortex-M0 Hardware */
+#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */
+#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */
+#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */
+#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */
+
+#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */
+#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */
+#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */
+
+
+/*@} */
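+/* [Editor's note -- not part of the original ARM header]
+   The pointer macros above overlay the register structures on fixed System
+   Control Space addresses, so an access such as
+
+     SysTick->VAL = 0UL;
+
+   compiles to a volatile 32-bit write to 0xE000E018 (SysTick_BASE + 0x008). */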
+
+
+
+/*******************************************************************************
+ * Hardware Abstraction Layer
+ Core Function Interface contains:
+ - Core NVIC Functions
+ - Core SysTick Functions
+ - Core Register Access Functions
+ ******************************************************************************/
+/**
+ \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference
+*/
+
+
+
+/* ########################## NVIC functions #################################### */
+/**
+ \ingroup CMSIS_Core_FunctionInterface
+ \defgroup CMSIS_Core_NVICFunctions NVIC Functions
+ \brief Functions that manage interrupts and exceptions via the NVIC.
+ @{
+ */
+
+/* Interrupt Priorities are WORD accessible only under ARMv6M */
+/* The following MACROS handle generation of the register offset and byte masks */
+#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL)
+#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) )
+#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) )
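+/* [Editor's worked example -- not part of the original ARM header]
+   Each 32-bit IP/SHP word packs four 8-bit priority slots, so the macros split
+   an interrupt number into a word index and a byte shift:
+
+     _IP_IDX(5)     == 1   // IRQ5 -> NVIC->IP[1]
+     _BIT_SHIFT(5)  == 8   // ... byte lane 1 (bits 15:8)
+     _SHP_IDX(-1)   == 1   // SysTick_IRQn -> SCB->SHP[1]
+     _BIT_SHIFT(-1) == 24  // ... top byte (bits 31:24)
+*/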
+
+
+/**
+ \brief Enable External Interrupt
+ \details Enables a device-specific interrupt in the NVIC interrupt controller.
+ \param [in] IRQn External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void NVIC_EnableIRQ(IRQn_Type IRQn)
+{
+ NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+}
+
+
+/**
+ \brief Disable External Interrupt
+ \details Disables a device-specific interrupt in the NVIC interrupt controller.
+ \param [in] IRQn External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void NVIC_DisableIRQ(IRQn_Type IRQn)
+{
+ NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+}
+
+
+/**
+ \brief Get Pending Interrupt
+ \details Reads the pending register in the NVIC and returns the pending bit for the specified interrupt.
+ \param [in] IRQn Interrupt number.
+ \return 0 Interrupt status is not pending.
+ \return 1 Interrupt status is pending.
+ */
+__STATIC_INLINE uint32_t NVIC_GetPendingIRQ(IRQn_Type IRQn)
+{
+ return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+
+/**
+ \brief Set Pending Interrupt
+ \details Sets the pending bit of an external interrupt.
+ \param [in] IRQn Interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void NVIC_SetPendingIRQ(IRQn_Type IRQn)
+{
+ NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+}
+
+
+/**
+ \brief Clear Pending Interrupt
+ \details Clears the pending bit of an external interrupt.
+ \param [in] IRQn External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void NVIC_ClearPendingIRQ(IRQn_Type IRQn)
+{
+ NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+}
+
+
+/**
+ \brief Set Interrupt Priority
+ \details Sets the priority of an interrupt.
+ \note The priority cannot be set for every core interrupt.
+ \param [in] IRQn Interrupt number.
+ \param [in] priority Priority to set.
+ */
+__STATIC_INLINE void NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority)
+{
+ if ((int32_t)(IRQn) < 0)
+ {
+ SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
+ (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
+ }
+ else
+ {
+ NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
+ (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
+ }
+}
+
+
+/**
+ \brief Get Interrupt Priority
+ \details Reads the priority of an interrupt.
+ The interrupt number can be positive to specify an external (device specific) interrupt,
+ or negative to specify an internal (core) interrupt.
+ \param [in] IRQn Interrupt number.
+ \return Interrupt Priority.
+ Value is aligned automatically to the implemented priority bits of the microcontroller.
+ */
+__STATIC_INLINE uint32_t NVIC_GetPriority(IRQn_Type IRQn)
+{
+
+ if ((int32_t)(IRQn) < 0)
+ {
+ return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
+ }
+ else
+ {
+ return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
+ }
+}
+
+
+/**
+ \brief System Reset
+ \details Initiates a system reset request to reset the MCU.
+ */
+__STATIC_INLINE void NVIC_SystemReset(void)
+{
+ __DSB(); /* Ensure all outstanding memory accesses, including
+ buffered writes, are completed before reset */
+ SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |
+ SCB_AIRCR_SYSRESETREQ_Msk);
+ __DSB(); /* Ensure completion of memory access */
+
+ for(;;) /* wait until reset */
+ {
+ __NOP();
+ }
+}
+
+/*@} end of CMSIS_Core_NVICFunctions */
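+/* [Editor's usage sketch -- not part of the original ARM header]
+   Typical enable sequence for a device interrupt; USART1_IRQn is a
+   device-specific enumerator assumed here purely for illustration:
+
+     NVIC_SetPriority(USART1_IRQn, 2U);  // 0 is the most urgent level
+     NVIC_ClearPendingIRQ(USART1_IRQn);  // drop any stale request
+     NVIC_EnableIRQ(USART1_IRQn);
+*/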
+
+
+
+/* ################################## SysTick functions ########################################### */
+/**
+ \ingroup CMSIS_Core_FunctionInterface
+ \defgroup CMSIS_Core_SysTickFunctions SysTick Functions
+ \brief Functions that configure the System Tick Timer.
+ @{
+ */
+
+#if (__Vendor_SysTickConfig == 0U)
+
+/**
+ \brief System Tick Configuration
+ \details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
+ The counter runs in free-running mode and generates periodic interrupts.
+ \param [in] ticks Number of ticks between two interrupts.
+ \return 0 Function succeeded.
+ \return 1 Function failed.
+ \note When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the
+ function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>
+ must contain a vendor-specific implementation of this function.
+ */
+__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)
+{
+ if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk)
+ {
+ return (1UL); /* Reload value impossible */
+ }
+
+ SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */
+ NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */
+ SysTick->VAL = 0UL; /* Load the SysTick Counter Value */
+ SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |
+ SysTick_CTRL_TICKINT_Msk |
+ SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */
+ return (0UL); /* Function successful */
+}
+
+#endif
+
+/*@} end of CMSIS_Core_SysTickFunctions */
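+/* [Editor's usage sketch -- not part of the original ARM header]
+   A 1 ms tick derived from the core clock; SystemCoreClock is the CMSIS
+   global maintained by the device's system_<device>.c:
+
+     if (SysTick_Config(SystemCoreClock / 1000UL) != 0UL)
+     {
+       while (1) { }  // reload value out of range (ticks - 1 > 0xFFFFFF)
+     }
+*/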
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CM0_H_DEPENDANT */
+
+#endif /* __CMSIS_GENERIC */
diff --git a/Libraries/CMSIS/Device/Include/core_cmFunc.h b/Libraries/CMSIS/Device/Include/core_cmFunc.h
new file mode 100644
index 0000000..ca319a5
--- /dev/null
+++ b/Libraries/CMSIS/Device/Include/core_cmFunc.h
@@ -0,0 +1,87 @@
+/**************************************************************************//**
+ * @file core_cmFunc.h
+ * @brief CMSIS Cortex-M Core Function Access Header File
+ * @version V4.30
+ * @date 20. October 2015
+ ******************************************************************************/
+/* Copyright (c) 2009 - 2015 ARM LIMITED
+
+ All rights reserved.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ - Neither the name of ARM nor the names of its contributors may be used
+ to endorse or promote products derived from this software without
+ specific prior written permission.
+ *
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+ ---------------------------------------------------------------------------*/
+
+
+#if defined ( __ICCARM__ )
+ #pragma system_include /* treat file as system include file for MISRA check */
+#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+ #pragma clang system_header /* treat file as system include file */
+#endif
+
+#ifndef __CORE_CMFUNC_H
+#define __CORE_CMFUNC_H
+
+
+/* ########################### Core Function Access ########################### */
+/** \ingroup CMSIS_Core_FunctionInterface
+ \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
+ @{
+*/
+
+/*------------------ RealView Compiler -----------------*/
+#if defined ( __CC_ARM )
+ #include "cmsis_armcc.h"
+
+/*------------------ ARM Compiler V6 -------------------*/
+#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+ #include "cmsis_armcc_V6.h"
+
+/*------------------ GNU Compiler ----------------------*/
+#elif defined ( __GNUC__ )
+ #include "cmsis_gcc.h"
+
+/*------------------ ICC Compiler ----------------------*/
+#elif defined ( __ICCARM__ )
+ #include <cmsis_iar.h>
+
+/*------------------ TI CCS Compiler -------------------*/
+#elif defined ( __TMS470__ )
+ #include <cmsis_ccs.h>
+
+/*------------------ TASKING Compiler ------------------*/
+#elif defined ( __TASKING__ )
+ /*
+ * The CMSIS functions have been implemented as intrinsics in the compiler.
+ * Please use "carm -?i" to get an up-to-date list of all intrinsics,
+ * including the CMSIS ones.
+ */
+
+/*------------------ COSMIC Compiler -------------------*/
+#elif defined ( __CSMC__ )
+ #include <cmsis_csm.h>
+
+#endif
+
+/*@} end of CMSIS_Core_RegAccFunctions */
+
+#endif /* __CORE_CMFUNC_H */
diff --git a/Libraries/CMSIS/Device/Include/core_cmInstr.h b/Libraries/CMSIS/Device/Include/core_cmInstr.h
new file mode 100644
index 0000000..a0a5064
--- /dev/null
+++ b/Libraries/CMSIS/Device/Include/core_cmInstr.h
@@ -0,0 +1,87 @@
+/**************************************************************************//**
+ * @file core_cmInstr.h
+ * @brief CMSIS Cortex-M Core Instruction Access Header File
+ * @version V4.30
+ * @date 20. October 2015
+ ******************************************************************************/
+/* Copyright (c) 2009 - 2015 ARM LIMITED
+
+ All rights reserved.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ - Neither the name of ARM nor the names of its contributors may be used
+ to endorse or promote products derived from this software without
+ specific prior written permission.
+ *
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+ ---------------------------------------------------------------------------*/
+
+
+#if defined ( __ICCARM__ )
+ #pragma system_include /* treat file as system include file for MISRA check */
+#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+ #pragma clang system_header /* treat file as system include file */
+#endif
+
+#ifndef __CORE_CMINSTR_H
+#define __CORE_CMINSTR_H
+
+
+/* ########################## Core Instruction Access ######################### */
+/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
+ Access to dedicated instructions
+ @{
+*/
+
+/*------------------ RealView Compiler -----------------*/
+#if defined ( __CC_ARM )
+ #include "cmsis_armcc.h"
+
+/*------------------ ARM Compiler V6 -------------------*/
+#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+ #include "cmsis_armcc_V6.h"
+
+/*------------------ GNU Compiler ----------------------*/
+#elif defined ( __GNUC__ )
+ #include "cmsis_gcc.h"
+
+/*------------------ ICC Compiler ----------------------*/
+#elif defined ( __ICCARM__ )
+ #include <cmsis_iar.h>
+
+/*------------------ TI CCS Compiler -------------------*/
+#elif defined ( __TMS470__ )
+ #include <cmsis_ccs.h>
+
+/*------------------ TASKING Compiler ------------------*/
+#elif defined ( __TASKING__ )
+ /*
+ * The CMSIS functions have been implemented as intrinsics in the compiler.
+ * Please use "carm -?i" to get an up-to-date list of all intrinsics,
+ * including the CMSIS ones.
+ */
+
+/*------------------ COSMIC Compiler -------------------*/
+#elif defined ( __CSMC__ )
+ #include <cmsis_csm.h>
+
+#endif
+
+/*@}*/ /* end of group CMSIS_Core_InstructionInterface */
+
+#endif /* __CORE_CMINSTR_H */
diff --git a/Libraries/Head_Task/head_task.c b/Libraries/Head_Task/head_task.c
index ff70a9c..7df305d 100644
--- a/Libraries/Head_Task/head_task.c
+++ b/Libraries/Head_Task/head_task.c
@@ -11,6 +11,7 @@
 #include "indicate_modes_task.h"
 #include "nixie_driver_process.h"
 #include "button_handler.h"
+#include "stm32f0xx_rtc.h"

 // Menu function numbers
diff --git a/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_adc.c b/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_adc.c
index 0a8c56d..cd8bdbf 100644
--- a/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_adc.c
+++ b/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_adc.c
@@ -75,6 +75,7 @@
 /* Includes ------------------------------------------------------------------*/
 #include "stm32f0xx_adc.h"
 #include "stm32f0xx_rcc.h"
+#include "stm32f0xx_conf.h"

 /** @addtogroup STM32F0xx_StdPeriph_Driver
   * @{
diff --git a/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_gpio.c b/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_gpio.c
index 880508c..8b0012d 100644
--- a/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_gpio.c
+++ b/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_gpio.c
@@ -75,6 +75,7 @@
 /* Includes ------------------------------------------------------------------*/
 #include "stm32f0xx_gpio.h"
 #include "stm32f0xx_rcc.h"
+#include "stm32f0xx_conf.h"

 /** @addtogroup STM32F0xx_StdPeriph_Driver
   * @{
diff --git a/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_misc.c b/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_misc.c
index 1ace673..9308dc0 100644
--- a/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_misc.c
+++ b/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_misc.c
@@ -28,6 +28,7 @@
 /* Includes ------------------------------------------------------------------*/
 #include "stm32f0xx_misc.h"
+#include "stm32f0xx_conf.h"

 /** @addtogroup STM32F0xx_StdPeriph_Driver
   * @{
diff --git a/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_pwr.c b/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_pwr.c
index 7c13ce4..4893434 100644
--- a/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_pwr.c
+++ b/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_pwr.c
@@ -35,6 +35,7 @@
 /* Includes ------------------------------------------------------------------*/
 #include "stm32f0xx_pwr.h"
 #include "stm32f0xx_rcc.h"
+#include "stm32f0xx_conf.h"

 /** @addtogroup STM32F0xx_StdPeriph_Driver
   * @{
diff --git a/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_rcc.c b/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_rcc.c
index ef8d5fd..1394d19 100644
--- a/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_rcc.c
+++ b/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_rcc.c
@@ -56,6 +56,7 @@
 /* Includes ------------------------------------------------------------------*/
 #include "stm32f0xx_rcc.h"
+#include "stm32f0xx_conf.h"

 /** @addtogroup STM32F0xx_StdPeriph_Driver
   * @{
diff --git a/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_rtc.c b/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_rtc.c
index 58b9978..f0481d1 100644
--- a/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_rtc.c
+++ b/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_rtc.c
@@ -232,6 +232,7 @@
 /* Includes ------------------------------------------------------------------*/
 #include "stm32f0xx_rtc.h"
+#include "stm32f0xx_conf.h"

 /** @addtogroup STM32F0xx_StdPeriph_Driver
   * @{
diff --git a/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_spi.c b/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_spi.c
index e0c148b..3acb886 100644
--- a/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_spi.c
+++ b/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_spi.c
@@ -93,6 +93,7 @@
 /* Includes ------------------------------------------------------------------*/
 #include "stm32f0xx_spi.h"
 #include "stm32f0xx_rcc.h"
+#include "stm32f0xx_conf.h"

 /** @addtogroup STM32F0xx_StdPeriph_Driver
   * @{
diff --git a/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_tim.c b/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_tim.c
index f7b4287..0a207d1 100644
--- a/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_tim.c
+++ b/Libraries/STM32F0xx_StdPeriph_Driver/src/stm32f0xx_tim.c
@@ -116,6 +116,7 @@
 /* Includes ------------------------------------------------------------------*/
 #include "stm32f0xx_tim.h"
 #include "stm32f0xx_rcc.h"
+#include "stm32f0xx_conf.h"

 /** @addtogroup STM32F0xx_StdPeriph_Driver
   * @{
diff --git a/Libraries/TouchSense/STMTouch_Driver/inc/tsl_time_stm32f0xx.h b/Libraries/TouchSense/STMTouch_Driver/inc/tsl_time_stm32f0xx.h
index 363044d..6216974 100644
--- a/Libraries/TouchSense/STMTouch_Driver/inc/tsl_time_stm32f0xx.h
+++ b/Libraries/TouchSense/STMTouch_Driver/inc/tsl_time_stm32f0xx.h
@@ -31,6 +31,7 @@
 /* Includes ------------------------------------------------------------------*/
 #include "stm32f0xx.h"
+#include "stm32f0xx_rcc.h"
 #include "tsl_conf_stm32f0xx.h"
 #include "tsl_types.h"
diff --git a/Libraries/TouchSense/STMTouch_Driver/src/tsl_acq.c b/Libraries/TouchSense/STMTouch_Driver/src/tsl_acq.c
index 45a9a10..85156ec 100644
--- a/Libraries/TouchSense/STMTouch_Driver/src/tsl_acq.c
+++ b/Libraries/TouchSense/STMTouch_Driver/src/tsl_acq.c
@@ -28,6 +28,7 @@
 /* Includes ------------------------------------------------------------------*/
 #include "tsl_acq.h"
 #include "tsl_globals.h"
+#include "stm32f0xx_conf.h"

 /* Private typedefs ----------------------------------------------------------*/
 /* Private defines -----------------------------------------------------------*/
diff --git a/Libraries/TouchSense/STMTouch_Driver/src/tsl_acq_stm32f0xx.c b/Libraries/TouchSense/STMTouch_Driver/src/tsl_acq_stm32f0xx.c
index a0967a9..24271de 100644
--- a/Libraries/TouchSense/STMTouch_Driver/src/tsl_acq_stm32f0xx.c
+++ b/Libraries/TouchSense/STMTouch_Driver/src/tsl_acq_stm32f0xx.c
@@ -30,6 +30,10 @@
 #include "tsl_acq_stm32f0xx.h"
 #include "tsl_globals.h"
 #include "stm32f0xx_it.h"
+#include "stm32f0xx_gpio.h"
+#include "stm32f0xx_conf.h"
+#include "stm32f0xx_misc.h"
+#include "stm32f0xx_rtc.h"

 /* Private typedefs ----------------------------------------------------------*/
 /* Private defines -----------------------------------------------------------*/
diff --git a/Libraries/TouchSense/STMTouch_Driver/src/tsl_ecs.c b/Libraries/TouchSense/STMTouch_Driver/src/tsl_ecs.c
index ae5e50a..39052fd 100644
--- a/Libraries/TouchSense/STMTouch_Driver/src/tsl_ecs.c
+++ b/Libraries/TouchSense/STMTouch_Driver/src/tsl_ecs.c
@@ -28,6 +28,7 @@
 /* Includes ------------------------------------------------------------------*/
 #include "tsl_ecs.h"
 #include "tsl_globals.h"
+#include "stm32f0xx_conf.h"

 /* Private typedefs ----------------------------------------------------------*/
 /* Private defines -----------------------------------------------------------*/
diff --git a/Libraries/TouchSense/STMTouch_Driver/src/tsl_time_stm32f0xx.c b/Libraries/TouchSense/STMTouch_Driver/src/tsl_time_stm32f0xx.c
index c44bb34..4db154a 100644
--- a/Libraries/TouchSense/STMTouch_Driver/src/tsl_time_stm32f0xx.c
+++ b/Libraries/TouchSense/STMTouch_Driver/src/tsl_time_stm32f0xx.c
@@ -27,6 +27,9 @@
 /* Includes ------------------------------------------------------------------*/
 #include "tsl_time_stm32f0xx.h"
+#include "stm32f0xx_misc.h"
+#include "stm32f0xx_rtc.h"
+#include "stm32f0xx_tim.h"

 /* Private typedefs ----------------------------------------------------------*/
 /* Private defines -----------------------------------------------------------*/