[fix] Add partial support for XTENSA processors (esp32 et al)
biojppm committed May 9, 2023
1 parent a63975e commit fa53042
Showing 5 changed files with 81 additions and 64 deletions.
2 changes: 2 additions & 0 deletions changelog/current.md
@@ -16,12 +16,14 @@
- `C4_RTTI`: defined when rtti is enabled
- `C4_IF_RTTI(code_with_rtti, code_without_rtti)`: select statements for rtti enabled/disabled
- `C4_IF_RTTI_(code_with_rtti, code_without_rtti)`: select code tokens for rtti enabled/disabled
+- Add partial support for XTENSA processors (missing implementation of `c4::aalloc()`) ([PR#108](https://github.com/biojppm/c4core/pull/108)). See [rapidyaml#358](https://github.com/biojppm/rapidyaml/issues/358).


### Fixes

- [PR#115](https://github.com/biojppm/c4core/pull/115) - Refactor of `c4::blob`/`c4::cblob`. Use SFINAE to invalidate some of the constructors.
- [PR#110](https://github.com/biojppm/c4core/pull/110)/[PR#107](https://github.com/biojppm/c4core/pull/107) - Update fast_float.
- [PR#108](https://github.com/biojppm/c4core/pull/108) - Fix preprocessor pasting of strings in `C4_NOT_IMPLEMENTED_MSG()` and `C4_NOT_IMPLEMENTED_IF_MSG()`.
- [PR#106](https://github.com/biojppm/c4core/pull/106) - Fix include guard in the gcc 4.8 compatibility header, causing it to be missing from the amalgamated header.
- [PR#105](https://github.com/biojppm/c4core/pull/105) - Fix existing throw in `c4/ext/sg14/inplace_function.h`. Ensure tests run with exceptions disabled and RTTI disabled. Add examples of exceptional control flow with `setjmp()/std::longjmp()`.
- [PR#104](https://github.com/biojppm/c4core/pull/104)/[PR#112](https://github.com/biojppm/c4core/pull/112) - Fix pedantic warnings in gcc, clang and MSVC
132 changes: 71 additions & 61 deletions src/c4/cpu.hpp
@@ -10,126 +10,136 @@
// see http://code.qt.io/cgit/qt/qtbase.git/tree/src/corelib/global/qprocessordetection.h

#ifdef __ORDER_LITTLE_ENDIAN__
-#define _C4EL __ORDER_LITTLE_ENDIAN__
+# define _C4EL __ORDER_LITTLE_ENDIAN__
#else
-#define _C4EL 1234
+# define _C4EL 1234
#endif

#ifdef __ORDER_BIG_ENDIAN__
-#define _C4EB __ORDER_BIG_ENDIAN__
+# define _C4EB __ORDER_BIG_ENDIAN__
#else
-#define _C4EB 4321
+# define _C4EB 4321
#endif

// mixed byte order (eg, PowerPC or ia64)
#define _C4EM 1111

#if defined(__x86_64) || defined(__x86_64__) || defined(__amd64) || defined(_M_X64)
-#define C4_CPU_X86_64
-#define C4_WORDSIZE 8
-#define C4_BYTE_ORDER _C4EL
+# define C4_CPU_X86_64
+# define C4_WORDSIZE 8
+# define C4_BYTE_ORDER _C4EL

#elif defined(__i386) || defined(__i386__) || defined(_M_IX86)
-#define C4_CPU_X86
-#define C4_WORDSIZE 4
-#define C4_BYTE_ORDER _C4EL
+# define C4_CPU_X86
+# define C4_WORDSIZE 4
+# define C4_BYTE_ORDER _C4EL

#elif defined(__arm__) || defined(_M_ARM) \
|| defined(__TARGET_ARCH_ARM) || defined(__aarch64__) || defined(_M_ARM64)
-#if defined(__aarch64__) || defined(_M_ARM64)
-#define C4_CPU_ARM64
-#define C4_CPU_ARMV8
-#define C4_WORDSIZE 8
-#else
-#define C4_CPU_ARM
-#define C4_WORDSIZE 4
-#if defined(__ARM_ARCH_8__) || defined(__ARM_ARCH_8A__) \
+# if defined(__aarch64__) || defined(_M_ARM64)
+# define C4_CPU_ARM64
+# define C4_CPU_ARMV8
+# define C4_WORDSIZE 8
+# else
+# define C4_CPU_ARM
+# define C4_WORDSIZE 4
+# if defined(__ARM_ARCH_8__) || defined(__ARM_ARCH_8A__) \
|| (defined(__ARCH_ARM) && __ARCH_ARM >= 8) \
|| (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM >= 8)
-#define C4_CPU_ARMV8
-#elif defined(__ARM_ARCH_7__) || defined(_ARM_ARCH_7) \
+# define C4_CPU_ARMV8
+# elif defined(__ARM_ARCH_7__) || defined(_ARM_ARCH_7) \
|| defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) \
|| defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) \
|| defined(__ARM_ARCH_7EM__) \
|| (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM >= 7) \
|| (defined(_M_ARM) && _M_ARM >= 7)
-#define C4_CPU_ARMV7
-#elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
+# define C4_CPU_ARMV7
+# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
|| defined(__ARM_ARCH_6T2__) || defined(__ARM_ARCH_6Z__) \
|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__) \
|| defined(__ARM_ARCH_6M__) || defined(__ARM_ARCH_6KZ__) \
|| (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM >= 6)
-#define C4_CPU_ARMV6
-#elif defined(__ARM_ARCH_5TEJ__) \
+# define C4_CPU_ARMV6
+# elif defined(__ARM_ARCH_5TEJ__) \
|| defined(__ARM_ARCH_5TE__) \
|| (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM >= 5)
-#define C4_CPU_ARMV5
-#elif defined(__ARM_ARCH_4T__) \
+# define C4_CPU_ARMV5
+# elif defined(__ARM_ARCH_4T__) \
|| (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM >= 4)
-#define C4_CPU_ARMV4
-#else
-#error "unknown CPU architecture: ARM"
-#endif
-#endif
-#if defined(__ARMEL__) || defined(__LITTLE_ENDIAN__) || defined(__AARCH64EL__) \
+# define C4_CPU_ARMV4
+# else
+# error "unknown CPU architecture: ARM"
+# endif
+# endif
+# if defined(__ARMEL__) || defined(__LITTLE_ENDIAN__) || defined(__AARCH64EL__) \
|| (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) \
|| defined(_MSC_VER) // winarm64 does not provide any of the above macros,
// but advises little-endianess:
// https://docs.microsoft.com/en-us/cpp/build/overview-of-arm-abi-conventions?view=msvc-170
// So if it is visual studio compiling, we'll assume little endian.
-#define C4_BYTE_ORDER _C4EL
-#elif defined(__ARMEB__) || defined(__BIG_ENDIAN__) || defined(__AARCH64EB__) \
+# define C4_BYTE_ORDER _C4EL
+# elif defined(__ARMEB__) || defined(__BIG_ENDIAN__) || defined(__AARCH64EB__) \
|| (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
-#define C4_BYTE_ORDER _C4EB
-#elif defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_PDP_ENDIAN__)
-#define C4_BYTE_ORDER _C4EM
-#else
-#error "unknown endianness"
-#endif
+# define C4_BYTE_ORDER _C4EB
+# elif defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_PDP_ENDIAN__)
+# define C4_BYTE_ORDER _C4EM
+# else
+# error "unknown endianness"
+# endif

#elif defined(__ia64) || defined(__ia64__) || defined(_M_IA64)
-#define C4_CPU_IA64
-#define C4_WORDSIZE 8
-#define C4_BYTE_ORDER _C4EM
+# define C4_CPU_IA64
+# define C4_WORDSIZE 8
+# define C4_BYTE_ORDER _C4EM
// itanium is bi-endian - check byte order below

#elif defined(__ppc__) || defined(__ppc) || defined(__powerpc__) \
|| defined(_ARCH_COM) || defined(_ARCH_PWR) || defined(_ARCH_PPC) \
|| defined(_M_MPPC) || defined(_M_PPC)
-#if defined(__ppc64__) || defined(__powerpc64__) || defined(__64BIT__)
-#define C4_CPU_PPC64
-#define C4_WORDSIZE 8
-#else
-#define C4_CPU_PPC
-#define C4_WORDSIZE 4
-#endif
-#define C4_BYTE_ORDER _C4EM
+# if defined(__ppc64__) || defined(__powerpc64__) || defined(__64BIT__)
+# define C4_CPU_PPC64
+# define C4_WORDSIZE 8
+# else
+# define C4_CPU_PPC
+# define C4_WORDSIZE 4
+# endif
+# define C4_BYTE_ORDER _C4EM
// ppc is bi-endian - check byte order below

#elif defined(__s390x__) || defined(__zarch__) || defined(__SYSC_ZARCH_)
# define C4_CPU_S390_X
# define C4_WORDSIZE 8
# define C4_BYTE_ORDER _C4EB

+#elif defined(__xtensa__) || defined(__XTENSA__)
+# define C4_CPU_XTENSA
+# define C4_WORDSIZE 4
+// not sure about this...
+# if defined(__XTENSA_EL__) || defined(__xtensa_el__)
+# define C4_BYTE_ORDER _C4EL
+# else
+# define C4_BYTE_ORDER _C4EB
+# endif

#elif defined(__riscv)
-#if __riscv_xlen == 64
-#define C4_CPU_RISCV64
-#define C4_WORDSIZE 8
-#else
-#define C4_CPU_RISCV32
-#define C4_WORDSIZE 4
-#endif
-#define C4_BYTE_ORDER _C4EL
+# if __riscv_xlen == 64
+# define C4_CPU_RISCV64
+# define C4_WORDSIZE 8
+# else
+# define C4_CPU_RISCV32
+# define C4_WORDSIZE 4
+# endif
+# define C4_BYTE_ORDER _C4EL

#elif defined(__EMSCRIPTEN__)
# define C4_BYTE_ORDER _C4EL
# define C4_WORDSIZE 4

#elif defined(SWIG)
#error "please define CPU architecture macros when compiling with swig"
# error "please define CPU architecture macros when compiling with swig"

#else
#error "unknown CPU architecture"
# error "unknown CPU architecture"
#endif

#define C4_LITTLE_ENDIAN (C4_BYTE_ORDER == _C4EL)
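Note: the new `C4_CPU_XTENSA` branch above assumes `__XTENSA_EL__` marks little-endian cores and defaults to big-endian otherwise, as the `// not sure about this...` comment admits. Below is a minimal downstream sanity check, not part of this commit; the expected values are an assumption based on ESP32's Xtensa LX6/LX7 cores being 32-bit and the ESP toolchains defining `__XTENSA_EL__`.

```cpp
// Hypothetical compile-time check, not part of this commit. On an ESP32
// build, assuming the toolchain defines __xtensa__ and __XTENSA_EL__, the
// new branch above should produce these values:
#include "c4/cpu.hpp"

#ifdef C4_CPU_XTENSA
static_assert(C4_WORDSIZE == 4, "Xtensa is detected as a 32-bit target");
static_assert(C4_LITTLE_ENDIAN, "ESP32 toolchains are assumed to define __XTENSA_EL__");
#endif
```
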
6 changes: 3 additions & 3 deletions src/c4/error.hpp
@@ -346,12 +346,12 @@ struct srcloc
// Common error conditions

#define C4_NOT_IMPLEMENTED() C4_ERROR("NOT IMPLEMENTED")
-#define C4_NOT_IMPLEMENTED_MSG(/*msg, */...) C4_ERROR("NOT IMPLEMENTED: " ## __VA_ARGS__)
+#define C4_NOT_IMPLEMENTED_MSG(/*msg, */...) C4_ERROR("NOT IMPLEMENTED: " __VA_ARGS__)
#define C4_NOT_IMPLEMENTED_IF(condition) do { if(C4_UNLIKELY(condition)) { C4_ERROR("NOT IMPLEMENTED"); } } while(0)
-#define C4_NOT_IMPLEMENTED_IF_MSG(condition, /*msg, */...) do { if(C4_UNLIKELY(condition)) { C4_ERROR("NOT IMPLEMENTED: " ## __VA_ARGS__); } } while(0)
+#define C4_NOT_IMPLEMENTED_IF_MSG(condition, /*msg, */...) do { if(C4_UNLIKELY(condition)) { C4_ERROR("NOT IMPLEMENTED: " __VA_ARGS__); } } while(0)

#define C4_NEVER_REACH() do { C4_ERROR("never reach this point"); C4_UNREACHABLE(); } while(0)
-#define C4_NEVER_REACH_MSG(/*msg, */...) do { C4_ERROR("never reach this point: " ## __VA_ARGS__); C4_UNREACHABLE(); } while(0)
+#define C4_NEVER_REACH_MSG(/*msg, */...) do { C4_ERROR("never reach this point: " __VA_ARGS__); C4_UNREACHABLE(); } while(0)



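The three changes above drop the `##` operator in front of `__VA_ARGS__`. Pasting a string literal onto another string literal with `##` is ill-formed, because the paste must yield a single valid preprocessing token; plain juxtaposition works because adjacent string literals are concatenated by the compiler. A small self-contained illustration, with `PREFIX_MSG` as a hypothetical stand-in for the macros above:

```cpp
// Illustration only; PREFIX_MSG is a hypothetical stand-in, not a c4core macro.
#include <cstdio>

#define PREFIX_MSG(...) std::printf("NOT IMPLEMENTED: " __VA_ARGS__)

int main()
{
    // expands to std::printf("NOT IMPLEMENTED: " "feature %d\n", 42);
    // the two adjacent string literals are merged into one by the compiler
    PREFIX_MSG("feature %d\n", 42);
    // the old form, "NOT IMPLEMENTED: " ## "feature %d\n", is rejected:
    // ## must produce a single valid preprocessing token, and it cannot here
    return 0;
}
```
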
3 changes: 3 additions & 0 deletions src/c4/memory_resource.cpp
@@ -70,6 +70,9 @@ void* aalloc_impl(size_t size, size_t alignment)
return nullptr;
}
#else
+(void)size;
+(void)alignment;
+mem = nullptr;
C4_NOT_IMPLEMENTED_MSG("need to implement an aligned allocation for this platform");
#endif
C4_ASSERT_MSG((uintptr_t(mem) & (alignment-1)) == 0, "address %p is not aligned to %zu boundary", mem, alignment);
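The added lines silence unused-parameter warnings and make the fallback fail loudly through `C4_NOT_IMPLEMENTED_MSG()` instead of returning an uninitialized pointer; this is the gap that makes XTENSA support partial, since `c4::aalloc()` still has no implementation there. One way the branch could eventually be filled for ESP32 builds on ESP-IDF is sketched below; `heap_caps_aligned_alloc()`, `heap_caps_free()` and `MALLOC_CAP_DEFAULT` come from ESP-IDF's `esp_heap_caps.h`, and treating them as available is an assumption, not something this commit provides.

```cpp
// Hypothetical sketch, NOT part of this commit: a possible XTENSA/ESP-IDF
// branch for aligned allocation. The helper name and the ESP_PLATFORM guard
// are illustrative assumptions.
#include <stddef.h>

#if defined(C4_XTENSA) && defined(ESP_PLATFORM)
#include <esp_heap_caps.h>

static void* esp32_aligned_alloc(size_t size, size_t alignment)
{
    // alignment is expected to be a power of two, matching the check that
    // aalloc_impl() already performs on the returned address
    return heap_caps_aligned_alloc(alignment, size, MALLOC_CAP_DEFAULT);
}
#endif
```

A matching branch would also be needed in the deallocation path, since memory obtained this way is meant to be released with `heap_caps_free()` (again an assumption about the ESP-IDF API).
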
2 changes: 2 additions & 0 deletions src/c4/platform.hpp
@@ -30,6 +30,8 @@
# define C4_UNIX
#elif defined(__arm__) || defined(__aarch64__)
# define C4_ARM
+#elif defined(__xtensa__) || defined(__XTENSA__)
+# define C4_XTENSA
#elif defined(SWIG)
# define C4_SWIG
#else
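Finally, a minimal sketch, not from this commit, of how downstream code might branch on the new `C4_XTENSA` macro to avoid paths that are not yet available on these targets, such as the aligned allocation discussed above; both helper functions are placeholders declared only to keep the sketch self-contained.

```cpp
// Hypothetical downstream usage of the new platform macro.
#include "c4/platform.hpp"

void use_default_heap();   // placeholder, not part of c4core
void use_aligned_pools();  // placeholder, not part of c4core

void configure_allocators()
{
#ifdef C4_XTENSA
    // aligned allocation is not implemented on this platform yet
    use_default_heap();
#else
    use_aligned_pools();
#endif
}
```
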
