/*
 * Copyright (c) 2018 Linaro Limited.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief ARM AArch32 specific syscall header
 *
 * This header contains the ARM AArch32 specific syscall interface. It is
 * included by the syscall interface architecture-abstraction header
 * (include/arch/syscall.h)
 */

#ifndef ZEPHYR_INCLUDE_ARCH_ARM_AARCH32_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_AARCH32_SYSCALL_H_

#define _SVC_CALL_CONTEXT_SWITCH	0
#define _SVC_CALL_IRQ_OFFLOAD		1
#define _SVC_CALL_RUNTIME_EXCEPT	2
#define _SVC_CALL_SYSTEM_CALL		3

#ifdef CONFIG_USERSPACE

#ifndef _ASMLANGUAGE

#include <zephyr/types.h>
#include <stdbool.h>
#include <arch/arm/aarch32/misc.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Syscall invocation macros. arm-specific machine constraints used to ensure
 * args land in the proper registers.
 */
/**
 * @brief Invoke a system call with six arguments.
 *
 * The register-pinned locals (register ... __asm__("rN")) are the ARM
 * machine constraints: they force the compiler to place arg1..arg6 in
 * r0-r5 and the call ID in r6 before the SVC traps into the kernel.
 * r0-r3 are listed as outputs as well, since the kernel may modify them;
 * the syscall return value comes back in r0.
 *
 * @param arg1 First syscall argument (r0).
 * @param arg2 Second syscall argument (r1).
 * @param arg3 Third syscall argument (r2).
 * @param arg4 Fourth syscall argument (r3).
 * @param arg5 Fifth syscall argument (r4).
 * @param arg6 Sixth syscall argument (r5).
 * @param call_id Syscall ID (r6).
 * @return Return value from the kernel (r0).
 */
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r2 __asm__("r2") = arg3;
	register uint32_t r3 __asm__("r3") = arg4;
	register uint32_t r4 __asm__("r4") = arg5;
	register uint32_t r5 __asm__("r5") = arg6;
	register uint32_t r6 __asm__("r6") = call_id;

	/* NOTE(review): r8 and ip are declared clobbered, presumably because
	 * the kernel's SVC entry path trashes them -- confirm against the
	 * SVC handler. "memory" prevents the compiler from caching memory
	 * values across the trap.
	 */
	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret), "=r"(r1), "=r"(r2), "=r"(r3)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r1), "r" (r2), "r" (r3),
			   "r" (r4), "r" (r5), "r" (r6)
			 : "r8", "memory", "ip");

	return ret;
}
/**
 * @brief Invoke a system call with five arguments.
 *
 * Same register discipline as arch_syscall_invoke6: arg1..arg5 are pinned
 * to r0-r4 and the call ID to r6, then SVC #_SVC_CALL_SYSTEM_CALL traps
 * into the kernel. r5 carries no argument here and is simply left alone.
 *
 * @param arg1 First syscall argument (r0).
 * @param arg2 Second syscall argument (r1).
 * @param arg3 Third syscall argument (r2).
 * @param arg4 Fourth syscall argument (r3).
 * @param arg5 Fifth syscall argument (r4).
 * @param call_id Syscall ID (r6).
 * @return Return value from the kernel (r0).
 */
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5,
					     uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r2 __asm__("r2") = arg3;
	register uint32_t r3 __asm__("r3") = arg4;
	register uint32_t r4 __asm__("r4") = arg5;
	register uint32_t r6 __asm__("r6") = call_id;

	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret), "=r"(r1), "=r"(r2), "=r"(r3)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r1), "r" (r2), "r" (r3),
			   "r" (r4), "r" (r6)
			 : "r8", "memory", "ip");

	return ret;
}
/**
 * @brief Invoke a system call with four arguments.
 *
 * arg1..arg4 are pinned to r0-r3 and the call ID to r6 before the SVC
 * trap. All four argument registers double as outputs since the kernel
 * may modify them; the return value is delivered in r0.
 *
 * @param arg1 First syscall argument (r0).
 * @param arg2 Second syscall argument (r1).
 * @param arg3 Third syscall argument (r2).
 * @param arg4 Fourth syscall argument (r3).
 * @param call_id Syscall ID (r6).
 * @return Return value from the kernel (r0).
 */
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r2 __asm__("r2") = arg3;
	register uint32_t r3 __asm__("r3") = arg4;
	register uint32_t r6 __asm__("r6") = call_id;

	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret), "=r"(r1), "=r"(r2), "=r"(r3)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r1), "r" (r2), "r" (r3),
			   "r" (r6)
			 : "r8", "memory", "ip");

	return ret;
}
/**
 * @brief Invoke a system call with three arguments.
 *
 * arg1..arg3 go in r0-r2 and the call ID in r6. r3 carries no argument
 * and is not an asm operand, so it is listed in the clobbers instead --
 * the kernel may still overwrite it across the trap.
 *
 * @param arg1 First syscall argument (r0).
 * @param arg2 Second syscall argument (r1).
 * @param arg3 Third syscall argument (r2).
 * @param call_id Syscall ID (r6).
 * @return Return value from the kernel (r0).
 */
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3,
					     uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r2 __asm__("r2") = arg3;
	register uint32_t r6 __asm__("r6") = call_id;

	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret), "=r"(r1), "=r"(r2)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r1), "r" (r2), "r" (r6)
			 : "r8", "memory", "r3", "ip");

	return ret;
}
/**
 * @brief Invoke a system call with two arguments.
 *
 * arg1/arg2 go in r0/r1 and the call ID in r6. The unused argument
 * registers r2 and r3 are clobbers rather than operands, since the
 * kernel may overwrite them across the trap.
 *
 * @param arg1 First syscall argument (r0).
 * @param arg2 Second syscall argument (r1).
 * @param call_id Syscall ID (r6).
 * @return Return value from the kernel (r0).
 */
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r6 __asm__("r6") = call_id;

	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret), "=r"(r1)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r1), "r" (r6)
			 : "r8", "memory", "r2", "r3", "ip");

	return ret;
}
/**
 * @brief Invoke a system call with one argument.
 *
 * arg1 goes in r0 and the call ID in r6; the unused argument registers
 * r1-r3 are listed as clobbers since the kernel may overwrite them.
 *
 * @param arg1 First syscall argument (r0).
 * @param call_id Syscall ID (r6).
 * @return Return value from the kernel (r0).
 */
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
					     uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r6 __asm__("r6") = call_id;

	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r6)
			 : "r8", "memory", "r1", "r2", "r3", "ip");

	return ret;
}
/**
 * @brief Invoke a system call with no arguments.
 *
 * Only the call ID is passed (in r6); r0 is uninitialized on entry and
 * receives the kernel's return value. Note ret is deliberately passed
 * as an input operand too, to keep the operand lists parallel with the
 * other invoke variants.
 *
 * @param call_id Syscall ID (r6).
 * @return Return value from the kernel (r0).
 */
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
	register uint32_t ret __asm__("r0");
	register uint32_t r6 __asm__("r6") = call_id;

	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r6)
			 : "r8", "memory", "r1", "r2", "r3", "ip");

	return ret;
}
/**
 * @brief Indicate whether the CPU is currently running in user context.
 *
 * On Cortex-M, a non-zero IPSR means an exception is being serviced
 * (handler mode), which is never user context, so that case short-circuits
 * to false. Otherwise the decision is delegated to
 * z_arm_thread_is_in_user_mode() (declared in arch/arm/aarch32/misc.h).
 *
 * @return true if executing in an unprivileged (user) thread context.
 */
static inline bool arch_is_user_context(void)
{
#if defined(CONFIG_CPU_CORTEX_M)
	uint32_t value;

	/* check for handler mode: IPSR holds the active exception number */
	__asm__ volatile("mrs %0, IPSR\n\t" : "=r"(value));
	if (value) {
		return false;
	}
#endif
	return z_arm_thread_is_in_user_mode();
}
#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */

#endif /* ZEPHYR_INCLUDE_ARCH_ARM_AARCH32_SYSCALL_H_ */