/* libc-hooks.c */
  1. /*
  2. * Copyright (c) 2015, Intel Corporation.
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <arch/cpu.h>
  7. #include <errno.h>
  8. #include <stdio.h>
  9. #include <malloc.h>
  10. #include <sys/__assert.h>
  11. #include <sys/stat.h>
  12. #include <linker/linker-defs.h>
  13. #include <sys/util.h>
  14. #include <sys/errno_private.h>
  15. #include <sys/libc-hooks.h>
  16. #include <syscall_handler.h>
  17. #include <app_memory/app_memdomain.h>
  18. #include <init.h>
  19. #include <sys/sem.h>
  20. #include <sys/mutex.h>
  21. #include <sys/mem_manage.h>
  22. #include <sys/time.h>
  23. #define LIBC_BSS K_APP_BMEM(z_libc_partition)
  24. #define LIBC_DATA K_APP_DMEM(z_libc_partition)
/*
 * End result of this thorny set of ifdefs is to define:
 *
 * - HEAP_BASE base address of the heap arena
 * - MAX_HEAP_SIZE size of the heap arena
 */
#ifdef CONFIG_MMU
#ifdef CONFIG_USERSPACE
/* Filled in at boot by malloc_prepare() once the arena is mapped */
struct k_mem_partition z_malloc_partition;
#endif
/* With an MMU the arena is mapped at runtime, so base/size are variables */
LIBC_BSS static unsigned char *heap_base;
LIBC_BSS static size_t max_heap_size;
#define HEAP_BASE heap_base
#define MAX_HEAP_SIZE max_heap_size
#define USE_MALLOC_PREPARE 1
#elif CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE
/* Arena size expressed in Kconfig, due to power-of-two size/align
 * requirements of certain MPUs.
 *
 * We use an automatic memory partition instead of setting this up
 * in malloc_prepare().
 */
K_APPMEM_PARTITION_DEFINE(z_malloc_partition);
#define MALLOC_BSS K_APP_BMEM(z_malloc_partition)

/* Compiler will throw an error if the provided value isn't a
 * power of two
 */
MALLOC_BSS static unsigned char
	__aligned(CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE)
	heap_base[CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE];
#define MAX_HEAP_SIZE CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE
#define HEAP_BASE heap_base
#else /* Not MMU or CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE */
/* Address just past the linker-placed kernel image */
#define USED_RAM_END_ADDR POINTER_TO_UINT(&_end)
#ifdef Z_MALLOC_PARTITION_EXISTS
/* Start of malloc arena needs to be aligned per MPU
 * requirements
 */
struct k_mem_partition z_malloc_partition;
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
#define HEAP_BASE ROUND_UP(USED_RAM_END_ADDR, \
			   CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE)
#elif defined(CONFIG_ARC)
#define HEAP_BASE ROUND_UP(USED_RAM_END_ADDR, \
			   Z_ARC_MPU_ALIGN)
#else
#error "Unsupported platform"
#endif /* CONFIG_<arch> */
#define USE_MALLOC_PREPARE 1
#else
/* End of kernel image.
 * NOTE(review): this diverges from upstream, which uses USED_RAM_END_ADDR
 * as the heap base; here a statically sized BSS array is used instead —
 * confirm this is the intended local change.
 */
static unsigned char __aligned(4) heap_base[CONFIG_NEWLIB_LIBC_MIN_REQUIRED_HEAP_SIZE];
//#define HEAP_BASE USED_RAM_END_ADDR
#define HEAP_BASE heap_base
#endif
/* End of the malloc arena is the end of physical memory */
#if defined(CONFIG_XTENSA)
/* TODO: Why is xtensa a special case? */
extern void *_heap_sentry;
#define MAX_HEAP_SIZE (POINTER_TO_UINT(&_heap_sentry) - \
		       HEAP_BASE)
#else
/* NOTE(review): upstream derives the arena size from the end of RAM; here
 * it is pinned to the Kconfig minimum (see commented-out PSRAM variant) —
 * confirm intended.
 */
/*#define MAX_HEAP_SIZE (KB(CONFIG_PSRAM_SIZE) - (HEAP_BASE - \
		       CONFIG_PSRAM_BASE_ADDRESS)) */
#define MAX_HEAP_SIZE CONFIG_NEWLIB_LIBC_MIN_REQUIRED_HEAP_SIZE
#endif /* CONFIG_XTENSA */
#endif
/* One-time heap arena setup, run at APPLICATION init level.
 * Maps the arena when an MMU is present and publishes it as a memory
 * partition for user mode when Z_MALLOC_PARTITION_EXISTS.
 */
static int malloc_prepare(const struct device *unused)
{
	ARG_UNUSED(unused);

#ifdef USE_MALLOC_PREPARE
#ifdef CONFIG_MMU
	/* Arena is bounded by both the Kconfig cap and free physical memory */
	max_heap_size = MIN(CONFIG_NEWLIB_LIBC_MAX_MAPPED_REGION_SIZE,
			    k_mem_free_get());

	if (max_heap_size != 0) {
		heap_base = k_mem_map(max_heap_size, K_MEM_PERM_RW);
		/* NOTE(review): with asserts disabled a failed mapping leaves
		 * heap_base NULL and _sbrk() handing out offsets from address
		 * zero — confirm acceptable.
		 */
		__ASSERT(heap_base != NULL,
			 "failed to allocate heap of size %zu", max_heap_size);
	}
#endif /* CONFIG_MMU */
#ifdef Z_MALLOC_PARTITION_EXISTS
	/* Publish the arena so user-mode threads whose domain includes this
	 * partition can use the malloc heap.
	 */
	z_malloc_partition.start = (uintptr_t)HEAP_BASE;
	z_malloc_partition.size = (size_t)MAX_HEAP_SIZE;
	z_malloc_partition.attr = K_MEM_PARTITION_P_RW_U_RW;
#endif /* Z_MALLOC_PARTITION_EXISTS */
#endif /* USE_MALLOC_PREPARE */

	/*
	 * Validate that the memory space available for the newlib heap is
	 * greater than the minimum required size.
	 */
	__ASSERT(MAX_HEAP_SIZE >= CONFIG_NEWLIB_LIBC_MIN_REQUIRED_HEAP_SIZE,
		 "memory space available for newlib heap is less than the "
		 "minimum required size specified by "
		 "CONFIG_NEWLIB_LIBC_MIN_REQUIRED_HEAP_SIZE");

	return 0;
}
SYS_INIT(malloc_prepare, APPLICATION, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

/* Current offset from HEAP_BASE of unused memory (the program break) */
LIBC_BSS static size_t heap_sz;
  124. static int _stdout_hook_default(int c)
  125. {
  126. (void)(c); /* Prevent warning about unused argument */
  127. return EOF;
  128. }
  129. static int (*_stdout_hook)(int) = _stdout_hook_default;
  130. void __stdout_hook_install(int (*hook)(int))
  131. {
  132. _stdout_hook = hook;
  133. }
  134. void *__stdout_get_hook(void)
  135. {
  136. return _stdout_hook;
  137. }
  138. static unsigned char _stdin_hook_default(void)
  139. {
  140. return 0;
  141. }
  142. static unsigned char (*_stdin_hook)(void) = _stdin_hook_default;
  143. void __stdin_hook_install(unsigned char (*hook)(void))
  144. {
  145. _stdin_hook = hook;
  146. }
  147. int z_impl_zephyr_read_stdin(char *buf, int nbytes)
  148. {
  149. int i = 0;
  150. for (i = 0; i < nbytes; i++) {
  151. *(buf + i) = _stdin_hook();
  152. if ((*(buf + i) == '\n') || (*(buf + i) == '\r')) {
  153. i++;
  154. break;
  155. }
  156. }
  157. return i;
  158. }
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: confirm the caller-supplied buffer is
 * writable by the invoking user thread before filling it from stdin.
 */
static inline int z_vrfy_zephyr_read_stdin(char *buf, int nbytes)
{
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(buf, nbytes));
	return z_impl_zephyr_read_stdin((char *)buf, nbytes);
}
#include <syscalls/zephyr_read_stdin_mrsh.c>
#endif
  167. int z_impl_zephyr_write_stdout(const void *buffer, int nbytes)
  168. {
  169. const char *buf = buffer;
  170. int i;
  171. for (i = 0; i < nbytes; i++) {
  172. if (*(buf + i) == '\n') {
  173. _stdout_hook('\r');
  174. }
  175. _stdout_hook(*(buf + i));
  176. }
  177. return nbytes;
  178. }
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: confirm the caller-supplied buffer is
 * readable by the invoking user thread before echoing it to stdout.
 */
static inline int z_vrfy_zephyr_write_stdout(const void *buf, int nbytes)
{
	Z_OOPS(Z_SYSCALL_MEMORY_READ(buf, nbytes));
	return z_impl_zephyr_write_stdout((const void *)buf, nbytes);
}
#include <syscalls/zephyr_write_stdout_mrsh.c>
#endif
#ifndef CONFIG_POSIX_API
/* Minimal POSIX-style syscall stubs for newlib when the full POSIX API is
 * not enabled: only console-style stdin/stdout transfer is supported.
 */
int _read(int fd, char *buf, int nbytes)
{
	ARG_UNUSED(fd); /* every descriptor aliases the console */

	return zephyr_read_stdin(buf, nbytes);
}
__weak FUNC_ALIAS(_read, read, int);

int _write(int fd, const void *buf, int nbytes)
{
	ARG_UNUSED(fd);

	return zephyr_write_stdout(buf, nbytes);
}
__weak FUNC_ALIAS(_write, write, int);

/* No file system: open always fails.
 * NOTE(review): errno is not set here, so callers see -1 with a stale
 * errno — confirm acceptable.
 */
int _open(const char *name, int mode)
{
	return -1;
}
__weak FUNC_ALIAS(_open, open, int);

int _close(int file)
{
	return -1;
}
__weak FUNC_ALIAS(_close, close, int);

/* Seeking is meaningless on the console; always report position 0. */
int _lseek(int file, int ptr, int dir)
{
	return 0;
}
__weak FUNC_ALIAS(_lseek, lseek, int);
#else
/* With CONFIG_POSIX_API, write() is provided by the POSIX subsystem */
extern ssize_t write(int file, const char *buffer, size_t count);
#define _write write
#endif
/* Treat the three standard descriptors (stdin/stdout/stderr) as a TTY.
 * NOTE(review): negative descriptors also satisfy file <= 2 — confirm
 * intended.
 */
int _isatty(int file)
{
	return file <= 2;
}
__weak FUNC_ALIAS(_isatty, isatty, int);
/* Single-process environment: signals are unsupported; report success. */
int _kill(int i, int j)
{
	return 0;
}
__weak FUNC_ALIAS(_kill, kill, int);

/* Single-process environment: there is only "process" 0. */
int _getpid(void)
{
	return 0;
}
__weak FUNC_ALIAS(_getpid, getpid, int);
/* Report every descriptor as a character device (the console); newlib
 * uses this to select unbuffered/line-buffered stdio behavior.
 */
int _fstat(int file, struct stat *st)
{
	st->st_mode = S_IFCHR;
	return 0;
}
__weak FUNC_ALIAS(_fstat, fstat, int);
/* Program termination: announce on fd 1 and spin forever, as there is no
 * host OS to return control to.
 * NOTE(review): @status is ignored and the message goes to stdout rather
 * than stderr — confirm intended.
 */
__weak void _exit(int status)
{
	_write(1, "exit\n", 5);
	while (1) {
		;
	}
}
  247. void *_sbrk(intptr_t count)
  248. {
  249. void *ret, *ptr;
  250. ptr = ((char *)HEAP_BASE) + heap_sz;
  251. if ((heap_sz + count) < MAX_HEAP_SIZE) {
  252. heap_sz += count;
  253. ret = ptr;
  254. } else {
  255. ret = (void *)-1;
  256. }
  257. return ret;
  258. }
  259. __weak FUNC_ALIAS(_sbrk, sbrk, void *);
#ifdef CONFIG_MULTITHREADING
/*
 * Newlib Retargetable Locking Interface Implementation
 *
 * When multithreading is enabled, the newlib retargetable locking interface is
 * defined below to override the default void implementation and provide the
 * Zephyr-side locks.
 *
 * NOTE: `k_mutex` and `k_sem` are used instead of `sys_mutex` and `sys_sem`
 * because the latter do not support dynamic allocation for now.
 */

/* Static locks: recursive newlib locks map to k_mutex (owner may re-lock) */
K_MUTEX_DEFINE(__lock___sinit_recursive_mutex);
K_MUTEX_DEFINE(__lock___sfp_recursive_mutex);
K_MUTEX_DEFINE(__lock___atexit_recursive_mutex);
K_MUTEX_DEFINE(__lock___malloc_recursive_mutex);
K_MUTEX_DEFINE(__lock___env_recursive_mutex);
/* Non-recursive locks map to binary semaphores (initial count 1, limit 1) */
K_SEM_DEFINE(__lock___at_quick_exit_mutex, 1, 1);
K_SEM_DEFINE(__lock___tz_mutex, 1, 1);
K_SEM_DEFINE(__lock___dd_hash_mutex, 1, 1);
K_SEM_DEFINE(__lock___arc4random_mutex, 1, 1);
#ifdef CONFIG_USERSPACE
/* Grant public access to all static locks after boot so user-mode threads
 * can use newlib facilities that lock internally.
 */
static int newlib_locks_prepare(const struct device *unused)
{
	ARG_UNUSED(unused);

	/* Recursive locks (k_mutex) */
	k_object_access_all_grant(&__lock___sinit_recursive_mutex);
	k_object_access_all_grant(&__lock___sfp_recursive_mutex);
	k_object_access_all_grant(&__lock___atexit_recursive_mutex);
	k_object_access_all_grant(&__lock___malloc_recursive_mutex);
	k_object_access_all_grant(&__lock___env_recursive_mutex);

	/* Non-recursive locks (k_sem) */
	k_object_access_all_grant(&__lock___at_quick_exit_mutex);
	k_object_access_all_grant(&__lock___tz_mutex);
	k_object_access_all_grant(&__lock___dd_hash_mutex);
	k_object_access_all_grant(&__lock___arc4random_mutex);

	return 0;
}
SYS_INIT(newlib_locks_prepare, POST_KERNEL,
	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_USERSPACE */
/* Create a new dynamic non-recursive lock (newlib hook): allocate a k_sem
 * and initialise it as a binary semaphore.
 * NOTE(review): with asserts disabled, allocation failure leads to
 * k_sem_init() on NULL — confirm acceptable.
 */
void __retarget_lock_init(_LOCK_T *lock)
{
	__ASSERT_NO_MSG(lock != NULL);

	/* Allocate semaphore object */
#ifndef CONFIG_USERSPACE
	*lock = malloc(sizeof(struct k_sem));
#else
	/* Kernel-object allocation so user threads may be granted access */
	*lock = k_object_alloc(K_OBJ_SEM);
#endif /* !CONFIG_USERSPACE */
	__ASSERT(*lock != NULL, "non-recursive lock allocation failed");

	k_sem_init((struct k_sem *)*lock, 1, 1);
}
/* Create a new dynamic recursive lock (newlib hook): allocate and
 * initialise a k_mutex, which supports recursive locking by its owner.
 */
void __retarget_lock_init_recursive(_LOCK_T *lock)
{
	__ASSERT_NO_MSG(lock != NULL);

	/* Allocate mutex object */
#ifndef CONFIG_USERSPACE
	*lock = malloc(sizeof(struct k_mutex));
#else
	/* Kernel-object allocation so user threads may be granted access */
	*lock = k_object_alloc(K_OBJ_MUTEX);
#endif /* !CONFIG_USERSPACE */
	__ASSERT(*lock != NULL, "recursive lock allocation failed");

	k_mutex_init((struct k_mutex *)*lock);
}
/* Close dynamic non-recursive lock: release storage obtained by
 * __retarget_lock_init() via the matching allocator.
 */
void __retarget_lock_close(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
#ifndef CONFIG_USERSPACE
	free(lock);
#else
	k_object_release(lock);
#endif /* !CONFIG_USERSPACE */
}
/* Close dynamic recursive lock: release storage obtained by
 * __retarget_lock_init_recursive() via the matching allocator.
 */
void __retarget_lock_close_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
#ifndef CONFIG_USERSPACE
	free(lock);
#else
	k_object_release(lock);
#endif /* !CONFIG_USERSPACE */
}
/* Acquire non-recursive lock, blocking until the semaphore is available. */
void __retarget_lock_acquire(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_sem_take((struct k_sem *)lock, K_FOREVER);
}
/* Acquire recursive lock, blocking until the mutex is available (or
 * immediately if the calling thread already owns it).
 */
void __retarget_lock_acquire_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_mutex_lock((struct k_mutex *)lock, K_FOREVER);
}
  360. /* Try acquiring non-recursive lock */
  361. int __retarget_lock_try_acquire(_LOCK_T lock)
  362. {
  363. __ASSERT_NO_MSG(lock != NULL);
  364. return !k_sem_take((struct k_sem *)lock, K_NO_WAIT);
  365. }
  366. /* Try acquiring recursive lock */
  367. int __retarget_lock_try_acquire_recursive(_LOCK_T lock)
  368. {
  369. __ASSERT_NO_MSG(lock != NULL);
  370. return !k_mutex_lock((struct k_mutex *)lock, K_NO_WAIT);
  371. }
/* Release non-recursive lock by giving back the binary semaphore. */
void __retarget_lock_release(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_sem_give((struct k_sem *)lock);
}
/* Release recursive lock (one level of nesting) by unlocking the mutex. */
void __retarget_lock_release_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_mutex_unlock((struct k_mutex *)lock);
}
  384. #endif /* CONFIG_MULTITHREADING */
/* Newlib hook: location of the per-thread errno, provided by the kernel. */
__weak int *__errno(void)
{
	return z_errno();
}
/* This function gets called if static buffer overflow detection is enabled
 * on stdlib side (Newlib here), in case such an overflow is detected. Newlib
 * provides an implementation not suitable for us, so we override it here.
 */
__weak FUNC_NORETURN void __chk_fail(void)
{
	static const char chk_fail_msg[] = "* buffer overflow detected *\n";

	/* Report on stderr (fd 2), then raise a fatal kernel oops */
	_write(2, chk_fail_msg, sizeof(chk_fail_msg) - 1);
	k_oops();
	CODE_UNREACHABLE;
}
#if CONFIG_XTENSA
extern int _read(int fd, char *buf, int nbytes);
extern int _open(const char *name, int mode);
extern int _close(int file);
extern int _lseek(int file, int ptr, int dir);

/* The Newlib in xtensa toolchain has a few missing functions for the
 * reentrant versions of the syscalls. Each wrapper simply drops the
 * reentrancy context and calls the plain implementation above.
 */
_ssize_t _read_r(struct _reent *r, int fd, void *buf, size_t nbytes)
{
	ARG_UNUSED(r);

	return _read(fd, (char *)buf, nbytes);
}

_ssize_t _write_r(struct _reent *r, int fd, const void *buf, size_t nbytes)
{
	ARG_UNUSED(r);

	return _write(fd, buf, nbytes);
}

int _open_r(struct _reent *r, const char *name, int flags, int mode)
{
	ARG_UNUSED(r);
	ARG_UNUSED(flags); /* open flags are ignored; there is no filesystem */

	return _open(name, mode);
}

int _close_r(struct _reent *r, int file)
{
	ARG_UNUSED(r);

	return _close(file);
}

_off_t _lseek_r(struct _reent *r, int file, _off_t ptr, int dir)
{
	ARG_UNUSED(r);

	return _lseek(file, ptr, dir);
}

int _isatty_r(struct _reent *r, int file)
{
	ARG_UNUSED(r);

	return _isatty(file);
}

int _kill_r(struct _reent *r, int i, int j)
{
	ARG_UNUSED(r);

	return _kill(i, j);
}

int _getpid_r(struct _reent *r)
{
	ARG_UNUSED(r);

	return _getpid();
}

int _fstat_r(struct _reent *r, int file, struct stat *st)
{
	ARG_UNUSED(r);

	return _fstat(file, st);
}

void _exit_r(struct _reent *r, int status)
{
	ARG_UNUSED(r);

	_exit(status);
}

/* NOTE(review): `count` is int here while _sbrk() takes intptr_t;
 * equivalent on 32-bit xtensa, but confirm if this is ever built 64-bit.
 */
void *_sbrk_r(struct _reent *r, int count)
{
	ARG_UNUSED(r);

	return _sbrk(count);
}
#endif /* CONFIG_XTENSA */
/* Newlib hook: forward to the POSIX gettimeofday() implementation.
 * NOTE(review): if no external gettimeofday() is linked and newlib's own
 * gettimeofday() calls back into _gettimeofday(), this recurses — confirm
 * a Zephyr gettimeofday() implementation is always present.
 */
int _gettimeofday(struct timeval *__tp, void *__tzp)
{
	return gettimeofday(__tp, __tzp);
}
/* Stub: heap statistics dump is not implemented in this configuration. */
void libc_heap_dump(void)
{
}