userspace.c

/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <string.h>
#include <sys/math_extras.h>
#include <sys/rb.h>
#include <kernel_structs.h>
#include <sys/sys_io.h>
#include <ksched.h>
#include <syscall.h>
#include <syscall_handler.h>
#include <device.h>
#include <init.h>
#include <stdbool.h>
#include <app_memory/app_memdomain.h>
#include <sys/libc-hooks.h>
#include <sys/mutex.h>
#include <inttypes.h>
#include <linker/linker-defs.h>

#ifdef Z_LIBC_PARTITION_EXISTS
K_APPMEM_PARTITION_DEFINE(z_libc_partition);
#endif

/* TODO: Find a better place to put this. Since we pull all of the
 * lib..__modules__crypto__mbedtls.a globals into the app shared memory
 * section, we can't put this in zephyr_init.c of the mbedtls module.
 */
#ifdef CONFIG_MBEDTLS
K_APPMEM_PARTITION_DEFINE(k_mbedtls_partition);
#endif

#include <logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* The original synchronization strategy made heavy use of recursive
 * irq_locking, which ports poorly to spinlocks, which are
 * non-recursive. Rather than try to redesign as part of
 * spinlockification, this uses multiple locks to preserve the
 * original semantics exactly. The locks are named for the data they
 * protect where possible, or just for the code that uses them where
 * not.
 */
#ifdef CONFIG_DYNAMIC_OBJECTS
static struct k_spinlock lists_lock;	/* kobj rbtree/dlist */
static struct k_spinlock objfree_lock;	/* k_object_free */
#endif
static struct k_spinlock obj_lock;	/* kobj struct data */

#define MAX_THREAD_BITS (CONFIG_MAX_THREAD_BYTES * 8)

#ifdef CONFIG_DYNAMIC_OBJECTS
extern uint8_t _thread_idx_map[CONFIG_MAX_THREAD_BYTES];
#endif

static void clear_perms_cb(struct z_object *ko, void *ctx_ptr);
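
/* Map a kernel object type to a human-readable name for the log
 * messages in this file; returns NULL when CONFIG_LOG is disabled.
 */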
const char *otype_to_str(enum k_objects otype)
{
	const char *ret;

	/* -fdata-sections doesn't work right except in very recent
	 * GCC, and these literal strings would appear in the binary even if
	 * otype_to_str was omitted by the linker
	 */
#ifdef CONFIG_LOG
	switch (otype) {
	/* otype-to-str.h is generated automatically during build by
	 * gen_kobject_list.py
	 */
	case K_OBJ_ANY:
		ret = "generic";
		break;
#include <otype-to-str.h>
	default:
		ret = "?";
		break;
	}
#else
	ARG_UNUSED(otype);
	ret = NULL;
#endif
	return ret;
}

struct perm_ctx {
	int parent_id;
	int child_id;
	struct k_thread *parent;
};

#ifdef CONFIG_GEN_PRIV_STACKS
/* See write_gperf_table() in scripts/gen_kobject_list.py. The privilege
 * mode stacks are allocated as an array. The base of the array is
 * aligned to Z_PRIVILEGE_STACK_ALIGN, and all members must be as well.
 */
uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
{
	struct z_object *obj = z_object_find(stack);

	__ASSERT(obj != NULL, "stack object not found");
	__ASSERT(obj->type == K_OBJ_THREAD_STACK_ELEMENT,
		 "bad stack object");

	return obj->data.stack_data->priv;
}
#endif /* CONFIG_GEN_PRIV_STACKS */

#ifdef CONFIG_DYNAMIC_OBJECTS

/*
 * Note that dyn_obj->data is where the kernel object resides,
 * so it is the one that actually needs to be aligned.
 * Due to the need to get the fields inside struct dyn_obj
 * from kernel object pointers (i.e. from data[]), the offset
 * from data[] needs to be fixed at build time. Therefore,
 * data[] is declared with __aligned(), such that when dyn_obj
 * is allocated with alignment, data[] is also aligned.
 * Due to this requirement, data[] needs to be aligned with
 * the maximum alignment needed for all kernel objects
 * (hence the following DYN_OBJ_DATA_ALIGN).
 */
#ifdef ARCH_DYMANIC_OBJ_K_THREAD_ALIGNMENT
#define DYN_OBJ_DATA_ALIGN_K_THREAD	(ARCH_DYMANIC_OBJ_K_THREAD_ALIGNMENT)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD	(sizeof(void *))
#endif

#define DYN_OBJ_DATA_ALIGN	\
	MAX(DYN_OBJ_DATA_ALIGN_K_THREAD, (sizeof(void *)))

struct dyn_obj {
	struct z_object kobj;
	sys_dnode_t dobj_list;
	struct rbnode node; /* must be immediately before data member */
	/* The object itself */
	uint8_t data[] __aligned(DYN_OBJ_DATA_ALIGN_K_THREAD);
};
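
/* For illustration: the pointer handed out to users of a dynamic object
 * is &dyn->data (stored in kobj.name), not the struct dyn_obj itself.
 * dyn_obj_to_node() and dyn_object_find() below use CONTAINER_OF() on
 * that data pointer to get back to the wrapping struct dyn_obj.
 */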
extern struct z_object *z_object_gperf_find(const void *obj);
extern void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
					    void *context);

static bool node_lessthan(struct rbnode *a, struct rbnode *b);

/*
 * Red/black tree of allocated kernel objects, for reasonably fast lookups
 * based on object pointer values.
 */
static struct rbtree obj_rb_tree = {
	.lessthan_fn = node_lessthan
};

/*
 * Linked list of allocated kernel objects, for iteration over all allocated
 * objects (and potentially deleting them during iteration).
 */
static sys_dlist_t obj_list = SYS_DLIST_STATIC_INIT(&obj_list);

/*
 * TODO: Write some hash table code that will replace both obj_rb_tree
 * and obj_list.
 */
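
/* Allocation size for each kernel object type. otype-to-size.h is
 * generated by gen_kobject_list.py; types it does not list (e.g. the
 * device driver object types) fall through to the default case below.
 */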
static size_t obj_size_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
#include <otype-to-size.h>
	default:
		ret = sizeof(const struct device);
		break;
	}

	return ret;
}

static size_t obj_align_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
	case K_OBJ_THREAD:
#ifdef ARCH_DYMANIC_OBJ_K_THREAD_ALIGNMENT
		ret = ARCH_DYMANIC_OBJ_K_THREAD_ALIGNMENT;
#else
		ret = sizeof(void *);
#endif
		break;
	default:
		ret = sizeof(void *);
		break;
	}

	return ret;
}
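
/* Tree ordering is by raw node address. That is sufficient here because
 * the tree is only ever queried for exact membership (rb_contains()),
 * never traversed in a meaningful key order.
 */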
static bool node_lessthan(struct rbnode *a, struct rbnode *b)
{
	return a < b;
}

static inline struct dyn_obj *node_to_dyn_obj(struct rbnode *node)
{
	return CONTAINER_OF(node, struct dyn_obj, node);
}

static inline struct rbnode *dyn_obj_to_node(void *obj)
{
	struct dyn_obj *dobj = CONTAINER_OF(obj, struct dyn_obj, data);

	return &dobj->node;
}

static struct dyn_obj *dyn_object_find(void *obj)
{
	struct rbnode *node;
	struct dyn_obj *ret;

	/* For any dynamically allocated kernel object, the object
	 * pointer is just a member of the containing struct dyn_obj,
	 * so just a little arithmetic is necessary to locate the
	 * corresponding struct rbnode
	 */
	node = dyn_obj_to_node(obj);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);
	if (rb_contains(&obj_rb_tree, node)) {
		ret = node_to_dyn_obj(node);
	} else {
		ret = NULL;
	}
	k_spin_unlock(&lists_lock, key);

	return ret;
}

/**
 * @internal
 *
 * @brief Allocate a new thread index for a new thread.
 *
 * This finds an unused thread index that can be assigned to a new
 * thread. If too many threads have been allocated, the kernel will
 * run out of indexes and this function will fail.
 *
 * Note that if an unused index is found, that index will be marked as
 * used upon return from this function.
 *
 * @param tidx The new thread index if successful
 *
 * @return true if successful, false if failed
 **/
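/* For example (illustrative values): with _thread_idx_map[0] == 0xfe,
 * bit 0 is already in use, find_lsb_set() returns 2, so index 1 is
 * handed out and then cleared in the bitmap -- a set bit means "free".
 */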
static bool thread_idx_alloc(uintptr_t *tidx)
{
	int i;
	int idx;
	int base;

	base = 0;
	for (i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		idx = find_lsb_set(_thread_idx_map[i]);

		if (idx != 0) {
			*tidx = base + (idx - 1);

			sys_bitfield_clear_bit((mem_addr_t)_thread_idx_map,
					       *tidx);

			/* Clear permission from all objects */
			z_object_wordlist_foreach(clear_perms_cb,
						  (void *)*tidx);

			return true;
		}

		base += 8;
	}

	return false;
}

/**
 * @internal
 *
 * @brief Free a thread index.
 *
 * This frees a thread index so it can be used by another
 * thread.
 *
 * @param tidx The thread index to be freed
 **/
static void thread_idx_free(uintptr_t tidx)
{
	/* To prevent leaking permissions when the index is recycled */
	z_object_wordlist_foreach(clear_perms_cb, (void *)tidx);
	sys_bitfield_set_bit((mem_addr_t)_thread_idx_map, tidx);
}
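
/* Allocate a dynamic kernel object of 'size' bytes from the calling
 * thread's resource pool, with the data area aligned to 'align'. The
 * new object is registered in the rbtree and dlist above so that
 * z_object_find() can locate it later; its type is left as K_OBJ_ANY
 * and no permissions are set, both of which are up to the caller.
 */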
struct z_object *z_dynamic_object_aligned_create(size_t align, size_t size)
{
	struct dyn_obj *dyn;

	dyn = z_thread_aligned_alloc(align, sizeof(*dyn) + size);
	if (dyn == NULL) {
		LOG_ERR("could not allocate kernel object, out of memory");
		return NULL;
	}

	dyn->kobj.name = &dyn->data;
	dyn->kobj.type = K_OBJ_ANY;
	dyn->kobj.flags = 0;
	(void)memset(dyn->kobj.perms, 0, CONFIG_MAX_THREAD_BYTES);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	rb_insert(&obj_rb_tree, &dyn->node);
	sys_dlist_append(&obj_list, &dyn->dobj_list);
	k_spin_unlock(&lists_lock, key);

	return &dyn->kobj;
}
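
/* Implementation of the k_object_alloc() system call. Illustrative use
 * (assuming a semaphore is wanted):
 *
 *	struct k_sem *sem = k_object_alloc(K_OBJ_SEM);
 *
 *	if (sem != NULL) {
 *		k_sem_init(sem, 0, 1);
 *	}
 *
 * The pointer returned is the object itself (zo->name below), not the
 * wrapping struct dyn_obj.
 */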
void *z_impl_k_object_alloc(enum k_objects otype)
{
	struct z_object *zo;
	uintptr_t tidx = 0;

	if (otype <= K_OBJ_ANY || otype >= K_OBJ_LAST) {
		LOG_ERR("bad object type %d requested", otype);
		return NULL;
	}

	switch (otype) {
	case K_OBJ_THREAD:
		if (!thread_idx_alloc(&tidx)) {
			LOG_ERR("out of free thread indexes");
			return NULL;
		}
		break;
	/* The following are currently not allowed at all */
	case K_OBJ_FUTEX:			/* Lives in user memory */
	case K_OBJ_SYS_MUTEX:			/* Lives in user memory */
	case K_OBJ_THREAD_STACK_ELEMENT:	/* No aligned allocator */
	case K_OBJ_NET_SOCKET:			/* Indeterminate size */
		LOG_ERR("forbidden object type '%s' requested",
			otype_to_str(otype));
		return NULL;
	default:
		/* The remaining types within bounds are permitted */
		break;
	}

	zo = z_dynamic_object_aligned_create(obj_align_get(otype),
					     obj_size_get(otype));
	if (zo == NULL) {
		return NULL;
	}
	zo->type = otype;

	if (otype == K_OBJ_THREAD) {
		zo->data.thread_id = tidx;
	}

	/* The allocating thread implicitly gets permission on kernel objects
	 * that it allocates
	 */
	z_thread_perms_set(zo, _current);

	/* Activates reference counting logic for automatic disposal when
	 * all permissions have been revoked
	 */
	zo->flags |= K_OBJ_FLAG_ALLOC;

	return zo->name;
}

void k_object_free(void *obj)
{
	struct dyn_obj *dyn;

	/* This function is intentionally not exposed to user mode.
	 * There's currently no robust way to track that an object isn't
	 * being used by some other thread
	 */

	k_spinlock_key_t key = k_spin_lock(&objfree_lock);

	dyn = dyn_object_find(obj);
	if (dyn != NULL) {
		rb_remove(&obj_rb_tree, &dyn->node);
		sys_dlist_remove(&dyn->dobj_list);

		if (dyn->kobj.type == K_OBJ_THREAD) {
			thread_idx_free(dyn->kobj.data.thread_id);
		}
	}
	k_spin_unlock(&objfree_lock, key);

	if (dyn != NULL) {
		k_free(dyn);
	}
}

struct z_object *z_object_find(const void *obj)
{
	struct z_object *ret;

	ret = z_object_gperf_find(obj);

	if (ret == NULL) {
		struct dyn_obj *dynamic_obj;

		/* The cast to pointer-to-non-const violates MISRA
		 * 11.8 but is justified since we know dynamic objects
		 * were not declared with a const qualifier.
		 */
		dynamic_obj = dyn_object_find((void *)obj);
		if (dynamic_obj != NULL) {
			ret = &dynamic_obj->kobj;
		}
	}

	return ret;
}

void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
	struct dyn_obj *obj, *next;

	z_object_gperf_wordlist_foreach(func, context);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&obj_list, obj, next, dobj_list) {
		func(&obj->kobj, context);
	}
	k_spin_unlock(&lists_lock, key);
}
#endif /* CONFIG_DYNAMIC_OBJECTS */
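
/* Look up the permission-bitmap index associated with a thread object,
 * or -1 if the thread is not a known kernel object.
 */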
static unsigned int thread_index_get(struct k_thread *thread)
{
	struct z_object *ko;

	ko = z_object_find(thread);

	if (ko == NULL) {
		return -1;
	}

	return ko->data.thread_id;
}
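
/* Clear one thread's permission bit on an object. For dynamically
 * allocated objects, when the last permission bit goes away the object
 * is cleaned up according to its type and its memory is freed.
 */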
static void unref_check(struct z_object *ko, uintptr_t index)
{
	k_spinlock_key_t key = k_spin_lock(&obj_lock);

	sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);

#ifdef CONFIG_DYNAMIC_OBJECTS
	struct dyn_obj *dyn =
			CONTAINER_OF(ko, struct dyn_obj, kobj);

	if ((ko->flags & K_OBJ_FLAG_ALLOC) == 0U) {
		goto out;
	}

	for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		if (ko->perms[i] != 0U) {
			goto out;
		}
	}

	/* This object has no more references. Some objects may have
	 * dynamically allocated resources, require cleanup, or need to be
	 * marked as uninitialized when all references are gone. What
	 * specifically needs to happen depends on the object type.
	 */
	switch (ko->type) {
	case K_OBJ_PIPE:
		k_pipe_cleanup((struct k_pipe *)ko->name);
		break;
	case K_OBJ_MSGQ:
		k_msgq_cleanup((struct k_msgq *)ko->name);
		break;
	case K_OBJ_STACK:
		k_stack_cleanup((struct k_stack *)ko->name);
		break;
	default:
		/* Nothing to do */
		break;
	}

	rb_remove(&obj_rb_tree, &dyn->node);
	sys_dlist_remove(&dyn->dobj_list);
	k_free(dyn);
out:
#endif
	k_spin_unlock(&obj_lock, key);
}
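
/* Permission inheritance: the child gets every permission the parent
 * currently holds, except on the parent thread object itself.
 */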
static void wordlist_cb(struct z_object *ko, void *ctx_ptr)
{
	struct perm_ctx *ctx = (struct perm_ctx *)ctx_ptr;

	if (sys_bitfield_test_bit((mem_addr_t)&ko->perms, ctx->parent_id) &&
	    (struct k_thread *)ko->name != ctx->parent) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, ctx->child_id);
	}
}

void z_thread_perms_inherit(struct k_thread *parent, struct k_thread *child)
{
	struct perm_ctx ctx = {
		thread_index_get(parent),
		thread_index_get(child),
		parent
	};

	if ((ctx.parent_id != -1) && (ctx.child_id != -1)) {
		z_object_wordlist_foreach(wordlist_cb, &ctx);
	}
}

void z_thread_perms_set(struct z_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, index);
	}
}

void z_thread_perms_clear(struct z_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);
		unref_check(ko, index);
	}
}

static void clear_perms_cb(struct z_object *ko, void *ctx_ptr)
{
	uintptr_t id = (uintptr_t)ctx_ptr;

	unref_check(ko, id);
}

void z_thread_perms_all_clear(struct k_thread *thread)
{
	uintptr_t index = thread_index_get(thread);

	if ((int)index != -1) {
		z_object_wordlist_foreach(clear_perms_cb, (void *)index);
	}
}
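
/* Return 1 if the current thread may use this object: public objects
 * are usable by everyone, otherwise the calling thread's permission
 * bit must be set in the object's bitmap.
 */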
static int thread_perms_test(struct z_object *ko)
{
	int index;

	if ((ko->flags & K_OBJ_FLAG_PUBLIC) != 0U) {
		return 1;
	}

	index = thread_index_get(_current);
	if (index != -1) {
		return sys_bitfield_test_bit((mem_addr_t)&ko->perms, index);
	}
	return 0;
}

static void dump_permission_error(struct z_object *ko)
{
	int index = thread_index_get(_current);

	LOG_ERR("thread %p (%d) does not have permission on %s %p",
		_current, index,
		otype_to_str(ko->type), ko->name);
	LOG_HEXDUMP_ERR(ko->perms, sizeof(ko->perms), "permission bitmap");
}

void z_dump_object_error(int retval, const void *obj, struct z_object *ko,
			 enum k_objects otype)
{
	switch (retval) {
	case -EBADF:
		LOG_ERR("%p is not a valid %s", obj, otype_to_str(otype));
		if (ko == NULL) {
			LOG_ERR("address is not a known kernel object");
		} else {
			LOG_ERR("address is actually a %s",
				otype_to_str(ko->type));
		}
		break;
	case -EPERM:
		dump_permission_error(ko);
		break;
	case -EINVAL:
		LOG_ERR("%p used before initialization", obj);
		break;
	case -EADDRINUSE:
		LOG_ERR("%p %s in use", obj, otype_to_str(otype));
		break;
	default:
		/* Unhandled error */
		break;
	}
}

void z_impl_k_object_access_grant(const void *object, struct k_thread *thread)
{
	struct z_object *ko = z_object_find(object);

	if (ko != NULL) {
		z_thread_perms_set(ko, thread);
	}
}

void k_object_access_revoke(const void *object, struct k_thread *thread)
{
	struct z_object *ko = z_object_find(object);

	if (ko != NULL) {
		z_thread_perms_clear(ko, thread);
	}
}

void z_impl_k_object_release(const void *object)
{
	k_object_access_revoke(object, _current);
}

void k_object_access_all_grant(const void *object)
{
	struct z_object *ko = z_object_find(object);

	if (ko != NULL) {
		ko->flags |= K_OBJ_FLAG_PUBLIC;
	}
}
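
/* Validate an object pointer on behalf of a system call. Return value
 * summary, matching z_dump_object_error() above:
 *   -EBADF      not a kernel object, or wrong type
 *   -EPERM      calling thread lacks permission
 *   -EINVAL     object must be initialized but is not
 *   -EADDRINUSE object must be uninitialized but already is
 *    0          object may be used
 */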
int z_object_validate(struct z_object *ko, enum k_objects otype,
		      enum _obj_init_check init)
{
	if (unlikely((ko == NULL) ||
		     (otype != K_OBJ_ANY && ko->type != otype))) {
		return -EBADF;
	}

	/* Manipulation of any kernel objects by a user thread requires that
	 * thread be granted access first, even for uninitialized objects
	 */
	if (unlikely(thread_perms_test(ko) == 0)) {
		return -EPERM;
	}

	/* Initialization state checks. For _OBJ_INIT_ANY, we don't care */
	if (likely(init == _OBJ_INIT_TRUE)) {
		/* Object MUST be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) == 0U)) {
			return -EINVAL;
		}
	} else if (init == _OBJ_INIT_FALSE) {
		/* Object MUST NOT be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) != 0U)) {
			return -EADDRINUSE;
		}
	} else {
		/* _OBJ_INIT_ANY */
	}

	return 0;
}

void z_object_init(const void *obj)
{
	struct z_object *ko;

	/* By the time we get here, if the caller was from userspace, all the
	 * necessary checks have been done in z_object_validate(), which takes
	 * place before the object is initialized.
	 *
	 * This function runs after the object has been initialized and
	 * finalizes it
	 */
	ko = z_object_find(obj);
	if (ko == NULL) {
		/* Supervisor threads can ignore rules about kernel objects
		 * and may declare them on stacks, etc. Such objects will never
		 * be usable from userspace, but we shouldn't explode.
		 */
		return;
	}

	/* Allows non-initialization system calls to be made on this object */
	ko->flags |= K_OBJ_FLAG_INITIALIZED;
}

void z_object_recycle(const void *obj)
{
	struct z_object *ko = z_object_find(obj);

	if (ko != NULL) {
		(void)memset(ko->perms, 0, sizeof(ko->perms));
		z_thread_perms_set(ko, k_current_get());
		ko->flags |= K_OBJ_FLAG_INITIALIZED;
	}
}

void z_object_uninit(const void *obj)
{
	struct z_object *ko;

	/* See comments in z_object_init() */
	ko = z_object_find(obj);
	if (ko == NULL) {
		return;
	}

	ko->flags &= ~K_OBJ_FLAG_INITIALIZED;
}

/*
 * Copy to/from helper functions used in syscall handlers
 */
void *z_user_alloc_from_copy(const void *src, size_t size)
{
	void *dst = NULL;

	/* Does the caller in user mode have access to read this memory? */
	if (Z_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	dst = z_thread_malloc(size);
	if (dst == NULL) {
		LOG_ERR("out of thread resource pool memory (%zu)", size);
		goto out_err;
	}

	(void)memcpy(dst, src, size);
out_err:
	return dst;
}
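
/* Common helper for copies across the user/kernel boundary. The
 * user-side buffer is validated first (write access for copies to user
 * memory, read access for copies from it); on a validation failure
 * nothing is copied and EFAULT is returned.
 */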
static int user_copy(void *dst, const void *src, size_t size, bool to_user)
{
	int ret = EFAULT;

	/* Does the caller in user mode have access to this memory? */
	if (to_user ? Z_SYSCALL_MEMORY_WRITE(dst, size) :
			Z_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	(void)memcpy(dst, src, size);
	ret = 0;
out_err:
	return ret;
}

int z_user_from_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, false);
}

int z_user_to_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, true);
}

char *z_user_string_alloc_copy(const char *src, size_t maxlen)
{
	size_t actual_len;
	int err;
	char *ret = NULL;

	actual_len = z_user_string_nlen(src, maxlen, &err);
	if (err != 0) {
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		goto out;
	}

	ret = z_user_alloc_from_copy(src, actual_len);

	/* Someone may have modified the source string during the above
	 * checks. Ensure what we actually copied is still terminated
	 * properly.
	 */
	if (ret != NULL) {
		ret[actual_len - 1U] = '\0';
	}
out:
	return ret;
}

int z_user_string_copy(char *dst, const char *src, size_t maxlen)
{
	size_t actual_len;
	int ret, err;

	actual_len = z_user_string_nlen(src, maxlen, &err);
	if (err != 0) {
		ret = EFAULT;
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		ret = EINVAL;
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		ret = EINVAL;
		goto out;
	}

	ret = z_user_from_copy(dst, src, actual_len);

	/* See comment above in z_user_string_alloc_copy() */
	dst[actual_len - 1] = '\0';
out:
	return ret;
}

/*
 * Application memory region initialization
 */
extern char __app_shmem_regions_start[];
extern char __app_shmem_regions_end[];

static int app_shmem_bss_zero(const struct device *unused)
{
	struct z_app_region *region, *end;

	ARG_UNUSED(unused);

	end = (struct z_app_region *)&__app_shmem_regions_end;
	region = (struct z_app_region *)&__app_shmem_regions_start;

	for ( ; region < end; region++) {
#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
		/* When BSS sections are not present at boot, we need to wait
		 * for the paging mechanism to be initialized before we can
		 * zero out BSS.
		 */
		extern bool z_sys_post_kernel;
		bool do_clear = z_sys_post_kernel;

		/* During pre-kernel init (z_sys_post_kernel == false), clear
		 * only regions inside the pinned area and skip the rest.
		 * During post-kernel init (z_sys_post_kernel == true), regions
		 * inside the pinned area have already been cleared and may be
		 * in use, so skip them and clear the rest.
		 */
		if (((uint8_t *)region->bss_start >= (uint8_t *)_app_smem_pinned_start) &&
		    ((uint8_t *)region->bss_start < (uint8_t *)_app_smem_pinned_end)) {
			do_clear = !do_clear;
		}

		if (do_clear)
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
		{
			(void)memset(region->bss_start, 0, region->bss_size);
		}
	}

	return 0;
}

SYS_INIT(app_shmem_bss_zero, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
/* When BSS sections are not present at boot, the same routine is run a
 * second time after the paging mechanism has been initialized, to zero
 * out the regions that had to be skipped during PRE_KERNEL_1.
 */
SYS_INIT(app_shmem_bss_zero, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */

/*
 * Default handlers if otherwise unimplemented
 */
static uintptr_t handler_bad_syscall(uintptr_t bad_id, uintptr_t arg2,
				     uintptr_t arg3, uintptr_t arg4,
				     uintptr_t arg5, uintptr_t arg6,
				     void *ssf)
{
	LOG_ERR("Bad system call id %" PRIuPTR " invoked", bad_id);
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

static uintptr_t handler_no_syscall(uintptr_t arg1, uintptr_t arg2,
				    uintptr_t arg3, uintptr_t arg4,
				    uintptr_t arg5, uintptr_t arg6, void *ssf)
{
	LOG_ERR("Unimplemented system call");
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
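
/* syscall_dispatch.c is generated at build time and contains the system
 * call dispatch table; the default handlers above are what that table
 * points at for unimplemented and out-of-range system call ids.
 */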
#include <syscall_dispatch.c>