/* mem_domain.c */
  1. /*
  2. * Copyright (c) 2017 Linaro Limited
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <init.h>
  7. #include <kernel.h>
  8. #include <kernel_structs.h>
  9. #include <kernel_internal.h>
  10. #include <sys/__assert.h>
  11. #include <stdbool.h>
  12. #include <spinlock.h>
  13. #include <sys/libc-hooks.h>
  14. #include <logging/log.h>
  15. LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
/* Serializes all mutations of memory domains and their thread queues */
struct k_spinlock z_mem_domain_lock;
/* Per-arch partition limit, set once at boot by init_mem_domain_module() */
static uint8_t max_partitions;
/* Domain that threads belong to until explicitly assigned elsewhere */
struct k_mem_domain k_mem_domain_default;
  19. #if __ASSERT_ON
  20. static bool check_add_partition(struct k_mem_domain *domain,
  21. struct k_mem_partition *part)
  22. {
  23. int i;
  24. uintptr_t pstart, pend, dstart, dend;
  25. if (part == NULL) {
  26. LOG_ERR("NULL k_mem_partition provided");
  27. return false;
  28. }
  29. #ifdef CONFIG_EXECUTE_XOR_WRITE
  30. /* Arches where execution cannot be disabled should always return
  31. * false to this check
  32. */
  33. if (K_MEM_PARTITION_IS_EXECUTABLE(part->attr) &&
  34. K_MEM_PARTITION_IS_WRITABLE(part->attr)) {
  35. LOG_ERR("partition is writable and executable <start %lx>",
  36. part->start);
  37. return false;
  38. }
  39. #endif
  40. if (part->size == 0U) {
  41. LOG_ERR("zero sized partition at %p with base 0x%lx",
  42. part, part->start);
  43. return false;
  44. }
  45. pstart = part->start;
  46. pend = part->start + part->size;
  47. if (pend <= pstart) {
  48. LOG_ERR("invalid partition %p, wraparound detected. base 0x%lx size %zu",
  49. part, part->start, part->size);
  50. return false;
  51. }
  52. /* Check that this partition doesn't overlap any existing ones already
  53. * in the domain
  54. */
  55. for (i = 0; i < domain->num_partitions; i++) {
  56. struct k_mem_partition *dpart = &domain->partitions[i];
  57. if (dpart->size == 0U) {
  58. /* Unused slot */
  59. continue;
  60. }
  61. dstart = dpart->start;
  62. dend = dstart + dpart->size;
  63. if (pend > dstart && dend > pstart) {
  64. LOG_ERR("partition %p base %lx (size %zu) overlaps existing base %lx (size %zu)",
  65. part, part->start, part->size,
  66. dpart->start, dpart->size);
  67. return false;
  68. }
  69. }
  70. return true;
  71. }
  72. #endif
/*
 * Initialize a memory domain, optionally populating it with an initial
 * array of partitions.
 *
 * @param domain    Domain object to initialize (must be non-NULL)
 * @param num_parts Number of entries in @p parts; must not exceed
 *                  max_partitions
 * @param parts     Array of pointers to partitions to copy in; may be
 *                  NULL only when num_parts is 0
 *
 * Panics if architecture-specific domain initialization fails
 * (CONFIG_ARCH_MEM_DOMAIN_DATA). Input validation beyond the
 * assertions here only happens when assertions are enabled.
 */
void k_mem_domain_init(struct k_mem_domain *domain, uint8_t num_parts,
		       struct k_mem_partition *parts[])
{
	k_spinlock_key_t key;

	__ASSERT_NO_MSG(domain != NULL);
	__ASSERT(num_parts == 0U || parts != NULL,
		 "parts array is NULL and num_parts is nonzero");
	__ASSERT(num_parts <= max_partitions,
		 "num_parts of %d exceeds maximum allowable partitions (%d)",
		 num_parts, max_partitions);

	key = k_spin_lock(&z_mem_domain_lock);

	/* Start with every partition slot free (size == 0 marks a free
	 * slot) and an empty thread membership queue
	 */
	domain->num_partitions = 0U;
	(void)memset(domain->partitions, 0, sizeof(domain->partitions));
	sys_dlist_init(&domain->mem_domain_q);

#ifdef CONFIG_ARCH_MEM_DOMAIN_DATA
	int ret = arch_mem_domain_init(domain);

	/* TODO propagate return values, see #24609.
	 *
	 * Not using an assertion here as this is a memory allocation error
	 */
	if (ret != 0) {
		LOG_ERR("architecture-specific initialization failed for domain %p with %d",
			domain, ret);
		k_panic();
	}
#endif
	if (num_parts != 0U) {
		uint32_t i;

		for (i = 0U; i < num_parts; i++) {
			/* check_add_partition() is only defined when
			 * __ASSERT_ON; with assertions off, no overlap or
			 * W^X validation happens here
			 */
			__ASSERT(check_add_partition(domain, parts[i]),
				 "invalid partition index %d (%p)",
				 i, parts[i]);

			/* Copy the partition by value; caller's array is
			 * not referenced after init returns
			 */
			domain->partitions[i] = *parts[i];
			domain->num_partitions++;
#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
			arch_mem_domain_partition_add(domain, i);
#endif
		}
	}

	k_spin_unlock(&z_mem_domain_lock, key);
}
  114. void k_mem_domain_add_partition(struct k_mem_domain *domain,
  115. struct k_mem_partition *part)
  116. {
  117. int p_idx;
  118. k_spinlock_key_t key;
  119. __ASSERT_NO_MSG(domain != NULL);
  120. __ASSERT(check_add_partition(domain, part),
  121. "invalid partition %p", part);
  122. key = k_spin_lock(&z_mem_domain_lock);
  123. for (p_idx = 0; p_idx < max_partitions; p_idx++) {
  124. /* A zero-sized partition denotes it's a free partition */
  125. if (domain->partitions[p_idx].size == 0U) {
  126. break;
  127. }
  128. }
  129. __ASSERT(p_idx < max_partitions,
  130. "no free partition slots available");
  131. LOG_DBG("add partition base %lx size %zu to domain %p\n",
  132. part->start, part->size, domain);
  133. domain->partitions[p_idx].start = part->start;
  134. domain->partitions[p_idx].size = part->size;
  135. domain->partitions[p_idx].attr = part->attr;
  136. domain->num_partitions++;
  137. #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
  138. arch_mem_domain_partition_add(domain, p_idx);
  139. #endif
  140. k_spin_unlock(&z_mem_domain_lock, key);
  141. }
  142. void k_mem_domain_remove_partition(struct k_mem_domain *domain,
  143. struct k_mem_partition *part)
  144. {
  145. int p_idx;
  146. k_spinlock_key_t key;
  147. __ASSERT_NO_MSG(domain != NULL);
  148. __ASSERT_NO_MSG(part != NULL);
  149. key = k_spin_lock(&z_mem_domain_lock);
  150. /* find a partition that matches the given start and size */
  151. for (p_idx = 0; p_idx < max_partitions; p_idx++) {
  152. if (domain->partitions[p_idx].start == part->start &&
  153. domain->partitions[p_idx].size == part->size) {
  154. break;
  155. }
  156. }
  157. __ASSERT(p_idx < max_partitions, "no matching partition found");
  158. LOG_DBG("remove partition base %lx size %zu from domain %p\n",
  159. part->start, part->size, domain);
  160. #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
  161. arch_mem_domain_partition_remove(domain, p_idx);
  162. #endif
  163. /* A zero-sized partition denotes it's a free partition */
  164. domain->partitions[p_idx].size = 0U;
  165. domain->num_partitions--;
  166. k_spin_unlock(&z_mem_domain_lock, key);
  167. }
  168. static void add_thread_locked(struct k_mem_domain *domain,
  169. k_tid_t thread)
  170. {
  171. __ASSERT_NO_MSG(domain != NULL);
  172. __ASSERT_NO_MSG(thread != NULL);
  173. LOG_DBG("add thread %p to domain %p\n", thread, domain);
  174. sys_dlist_append(&domain->mem_domain_q,
  175. &thread->mem_domain_info.mem_domain_q_node);
  176. thread->mem_domain_info.mem_domain = domain;
  177. #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
  178. arch_mem_domain_thread_add(thread);
  179. #endif
  180. }
  181. static void remove_thread_locked(struct k_thread *thread)
  182. {
  183. __ASSERT_NO_MSG(thread != NULL);
  184. LOG_DBG("remove thread %p from memory domain %p\n",
  185. thread, thread->mem_domain_info.mem_domain);
  186. sys_dlist_remove(&thread->mem_domain_info.mem_domain_q_node);
  187. #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
  188. arch_mem_domain_thread_remove(thread);
  189. #endif
  190. }
  191. /* Called from thread object initialization */
  192. void z_mem_domain_init_thread(struct k_thread *thread)
  193. {
  194. k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
  195. /* New threads inherit memory domain configuration from parent */
  196. add_thread_locked(_current->mem_domain_info.mem_domain, thread);
  197. k_spin_unlock(&z_mem_domain_lock, key);
  198. }
  199. /* Called when thread aborts during teardown tasks. sched_spinlock is held */
  200. void z_mem_domain_exit_thread(struct k_thread *thread)
  201. {
  202. k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
  203. remove_thread_locked(thread);
  204. k_spin_unlock(&z_mem_domain_lock, key);
  205. }
  206. void k_mem_domain_add_thread(struct k_mem_domain *domain, k_tid_t thread)
  207. {
  208. k_spinlock_key_t key;
  209. key = k_spin_lock(&z_mem_domain_lock);
  210. if (thread->mem_domain_info.mem_domain != domain) {
  211. remove_thread_locked(thread);
  212. add_thread_locked(domain, thread);
  213. }
  214. k_spin_unlock(&z_mem_domain_lock, key);
  215. }
  216. static int init_mem_domain_module(const struct device *arg)
  217. {
  218. ARG_UNUSED(arg);
  219. max_partitions = arch_mem_domain_max_partitions_get();
  220. /*
  221. * max_partitions must be less than or equal to
  222. * CONFIG_MAX_DOMAIN_PARTITIONS, or would encounter array index
  223. * out of bounds error.
  224. */
  225. __ASSERT(max_partitions <= CONFIG_MAX_DOMAIN_PARTITIONS, "");
  226. k_mem_domain_init(&k_mem_domain_default, 0, NULL);
  227. #ifdef Z_LIBC_PARTITION_EXISTS
  228. k_mem_domain_add_partition(&k_mem_domain_default, &z_libc_partition);
  229. #endif /* Z_LIBC_PARTITION_EXISTS */
  230. return 0;
  231. }
  232. SYS_INIT(init_mem_domain_module, PRE_KERNEL_1,
  233. CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);