smp.c

/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <kernel.h>
#include <kernel_structs.h>
#include <spinlock.h>
#include <kswap.h>
#include <kernel_internal.h>

static atomic_t global_lock;
static atomic_t start_flag;

unsigned int z_smp_global_lock(void)
{
	unsigned int key = arch_irq_lock();

	if (!_current->base.global_lock_count) {
		while (!atomic_cas(&global_lock, 0, 1)) {
		}
	}

	_current->base.global_lock_count++;

	return key;
}

void z_smp_global_unlock(unsigned int key)
{
	if (_current->base.global_lock_count) {
		_current->base.global_lock_count--;

		if (!_current->base.global_lock_count) {
			atomic_clear(&global_lock);
		}
	}

	arch_irq_unlock(key);
}
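
/*
 * Usage sketch (added for illustration; not part of the original file):
 * this pair backs the legacy irq_lock()/irq_unlock() semantics under
 * CONFIG_SMP, providing recursive, cross-CPU mutual exclusion. A
 * hypothetical caller:
 *
 *	unsigned int key = z_smp_global_lock();
 *	// ... critical section, protected against all CPUs ...
 *	z_smp_global_unlock(key);
 *
 * Nesting is safe: global_lock_count tracks the recursion depth, and
 * only the outermost unlock clears the atomic.
 */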

/* Called from within z_swap(), so assumes lock already held */
void z_smp_release_global_lock(struct k_thread *thread)
{
	if (!thread->base.global_lock_count) {
		atomic_clear(&global_lock);
	}
}
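
/*
 * Context note (an assumption inferred from the comment above, not
 * stated in the code): z_swap() passes the thread being switched *in*.
 * If that thread holds no recursive global lock, the lock is dropped
 * here so other CPUs can claim it while the thread runs.
 */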

#if CONFIG_MP_NUM_CPUS > 1
void z_smp_thread_init(void *arg, struct k_thread *thread)
{
	atomic_t *cpu_start_flag = arg;

	/* Wait for the signal to begin scheduling */
	while (!atomic_get(cpu_start_flag)) {
	}

	z_dummy_thread_init(thread);
}

void z_smp_thread_swap(void)
{
	z_swap_unlocked();
}

static inline FUNC_NORETURN void smp_init_top(void *arg)
{
	struct k_thread dummy_thread;

	z_smp_thread_init(arg, &dummy_thread);
	smp_timer_init();

	z_swap_unlocked();

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
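
/*
 * Bring-up sketch (illustrative): smp_init_top() is the C entry point a
 * secondary CPU reaches via arch_start_cpu(). It spins in
 * z_smp_thread_init() until the boot CPU raises the start flag, installs
 * a dummy thread so the scheduler has a valid _current, starts the
 * per-CPU timer, and then swaps into the first real thread, never
 * returning.
 */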

void z_smp_start_cpu(int id)
{
	(void)atomic_clear(&start_flag);
	arch_start_cpu(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
		       smp_init_top, &start_flag);
	(void)atomic_set(&start_flag, 1);
}
#endif

void z_smp_init(void)
{
	(void)atomic_clear(&start_flag);

#if CONFIG_MP_NUM_CPUS > 1 && !defined(CONFIG_SMP_BOOT_DELAY)
	for (int i = 1; i < CONFIG_MP_NUM_CPUS; i++) {
		arch_start_cpu(i, z_interrupt_stacks[i], CONFIG_ISR_STACK_SIZE,
			       smp_init_top, &start_flag);
	}
#endif

	(void)atomic_set(&start_flag, 1);
}
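
/*
 * Boot flow sketch (an assumption about the caller, for illustration):
 * the primary CPU invokes z_smp_init() late in kernel startup. All
 * secondaries launched here spin on start_flag, so the final
 * atomic_set(&start_flag, 1) releases them together only once the kernel
 * is ready to schedule. With CONFIG_SMP_BOOT_DELAY, secondaries are
 * instead brought up individually via z_smp_start_cpu().
 */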

bool z_smp_cpu_mobile(void)
{
	unsigned int k = arch_irq_lock();
	bool pinned = arch_is_in_isr() || !arch_irq_unlocked(k);

	arch_irq_unlock(k);
	return !pinned;
}
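
/*
 * Usage sketch (hypothetical caller): z_smp_cpu_mobile() answers "may
 * _current migrate to another CPU right now?" It reports false inside an
 * ISR or while interrupts are locked, since either condition pins
 * execution to this CPU:
 *
 *	if (z_smp_cpu_mobile()) {
 *		// _current may move CPUs; don't cache per-CPU pointers
 *	}
 */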