sched.c
/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <kernel.h>
#include <ksched.h>
#include <spinlock.h>
#include <kernel/sched_priq.h>
#include <wait_q.h>
#include <kswap.h>
#include <kernel_arch_func.h>
#include <syscall_handler.h>
#include <drivers/timer/system_timer.h>
#include <stdbool.h>
#include <kernel_internal.h>
#include <logging/log.h>
#include <sys/atomic.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
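
/* The ready-queue and wait-queue backends are selected at build time:
 * CONFIG_SCHED_DUMB uses a simple doubly-linked list, CONFIG_SCHED_SCALABLE
 * a red/black tree, and CONFIG_SCHED_MULTIQ an array of per-priority lists,
 * while CONFIG_WAITQ_SCALABLE/CONFIG_WAITQ_DUMB make the same choice for
 * wait queues.  The _priq_run_*() and _priq_wait_*() aliases below bind the
 * generic scheduler code to whichever implementation was chosen.
 */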
#if defined(CONFIG_SCHED_DUMB)
#define _priq_run_add z_priq_dumb_add
#define _priq_run_remove z_priq_dumb_remove
# if defined(CONFIG_SCHED_CPU_MASK)
# define _priq_run_best _priq_dumb_mask_best
# else
# define _priq_run_best z_priq_dumb_best
# endif
#elif defined(CONFIG_SCHED_SCALABLE)
#define _priq_run_add z_priq_rb_add
#define _priq_run_remove z_priq_rb_remove
#define _priq_run_best z_priq_rb_best
#elif defined(CONFIG_SCHED_MULTIQ)
#define _priq_run_add z_priq_mq_add
#define _priq_run_remove z_priq_mq_remove
#define _priq_run_best z_priq_mq_best
#endif

#if defined(CONFIG_WAITQ_SCALABLE)
#define z_priq_wait_add z_priq_rb_add
#define _priq_wait_remove z_priq_rb_remove
#define _priq_wait_best z_priq_rb_best
#elif defined(CONFIG_WAITQ_DUMB)
#define z_priq_wait_add z_priq_dumb_add
#define _priq_wait_remove z_priq_dumb_remove
#define _priq_wait_best z_priq_dumb_best
#endif
struct k_spinlock sched_spinlock;

static void update_cache(int preempt_ok);
static void end_thread(struct k_thread *thread);

static inline int is_preempt(struct k_thread *thread)
{
	/* explanation in kernel_structs.h */
	return thread->base.preempt <= _PREEMPT_THRESHOLD;
}
static inline int is_metairq(struct k_thread *thread)
{
#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
		< CONFIG_NUM_METAIRQ_PRIORITIES;
#else
	return 0;
#endif
}

#if CONFIG_ASSERT
static inline bool is_thread_dummy(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
}
#endif

/*
 * Return value same as e.g. memcmp
 * > 0 -> thread 1 priority > thread 2 priority
 * = 0 -> thread 1 priority == thread 2 priority
 * < 0 -> thread 1 priority < thread 2 priority
 * Do not rely on the actual value returned aside from the above.
 * (Again, like memcmp.)
 */
int32_t z_sched_prio_cmp(struct k_thread *thread_1,
			 struct k_thread *thread_2)
{
	/* `prio` is <32b, so the below cannot overflow. */
	int32_t b1 = thread_1->base.prio;
	int32_t b2 = thread_2->base.prio;

	if (b1 != b2) {
		return b2 - b1;
	}

#ifdef CONFIG_SCHED_DEADLINE
	/* If we assume all deadlines live within the same "half" of
	 * the 32 bit modulus space (this is a documented API rule),
	 * then the latest deadline in the queue minus the earliest is
	 * guaranteed to be (2's complement) non-negative.  We can
	 * leverage that to compare the values without having to check
	 * the current time.
	 */
	uint32_t d1 = thread_1->base.prio_deadline;
	uint32_t d2 = thread_2->base.prio_deadline;

	if (d1 != d2) {
		/* Sooner deadline means higher effective priority.
		 * Doing the calculation with unsigned types and casting
		 * to signed isn't perfect, but at least reduces this
		 * from UB on overflow to impdef.
		 */
		return (int32_t) (d2 - d1);
	}
#endif
	return 0;
}
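
/* Worked example of the deadline comparison above (illustrative only):
 * with d1 = 0xFFFFFFF0 and d2 = 0x00000010, the subtraction d2 - d1 wraps
 * to 0x20, so the (int32_t) cast yields +32 and thread_1 (whose deadline
 * is "sooner" in the modular sense) is ranked higher, even though d1 > d2
 * numerically.
 */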
static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
					 int preempt_ok)
{
	/* Preemption is OK if it's being explicitly allowed by
	 * software state (e.g. the thread called k_yield())
	 */
	if (preempt_ok != 0) {
		return true;
	}

	__ASSERT(_current != NULL, "");

	/* Or if we're pended/suspended/dummy (duh) */
	if (z_is_thread_prevented_from_running(_current)) {
		return true;
	}

	/* Edge case on ARM where a thread can be pended out of an
	 * interrupt handler before the "synchronous" swap starts
	 * context switching.  Platforms with atomic swap can never
	 * hit this.
	 */
	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
	    && z_is_thread_timeout_active(thread)) {
		return true;
	}

	/* Otherwise we have to be running a preemptible thread or
	 * switching to a metairq
	 */
	if (is_preempt(_current) || is_metairq(thread)) {
		return true;
	}

	return false;
}

#ifdef CONFIG_SCHED_CPU_MASK
static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
{
	/* With masks enabled we need to be prepared to walk the list
	 * looking for one we can run
	 */
	struct k_thread *thread;

	SYS_DLIST_FOR_EACH_CONTAINER(pq, thread, base.qnode_dlist) {
		if ((thread->base.cpu_mask & BIT(_current_cpu->id)) != 0) {
			return thread;
		}
	}
	return NULL;
}
#endif

ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq, struct k_thread *thread)
{
	struct k_thread *t;

	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
		if (z_sched_prio_cmp(thread, t) > 0) {
			sys_dlist_insert(&t->base.qnode_dlist,
					 &thread->base.qnode_dlist);
			return;
		}
	}

	sys_dlist_append(pq, &thread->base.qnode_dlist);
}
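
/* Note on the insertion order above: a new thread is placed before the
 * first queued thread with strictly lower priority and appended after all
 * threads of equal priority, so threads at the same priority are serviced
 * in FIFO order.
 */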
/* _current is never in the run queue until context switch on
 * SMP configurations, see z_requeue_current()
 */
static inline bool should_queue_thread(struct k_thread *th)
{
	return !IS_ENABLED(CONFIG_SMP) || th != _current;
}

static ALWAYS_INLINE void queue_thread(void *pq,
				       struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		_priq_run_add(pq, thread);
	}
#ifdef CONFIG_SMP
	if (thread == _current) {
		/* add current to end of queue means "yield" */
		_current_cpu->swap_ok = true;
	}
#endif
}

static ALWAYS_INLINE void dequeue_thread(void *pq,
					 struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		_priq_run_remove(pq, thread);
	}
}

#ifdef CONFIG_SMP
/* Called out of z_swap() when CONFIG_SMP.  The current thread can
 * never live in the run queue until we are inexorably on the context
 * switch path on SMP, otherwise there is a deadlock condition where a
 * set of CPUs pick a cycle of threads to run and wait for them all to
 * context switch forever.
 */
void z_requeue_current(struct k_thread *curr)
{
	if (z_is_thread_queued(curr)) {
		_priq_run_add(&_kernel.ready_q.runq, curr);
	}
}
#endif

static inline bool is_aborting(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
}

static ALWAYS_INLINE struct k_thread *next_up(void)
{
	struct k_thread *thread;

	thread = _priq_run_best(&_kernel.ready_q.runq);

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
	/* MetaIRQs must always attempt to return back to a
	 * cooperative thread they preempted and not whatever happens
	 * to be highest priority now. The cooperative thread was
	 * promised it wouldn't be preempted (by non-metairq threads)!
	 */
	struct k_thread *mirqp = _current_cpu->metairq_preempted;

	if (mirqp != NULL && (thread == NULL || !is_metairq(thread))) {
		if (!z_is_thread_prevented_from_running(mirqp)) {
			thread = mirqp;
		} else {
			_current_cpu->metairq_preempted = NULL;
		}
	}
#endif

#ifndef CONFIG_SMP
	/* In uniprocessor mode, we can leave the current thread in
	 * the queue (actually we have to, otherwise the assembly
	 * context switch code for all architectures would be
	 * responsible for putting it back in z_swap and ISR return!),
	 * which makes this choice simple.
	 */
	return (thread != NULL) ? thread : _current_cpu->idle_thread;
#else
	/* Under SMP, the "cache" mechanism for selecting the next
	 * thread doesn't work, so we have more work to do to test
	 * _current against the best choice from the queue.  Here, the
	 * thread selected above represents "the best thread that is
	 * not current".
	 *
	 * Subtle note on "queued": in SMP mode, _current does not
	 * live in the queue, so this isn't exactly the same thing as
	 * "ready", it means "is _current already added back to the
	 * queue such that we don't want to re-add it".
	 */
	if (is_aborting(_current)) {
		end_thread(_current);
	}

	int queued = z_is_thread_queued(_current);
	int active = !z_is_thread_prevented_from_running(_current);

	if (thread == NULL) {
		thread = _current_cpu->idle_thread;
	}

	if (active) {
		int32_t cmp = z_sched_prio_cmp(_current, thread);

		/* Ties only switch if state says we yielded */
		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
			thread = _current;
		}

		if (!should_preempt(thread, _current_cpu->swap_ok)) {
			thread = _current;
		}
	}

	/* Put _current back into the queue */
	if (thread != _current && active &&
	    !z_is_idle_thread_object(_current) && !queued) {
		queue_thread(&_kernel.ready_q.runq, _current);
	}

	/* Take the new _current out of the queue */
	if (z_is_thread_queued(thread)) {
		dequeue_thread(&_kernel.ready_q.runq, thread);
	}

	_current_cpu->swap_ok = false;
	return thread;
#endif
}
static void move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(&_kernel.ready_q.runq, thread);
	}
	queue_thread(&_kernel.ready_q.runq, thread);
	update_cache(thread == _current);
}

#ifdef CONFIG_TIMESLICING
static int slice_time;
static int slice_max_prio;

#ifdef CONFIG_SWAP_NONATOMIC
/* If z_swap() isn't atomic, then it's possible for a timer interrupt
 * to try to timeslice away _current after it has already pended
 * itself but before the corresponding context switch.  Treat that as
 * a noop condition in z_time_slice().
 */
static struct k_thread *pending_current;
#endif

void z_reset_time_slice(void)
{
	/* Add the elapsed time since the last announced tick to the
	 * slice count, as we'll see those "expired" ticks arrive in a
	 * FUTURE z_time_slice() call.
	 */
	if (slice_time != 0) {
		_current_cpu->slice_ticks = slice_time + sys_clock_elapsed();
		z_set_timeout_expiry(slice_time, false);
	}
}

void k_sched_time_slice_set(int32_t slice, int prio)
{
	LOCKED(&sched_spinlock) {
		_current_cpu->slice_ticks = 0;
		slice_time = k_ms_to_ticks_ceil32(slice);
		if (IS_ENABLED(CONFIG_TICKLESS_KERNEL) && slice > 0) {
			/* It's not possible to reliably set a 1-tick
			 * timeout if ticks aren't regular.
			 */
			slice_time = MAX(2, slice_time);
		}
		slice_max_prio = prio;
		z_reset_time_slice();
	}
}
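
/* Illustrative usage (values are examples, not part of this file):
 * k_sched_time_slice_set(10, 0) enables round-robin slicing of roughly
 * 10 ms (rounded up to ticks) among preemptible threads whose priority
 * is numerically >= 0 (see sliceable() below); passing a slice of 0
 * disables time slicing again.
 */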
static inline int sliceable(struct k_thread *thread)
{
	return is_preempt(thread)
		&& !z_is_thread_prevented_from_running(thread)
		&& !z_is_prio_higher(thread->base.prio, slice_max_prio)
		&& !z_is_idle_thread_object(thread);
}

/* Called out of each timer interrupt */
void z_time_slice(int ticks)
{
	/* Hold sched_spinlock, so that activity on another CPU
	 * (like a call to k_thread_abort() at just the wrong time)
	 * won't affect the correctness of the decisions made here.
	 * Also prevents any nested interrupts from changing
	 * thread state to avoid similar issues, since this would
	 * normally run with IRQs enabled.
	 */
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

#ifdef CONFIG_SWAP_NONATOMIC
	if (pending_current == _current) {
		z_reset_time_slice();
		k_spin_unlock(&sched_spinlock, key);
		return;
	}
	pending_current = NULL;
#endif

	if (slice_time && sliceable(_current)) {
		if (ticks >= _current_cpu->slice_ticks) {
			move_thread_to_end_of_prio_q(_current);
			z_reset_time_slice();
		} else {
			_current_cpu->slice_ticks -= ticks;
		}
	} else {
		_current_cpu->slice_ticks = 0;
	}
	k_spin_unlock(&sched_spinlock, key);
}
#endif

/* Track cooperative threads preempted by metairqs so we can return to
 * them specifically.  Called at the moment a new thread has been
 * selected to run.
 */
static void update_metairq_preempt(struct k_thread *thread)
{
#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
	if (is_metairq(thread) && !is_metairq(_current) &&
	    !is_preempt(_current)) {
		/* Record new preemption */
		_current_cpu->metairq_preempted = _current;
	} else if (!is_metairq(thread) && !z_is_idle_thread_object(thread)) {
		/* Returning from existing preemption */
		_current_cpu->metairq_preempted = NULL;
	}
#endif
}

static void update_cache(int preempt_ok)
{
#ifndef CONFIG_SMP
	struct k_thread *thread = next_up();

	if (should_preempt(thread, preempt_ok)) {
#ifdef CONFIG_TIMESLICING
		if (thread != _current) {
			z_reset_time_slice();
		}
#endif
		update_metairq_preempt(thread);
		_kernel.ready_q.cache = thread;
	} else {
		_kernel.ready_q.cache = _current;
	}
#else
	/* The way this works is that the CPU record keeps its
	 * "cooperative swapping is OK" flag until the next reschedule
	 * call or context switch.  It doesn't need to be tracked per
	 * thread because if the thread gets preempted for whatever
	 * reason the scheduler will make the same decision anyway.
	 */
	_current_cpu->swap_ok = preempt_ok;
#endif
}

static bool thread_active_elsewhere(struct k_thread *thread)
{
	/* True if the thread is currently running on another CPU.
	 * There are more scalable designs to answer this question in
	 * constant time, but this is fine for now.
	 */
#ifdef CONFIG_SMP
	int currcpu = _current_cpu->id;

	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
		if ((i != currcpu) &&
		    (_kernel.cpus[i].current == thread)) {
			return true;
		}
	}
#endif
	return false;
}
static void ready_thread(struct k_thread *thread)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(thread));
#endif

	/* If thread is queued already, do not try to add it to the
	 * run queue again
	 */
	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);

		queue_thread(&_kernel.ready_q.runq, thread);
		update_cache(0);
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
		arch_sched_ipi();
#endif
	}
}
void z_ready_thread(struct k_thread *thread)
{
	LOCKED(&sched_spinlock) {
		if (!thread_active_elsewhere(thread)) {
			ready_thread(thread);
		}
	}
}

void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	LOCKED(&sched_spinlock) {
		move_thread_to_end_of_prio_q(thread);
	}
}

void z_sched_start(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

	if (z_has_thread_started(thread)) {
		k_spin_unlock(&sched_spinlock, key);
		return;
	}

	z_mark_thread_as_started(thread);
	ready_thread(thread);
	z_reschedule(&sched_spinlock, key);
}

void z_impl_k_thread_suspend(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);

	(void)z_abort_thread_timeout(thread);

	LOCKED(&sched_spinlock) {
		if (z_is_thread_queued(thread)) {
			dequeue_thread(&_kernel.ready_q.runq, thread);
		}
		z_mark_thread_as_suspended(thread);
		update_cache(thread == _current);
	}

	if (thread == _current) {
		z_reschedule_unlocked();
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_suspend(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_suspend(thread);
}
#include <syscalls/k_thread_suspend_mrsh.c>
#endif

void z_impl_k_thread_resume(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);

	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

	/* Do not try to resume a thread that was not suspended */
	if (!z_is_thread_suspended(thread)) {
		k_spin_unlock(&sched_spinlock, key);
		return;
	}

	z_mark_thread_as_not_suspended(thread);
	ready_thread(thread);

	z_reschedule(&sched_spinlock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_resume(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_resume(thread);
}
#include <syscalls/k_thread_resume_mrsh.c>
#endif

static _wait_q_t *pended_on_thread(struct k_thread *thread)
{
	__ASSERT_NO_MSG(thread->base.pended_on);

	return thread->base.pended_on;
}

static void unready_thread(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(&_kernel.ready_q.runq, thread);
	}
	update_cache(thread == _current);
}

/* sched_spinlock must be held */
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
{
	unready_thread(thread);
	z_mark_thread_as_pending(thread);

	SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread);

	if (wait_q != NULL) {
		thread->base.pended_on = wait_q;
		z_priq_wait_add(&wait_q->waitq, thread);
	}
}

static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
{
	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		z_add_thread_timeout(thread, timeout);
	}
}

static void pend(struct k_thread *thread, _wait_q_t *wait_q,
		 k_timeout_t timeout)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
#endif

	LOCKED(&sched_spinlock) {
		add_to_waitq_locked(thread, wait_q);
	}

	add_thread_timeout(thread, timeout);
}
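
/* Pending in pend() above is a two-step operation: under sched_spinlock the
 * thread is pulled out of the run queue and (optionally) linked onto the
 * wait queue, then any finite timeout is registered outside the lock via
 * add_thread_timeout().
 */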
void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
		   k_timeout_t timeout)
{
	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
	pend(thread, wait_q, timeout);
}

static inline void unpend_thread_no_timeout(struct k_thread *thread)
{
	_priq_wait_remove(&pended_on_thread(thread)->waitq, thread);
	z_mark_thread_as_not_pending(thread);
	thread->base.pended_on = NULL;
}

ALWAYS_INLINE void z_unpend_thread_no_timeout(struct k_thread *thread)
{
	LOCKED(&sched_spinlock) {
		unpend_thread_no_timeout(thread);
	}
}

#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Timeout handler for *_thread_timeout() APIs */
void z_thread_timeout(struct _timeout *timeout)
{
	struct k_thread *thread = CONTAINER_OF(timeout,
					       struct k_thread, base.timeout);

	LOCKED(&sched_spinlock) {
		bool killed = ((thread->base.thread_state & _THREAD_DEAD) ||
			       (thread->base.thread_state & _THREAD_ABORTING));

		if (!killed) {
			if (thread->base.pended_on != NULL) {
				unpend_thread_no_timeout(thread);
			}
			z_mark_thread_as_started(thread);
			z_mark_thread_as_not_suspended(thread);
			ready_thread(thread);
		}
	}
}
#endif

int z_pend_curr_irqlock(uint32_t key, _wait_q_t *wait_q, k_timeout_t timeout)
{
	pend(_current, wait_q, timeout);

#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;

	int ret = z_swap_irqlock(key);

	LOCKED(&sched_spinlock) {
		if (pending_current == _current) {
			pending_current = NULL;
		}
	}
	return ret;
#else
	return z_swap_irqlock(key);
#endif
}

int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
		_wait_q_t *wait_q, k_timeout_t timeout)
{
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
#endif
	pend(_current, wait_q, timeout);
	return z_swap(lock, key);
}

struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	LOCKED(&sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			unpend_thread_no_timeout(thread);
		}
	}

	return thread;
}

struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	LOCKED(&sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			unpend_thread_no_timeout(thread);
			(void)z_abort_thread_timeout(thread);
		}
	}

	return thread;
}

void z_unpend_thread(struct k_thread *thread)
{
	z_unpend_thread_no_timeout(thread);
	(void)z_abort_thread_timeout(thread);
}

/* Priority set utility that does no rescheduling, it just changes the
 * run queue state, returning true if a reschedule is needed later.
 */
bool z_set_prio(struct k_thread *thread, int prio)
{
	bool need_sched = 0;

	LOCKED(&sched_spinlock) {
		need_sched = z_is_thread_ready(thread);

		if (need_sched) {
			/* Don't requeue on SMP if it's the running thread */
			if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
				dequeue_thread(&_kernel.ready_q.runq, thread);
				thread->base.prio = prio;
				queue_thread(&_kernel.ready_q.runq, thread);
			} else {
				thread->base.prio = prio;
			}
			update_cache(1);
		} else {
			thread->base.prio = prio;
		}
	}

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio);

	return need_sched;
}

void z_thread_priority_set(struct k_thread *thread, int prio)
{
	bool need_sched = z_set_prio(thread, prio);

#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	arch_sched_ipi();
#endif

	if (need_sched && _current->base.sched_locked == 0U) {
		z_reschedule_unlocked();
	}
}
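
/* Note on the requeue in z_set_prio() above: a thread's position in the run
 * queue depends on its priority, so a queued thread must be dequeued before
 * base.prio is changed and queued again afterwards; otherwise only the
 * stored priority is updated.
 */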
static inline bool resched(uint32_t key)
{
#ifdef CONFIG_SMP
	_current_cpu->swap_ok = 0;
#endif

	return arch_irq_unlocked(key) && !arch_is_in_isr();
}

/*
 * Check if the next ready thread is the same as the current thread
 * and save the trip if true.
 */
static inline bool need_swap(void)
{
	/* the SMP case will be handled in C based z_swap() */
#ifdef CONFIG_SMP
	return true;
#else
	struct k_thread *new_thread;

	/* Check if the next ready thread is the same as the current thread */
	new_thread = _kernel.ready_q.cache;

	return new_thread != _current;
#endif
}

void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
{
	if (resched(key.key) && need_swap()) {
		z_swap(lock, key);
	} else {
		k_spin_unlock(lock, key);
	}
}

void z_reschedule_irqlock(uint32_t key)
{
	if (resched(key)) {
		z_swap_irqlock(key);
	} else {
		irq_unlock(key);
	}
}

void k_sched_lock(void)
{
	LOCKED(&sched_spinlock) {
		SYS_PORT_TRACING_FUNC(k_thread, sched_lock);

		z_sched_lock();
	}
}

void k_sched_unlock(void)
{
	LOCKED(&sched_spinlock) {
		__ASSERT(_current->base.sched_locked != 0U, "");
		__ASSERT(!arch_is_in_isr(), "");

		++_current->base.sched_locked;
		update_cache(0);
	}

	LOG_DBG("scheduler unlocked (%p:%d)",
		_current, _current->base.sched_locked);

	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);

	z_reschedule_unlocked();
}
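
/* Illustrative usage (not part of this file):
 *
 *   k_sched_lock();
 *   ... section that must not be preempted by other threads ...
 *   k_sched_unlock();
 *
 * The lock count lives in _current->base.sched_locked, so lock/unlock pairs
 * may nest; the assert above catches unbalanced k_sched_unlock() calls.
 */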
struct k_thread *z_swap_next_thread(void)
{
#ifdef CONFIG_SMP
	return next_up();
#else
	return _kernel.ready_q.cache;
#endif
}

/* Just a wrapper around _current = xxx with tracing */
static inline void set_current(struct k_thread *new_thread)
{
	z_thread_mark_switched_out();
	_current_cpu->current = new_thread;
}

#ifdef CONFIG_USE_SWITCH
void *z_get_next_switch_handle(void *interrupted)
{
	z_check_stack_sentinel();

#ifdef CONFIG_SMP
	void *ret = NULL;

	LOCKED(&sched_spinlock) {
		struct k_thread *old_thread = _current, *new_thread;

		if (IS_ENABLED(CONFIG_SMP)) {
			old_thread->switch_handle = NULL;
		}
		new_thread = next_up();

		if (old_thread != new_thread) {
			update_metairq_preempt(new_thread);
			wait_for_switch(new_thread);
			arch_cohere_stacks(old_thread, interrupted, new_thread);

#ifdef CONFIG_TIMESLICING
			z_reset_time_slice();
#endif
			_current_cpu->swap_ok = 0;
			set_current(new_thread);

#ifdef CONFIG_SPIN_VALIDATE
			/* Changed _current!  Update the spinlock
			 * bookkeeping so the validation doesn't get
			 * confused when the "wrong" thread tries to
			 * release the lock.
			 */
			z_spin_lock_set_owner(&sched_spinlock);
#endif

			/* A queued (runnable) old/current thread
			 * needs to be added back to the run queue
			 * here, and atomically with its switch handle
			 * being set below.  This is safe now, as we
			 * will not return into it.
			 */
			if (z_is_thread_queued(old_thread)) {
				_priq_run_add(&_kernel.ready_q.runq,
					      old_thread);
			}
		}
		old_thread->switch_handle = interrupted;
		ret = new_thread->switch_handle;
		if (IS_ENABLED(CONFIG_SMP)) {
			/* Active threads MUST have a null here */
			new_thread->switch_handle = NULL;
		}
	}
	return ret;
#else
	_current->switch_handle = interrupted;
	set_current(_kernel.ready_q.cache);
	return _current->switch_handle;
#endif
}
#endif

void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
{
	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	sys_dlist_remove(&thread->base.qnode_dlist);
}

struct k_thread *z_priq_dumb_best(sys_dlist_t *pq)
{
	struct k_thread *thread = NULL;
	sys_dnode_t *n = sys_dlist_peek_head(pq);

	if (n != NULL) {
		thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
	}
	return thread;
}

bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b)
{
	struct k_thread *thread_a, *thread_b;
	int32_t cmp;

	thread_a = CONTAINER_OF(a, struct k_thread, base.qnode_rb);
	thread_b = CONTAINER_OF(b, struct k_thread, base.qnode_rb);

	cmp = z_sched_prio_cmp(thread_a, thread_b);

	if (cmp > 0) {
		return true;
	} else if (cmp < 0) {
		return false;
	} else {
		return thread_a->base.order_key < thread_b->base.order_key
			? 1 : 0;
	}
}
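
/* The order_key tie-break above makes the rbtree comparison a total order
 * and preserves FIFO ordering among threads of equal priority: earlier
 * insertions receive smaller keys (see z_priq_rb_add() below).
 */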
void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread)
{
	struct k_thread *t;

	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	thread->base.order_key = pq->next_order_key++;

	/* Renumber at wraparound.  This is tiny code, and in practice
	 * will almost never be hit on real systems.  BUT on very
	 * long-running systems where a priq never completely empties
	 * AND that contains very large numbers of threads, it can be
	 * a latency glitch to loop over all the threads like this.
	 */
	if (!pq->next_order_key) {
		RB_FOR_EACH_CONTAINER(&pq->tree, t, base.qnode_rb) {
			t->base.order_key = pq->next_order_key++;
		}
	}

	rb_insert(&pq->tree, &thread->base.qnode_rb);
}

void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread)
{
	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	rb_remove(&pq->tree, &thread->base.qnode_rb);

	if (!pq->tree.root) {
		pq->next_order_key = 0;
	}
}

struct k_thread *z_priq_rb_best(struct _priq_rb *pq)
{
	struct k_thread *thread = NULL;
	struct rbnode *n = rb_get_min(&pq->tree);

	if (n != NULL) {
		thread = CONTAINER_OF(n, struct k_thread, base.qnode_rb);
	}
	return thread;
}

#ifdef CONFIG_SCHED_MULTIQ
# if (K_LOWEST_THREAD_PRIO - K_HIGHEST_THREAD_PRIO) > 31
# error Too many priorities for multiqueue scheduler (max 32)
# endif
#endif

ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq, struct k_thread *thread)
{
	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;

	sys_dlist_append(&pq->queues[priority_bit], &thread->base.qnode_dlist);
	pq->bitmask |= BIT(priority_bit);
}

ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread)
{
	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;

	sys_dlist_remove(&thread->base.qnode_dlist);
	if (sys_dlist_is_empty(&pq->queues[priority_bit])) {
		pq->bitmask &= ~BIT(priority_bit);
	}
}

struct k_thread *z_priq_mq_best(struct _priq_mq *pq)
{
	if (!pq->bitmask) {
		return NULL;
	}

	struct k_thread *thread = NULL;
	sys_dlist_t *l = &pq->queues[__builtin_ctz(pq->bitmask)];
	sys_dnode_t *n = sys_dlist_peek_head(l);

	if (n != NULL) {
		thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
	}
	return thread;
}
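
/* In the multiq backend, bit N of pq->bitmask is set whenever queues[N] is
 * non-empty, so __builtin_ctz() above finds the lowest set bit, i.e. the
 * non-empty list closest to K_HIGHEST_THREAD_PRIO, in constant time.
 */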
int z_unpend_all(_wait_q_t *wait_q)
{
	int need_sched = 0;
	struct k_thread *thread;

	while ((thread = z_waitq_head(wait_q)) != NULL) {
		z_unpend_thread(thread);
		z_ready_thread(thread);
		need_sched = 1;
	}

	return need_sched;
}

void z_sched_init(void)
{
#ifdef CONFIG_SCHED_DUMB
	sys_dlist_init(&_kernel.ready_q.runq);
#endif

#ifdef CONFIG_SCHED_SCALABLE
	_kernel.ready_q.runq = (struct _priq_rb) {
		.tree = {
			.lessthan_fn = z_priq_rb_lessthan,
		}
	};
#endif

#ifdef CONFIG_SCHED_MULTIQ
	for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) {
		sys_dlist_init(&_kernel.ready_q.runq.queues[i]);
	}
#endif

#ifdef CONFIG_TIMESLICING
	k_sched_time_slice_set(CONFIG_TIMESLICE_SIZE,
			       CONFIG_TIMESLICE_PRIORITY);
#endif
}

int z_impl_k_thread_priority_get(k_tid_t thread)
{
	return thread->base.prio;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_priority_get(k_tid_t thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	return z_impl_k_thread_priority_get(thread);
}
#include <syscalls/k_thread_priority_get_mrsh.c>
#endif

void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
{
	/*
	 * Use NULL, since we cannot know what the entry point is (we do not
	 * keep track of it) and idle cannot change its priority.
	 */
	Z_ASSERT_VALID_PRIO(prio, NULL);
	__ASSERT(!arch_is_in_isr(), "");

	struct k_thread *th = (struct k_thread *)thread;

	z_thread_priority_set(th, prio);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
				    "invalid thread priority %d", prio));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
				    "thread priority may only be downgraded (%d < %d)",
				    prio, thread->base.prio));

	z_impl_k_thread_priority_set(thread, prio);
}
#include <syscalls/k_thread_priority_set_mrsh.c>
#endif

#ifdef CONFIG_SCHED_DEADLINE
void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
{
	struct k_thread *thread = tid;

	LOCKED(&sched_spinlock) {
		thread->base.prio_deadline = k_cycle_get_32() + deadline;
		if (z_is_thread_queued(thread)) {
			dequeue_thread(&_kernel.ready_q.runq, thread);
			queue_thread(&_kernel.ready_q.runq, thread);
		}
	}
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
{
	struct k_thread *thread = tid;

	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(deadline > 0,
				    "invalid thread deadline %d",
				    (int)deadline));

	z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
}
#include <syscalls/k_thread_deadline_set_mrsh.c>
#endif
#endif
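
/* Note on z_impl_k_thread_deadline_set() above: the deadline argument is
 * relative to "now" (it is added to k_cycle_get_32() and stored in
 * prio_deadline), and a queued thread is dequeued and requeued so the
 * ready-queue ordering picks up the new value immediately.
 */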
void z_impl_k_yield(void)
{
	__ASSERT(!arch_is_in_isr(), "");

	SYS_PORT_TRACING_FUNC(k_thread, yield);

	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

	if (!IS_ENABLED(CONFIG_SMP) ||
	    z_is_thread_queued(_current)) {
		dequeue_thread(&_kernel.ready_q.runq,
			       _current);
	}
	queue_thread(&_kernel.ready_q.runq, _current);
	update_cache(1);
	z_swap(&sched_spinlock, key);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_yield(void)
{
	z_impl_k_yield();
}
#include <syscalls/k_yield_mrsh.c>
#endif

static int32_t z_tick_sleep(k_ticks_t ticks)
{
#ifdef CONFIG_MULTITHREADING
	uint32_t expected_wakeup_ticks;

	__ASSERT(!arch_is_in_isr(), "");

#ifndef CONFIG_TIMEOUT_64BIT
	/* LOG subsys does not handle 64-bit values
	 * https://github.com/zephyrproject-rtos/zephyr/issues/26246
	 */
	LOG_DBG("thread %p for %u ticks", _current, ticks);
#endif

	/* wait of 0 ms is treated as a 'yield' */
	if (ticks == 0) {
		k_yield();
		return 0;
	}

	k_timeout_t timeout = Z_TIMEOUT_TICKS(ticks);

	if (Z_TICK_ABS(ticks) <= 0) {
		expected_wakeup_ticks = ticks + sys_clock_tick_get_32();
	} else {
		expected_wakeup_ticks = Z_TICK_ABS(ticks);
	}

	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
#endif
	unready_thread(_current);
	z_add_thread_timeout(_current, timeout);
	z_mark_thread_as_suspended(_current);

	(void)z_swap(&sched_spinlock, key);

	__ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), "");

	ticks = (k_ticks_t)expected_wakeup_ticks - sys_clock_tick_get_32();
	if (ticks > 0) {
		return ticks;
	}
#endif

	return 0;
}
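
/* z_tick_sleep() returns the number of ticks still remaining when the thread
 * was woken early (e.g. via k_wakeup()), or 0 if the full duration elapsed
 * (and always 0 when CONFIG_MULTITHREADING is disabled).
 */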
int32_t z_impl_k_sleep(k_timeout_t timeout)
{
	k_ticks_t ticks;

	__ASSERT(!arch_is_in_isr(), "");

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);

	/* in case of K_FOREVER, we suspend */
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		k_thread_suspend(_current);

		SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);

		return (int32_t) K_TICKS_FOREVER;
	}

	ticks = timeout.ticks;

	ticks = z_tick_sleep(ticks);

	int32_t ret = k_ticks_to_ms_floor64(ticks);

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
{
	return z_impl_k_sleep(timeout);
}
#include <syscalls/k_sleep_mrsh.c>
#endif

int32_t z_impl_k_usleep(int us)
{
	int32_t ticks;

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us);

	ticks = k_us_to_ticks_ceil64(us);
	ticks = z_tick_sleep(ticks);

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, k_ticks_to_us_floor64(ticks));

	return k_ticks_to_us_floor64(ticks);
}

#ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_usleep(int us)
{
	return z_impl_k_usleep(us);
}
#include <syscalls/k_usleep_mrsh.c>
#endif

void z_impl_k_wakeup(k_tid_t thread)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);

	if (z_is_thread_pending(thread)) {
		return;
	}

	if (z_abort_thread_timeout(thread) < 0) {
		/* Might have just been sleeping forever */
		if (thread->base.thread_state != _THREAD_SUSPENDED) {
			return;
		}
	}

	z_mark_thread_as_not_suspended(thread);
	z_ready_thread(thread);

#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	arch_sched_ipi();
#endif

	if (!arch_is_in_isr()) {
		z_reschedule_unlocked();
	}
}

#ifdef CONFIG_TRACE_SCHED_IPI
extern void z_trace_sched_ipi(void);
#endif

#ifdef CONFIG_SMP
void z_sched_ipi(void)
{
	/* NOTE: When adding code to this, make sure this is called
	 * at appropriate location when !CONFIG_SCHED_IPI_SUPPORTED.
	 */
#ifdef CONFIG_TRACE_SCHED_IPI
	z_trace_sched_ipi();
#endif
}
#endif

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_wakeup(k_tid_t thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_wakeup(thread);
}
#include <syscalls/k_wakeup_mrsh.c>
#endif

k_tid_t z_impl_z_current_get(void)
{
#ifdef CONFIG_SMP
	/* In SMP, _current is a field read from _current_cpu, which
	 * can race with preemption before it is read.  We must lock
	 * local interrupts when reading it.
	 */
	unsigned int k = arch_irq_lock();
#endif

	k_tid_t ret = _current_cpu->current;

#ifdef CONFIG_SMP
	arch_irq_unlock(k);
#endif
	return ret;
}

#ifdef CONFIG_USERSPACE
static inline k_tid_t z_vrfy_z_current_get(void)
{
	return z_impl_z_current_get();
}
#include <syscalls/z_current_get_mrsh.c>
#endif

int z_impl_k_is_preempt_thread(void)
{
	return !arch_is_in_isr() && is_preempt(_current);
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_is_preempt_thread(void)
{
	return z_impl_k_is_preempt_thread();
}
#include <syscalls/k_is_preempt_thread_mrsh.c>
#endif

#ifdef CONFIG_SCHED_CPU_MASK
# ifdef CONFIG_SMP
/* Right now we use a single byte for this mask */
BUILD_ASSERT(CONFIG_MP_NUM_CPUS <= 8, "Too many CPUs for mask word");
# endif

static int cpu_mask_mod(k_tid_t thread, uint32_t enable_mask, uint32_t disable_mask)
{
	int ret = 0;

	LOCKED(&sched_spinlock) {
		if (z_is_thread_prevented_from_running(thread)) {
			thread->base.cpu_mask |= enable_mask;
			thread->base.cpu_mask &= ~disable_mask;
		} else {
			ret = -EINVAL;
		}
	}
	return ret;
}

int k_thread_cpu_mask_clear(k_tid_t thread)
{
	return cpu_mask_mod(thread, 0, 0xffffffff);
}

int k_thread_cpu_mask_enable_all(k_tid_t thread)
{
	return cpu_mask_mod(thread, 0xffffffff, 0);
}

int k_thread_cpu_mask_enable(k_tid_t thread, int cpu)
{
	return cpu_mask_mod(thread, BIT(cpu), 0);
}

int k_thread_cpu_mask_disable(k_tid_t thread, int cpu)
{
	return cpu_mask_mod(thread, 0, BIT(cpu));
}

#endif /* CONFIG_SCHED_CPU_MASK */
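
/* The CPU mask can only be changed while the thread is prevented from
 * running (cpu_mask_mod() above returns -EINVAL otherwise).  An illustrative
 * pattern, not shown in this file, is to create the thread with a K_FOREVER
 * start delay, adjust the mask, and only then call k_thread_start().
 */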
static inline void unpend_all(_wait_q_t *wait_q)
{
	struct k_thread *thread;

	while ((thread = z_waitq_head(wait_q)) != NULL) {
		unpend_thread_no_timeout(thread);
		(void)z_abort_thread_timeout(thread);
		arch_thread_return_value_set(thread, 0);
		ready_thread(thread);
	}
}

#ifdef CONFIG_CMSIS_RTOS_V1
extern void z_thread_cmsis_status_mask_clear(struct k_thread *thread);
#endif

static void end_thread(struct k_thread *thread)
{
	/* We hold the lock, and the thread is known not to be running
	 * anywhere.
	 */
	if ((thread->base.thread_state & _THREAD_DEAD) == 0U) {
		thread->base.thread_state |= _THREAD_DEAD;
		thread->base.thread_state &= ~_THREAD_ABORTING;
		if (z_is_thread_queued(thread)) {
			dequeue_thread(&_kernel.ready_q.runq, thread);
		}
		if (thread->base.pended_on != NULL) {
			unpend_thread_no_timeout(thread);
		}
		(void)z_abort_thread_timeout(thread);
		unpend_all(&thread->join_queue);
		update_cache(1);

		SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);

		z_thread_monitor_exit(thread);

#ifdef CONFIG_CMSIS_RTOS_V1
		z_thread_cmsis_status_mask_clear(thread);
#endif

#ifdef CONFIG_USERSPACE
		z_mem_domain_exit_thread(thread);
		z_thread_perms_all_clear(thread);
		z_object_uninit(thread->stack_obj);
		z_object_uninit(thread);
#endif
	}
}

void z_thread_abort(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

	if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
		k_spin_unlock(&sched_spinlock, key);
		return;
	}

#ifdef CONFIG_SMP
	if (is_aborting(thread) && thread == _current && arch_is_in_isr()) {
		/* Another CPU is spinning for us, don't deadlock */
		end_thread(thread);
	}

	bool active = thread_active_elsewhere(thread);

	if (active) {
		/* It's running somewhere else, flag and poke */
		thread->base.thread_state |= _THREAD_ABORTING;

#ifdef CONFIG_SCHED_IPI_SUPPORTED
		arch_sched_ipi();
#endif
	}

	if (is_aborting(thread) && thread != _current) {
		if (arch_is_in_isr()) {
			/* ISRs can only spin waiting for another CPU */
			k_spin_unlock(&sched_spinlock, key);
			while (is_aborting(thread)) {
			}
		} else if (active) {
			/* Threads can join */
			add_to_waitq_locked(_current, &thread->join_queue);
			z_swap(&sched_spinlock, key);
		}
		return; /* lock has been released */
	}
#endif
	end_thread(thread);
	if (thread == _current && !arch_is_in_isr()) {
		z_swap(&sched_spinlock, key);
		__ASSERT(false, "aborted _current back from dead");
	}
	k_spin_unlock(&sched_spinlock, key);
}

#if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
void z_impl_k_thread_abort(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);

	z_thread_abort(thread);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
}
#endif

int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
{
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
	int ret = 0;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout);

	if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
		ret = 0;
	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		ret = -EBUSY;
	} else if ((thread == _current) ||
		   (thread->base.pended_on == &_current->join_queue)) {
		ret = -EDEADLK;
	} else {
		__ASSERT(!arch_is_in_isr(), "cannot join in ISR");
		add_to_waitq_locked(_current, &thread->join_queue);
		add_thread_timeout(_current, timeout);

		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);

		ret = z_swap(&sched_spinlock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);

		return ret;
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);

	k_spin_unlock(&sched_spinlock, key);

	return ret;
}
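
/* Return values of z_impl_k_thread_join() above: 0 if the target is already
 * dead (or once the join completes), -EBUSY if K_NO_WAIT was passed and the
 * thread is still running, -EDEADLK for a self-join or when the two threads
 * are joining each other; otherwise the value comes back from z_swap() after
 * the waiter is woken.
 */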
#ifdef CONFIG_USERSPACE
/* Special case: don't oops if the thread is uninitialized.  This is because
 * the initialization bit does double-duty for thread objects; if it is
 * false, either the thread object is truly uninitialized, or the thread ran
 * and exited for some reason.
 *
 * Return true in this case, indicating we should just do nothing and return
 * success to the caller.
 */
static bool thread_obj_validate(struct k_thread *thread)
{
	struct z_object *ko = z_object_find(thread);
	int ret = z_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE);

	switch (ret) {
	case 0:
		return false;
	case -EINVAL:
		return true;
	default:
#ifdef CONFIG_LOG
		z_dump_object_error(ret, thread, ko, K_OBJ_THREAD);
#endif
		Z_OOPS(Z_SYSCALL_VERIFY_MSG(ret, "access denied"));
	}
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

static inline int z_vrfy_k_thread_join(struct k_thread *thread,
				       k_timeout_t timeout)
{
	if (thread_obj_validate(thread)) {
		return 0;
	}

	return z_impl_k_thread_join(thread, timeout);
}
#include <syscalls/k_thread_join_mrsh.c>

static inline void z_vrfy_k_thread_abort(k_tid_t thread)
{
	if (thread_obj_validate(thread)) {
		return;
	}

	Z_OOPS(Z_SYSCALL_VERIFY_MSG(!(thread->base.user_options & K_ESSENTIAL),
				    "aborting essential thread %p", thread));

	z_impl_k_thread_abort((struct k_thread *)thread);
}
#include <syscalls/k_thread_abort_mrsh.c>
#endif /* CONFIG_USERSPACE */
/*
 * future scheduler.h API implementations
 */
bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
{
	struct k_thread *thread;
	bool ret = false;

	LOCKED(&sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			z_thread_return_value_set_with_data(thread,
							    swap_retval,
							    swap_data);
			unpend_thread_no_timeout(thread);
			(void)z_abort_thread_timeout(thread);
			ready_thread(thread);
			ret = true;
		}
	}

	return ret;
}

int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
		 _wait_q_t *wait_q, k_timeout_t timeout, void **data)
{
	int ret = z_pend_curr(lock, key, wait_q, timeout);

	if (data != NULL) {
		*data = _current->base.swap_data;
	}
	return ret;
}
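
/* z_sched_wake() and z_sched_wait() above act as a pair: the waker picks the
 * best-priority pended thread, stores swap_retval/swap_data for it and
 * readies it, while the woken side sees swap_retval as z_pend_curr()'s
 * return value and retrieves swap_data through the *data output pointer.
 */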