/* pthread_cond.c — POSIX condition variable implementation for Zephyr */
  1. /*
  2. * Copyright (c) 2017 Intel Corporation
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <kernel.h>
  7. #include <ksched.h>
  8. #include <wait_q.h>
  9. #include <posix/pthread.h>
  10. extern struct k_spinlock z_pthread_spinlock;
  11. int64_t timespec_to_timeoutms(const struct timespec *abstime);
/* Common wait path for pthread_cond_wait() and pthread_cond_timedwait().
 *
 * Atomically releases @mut and blocks the caller on @cv's wait queue for
 * up to @timeout, then re-acquires @mut before returning.  The caller must
 * hold @mut exactly once (non-recursively) — asserted below.
 *
 * Returns 0 on wakeup by signal/broadcast, ETIMEDOUT if @timeout expired,
 * or whatever other value z_sched_wait() produced.
 */
static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut,
		     k_timeout_t timeout)
{
	/* POSIX forbids waiting with a recursively-held mutex. */
	__ASSERT(mut->lock_count == 1U, "");
	int ret;
	k_spinlock_key_t key = k_spin_lock(&z_pthread_spinlock);

	/* Hand the mutex off under the scheduler spinlock: mark it
	 * unowned and wake one thread blocked on it.  This must happen
	 * before z_sched_wait() so that release-and-sleep is atomic
	 * with respect to signalers.
	 */
	mut->lock_count = 0U;
	mut->owner = NULL;
	_ready_one_thread(&mut->wait_q);
	/* z_sched_wait() releases the spinlock and sleeps on cv's wait
	 * queue in one scheduler operation; -EAGAIN indicates timeout.
	 */
	ret = z_sched_wait(&z_pthread_spinlock, key, &cv->wait_q, timeout, NULL);
	/* FIXME: this extra lock (and the potential context switch it
	 * can cause) could be optimized out. At the point of the
	 * signal/broadcast, it's possible to detect whether or not we
	 * will be swapping back to this particular thread and lock it
	 * (i.e. leave the lock variable unchanged) on our behalf.
	 * But that requires putting scheduler intelligence into this
	 * higher level abstraction and is probably not worth it.
	 */
	pthread_mutex_lock(mut);
	/* Map the kernel's -EAGAIN timeout code to POSIX ETIMEDOUT. */
	return ret == -EAGAIN ? ETIMEDOUT : ret;
}
  33. /* This implements a "fair" scheduling policy: at the end of a POSIX
  34. * thread call that might result in a change of the current maximum
  35. * priority thread, we always check and context switch if needed.
  36. * Note that there is significant dispute in the community over the
  37. * "right" way to do this and different systems do it differently by
  38. * default. Zephyr is an RTOS, so we choose latency over
  39. * throughput. See here for a good discussion of the broad issue:
  40. *
  41. * https://blog.mozilla.org/nfroyd/2017/03/29/on-mutex-performance-part-1/
  42. */
  43. int pthread_cond_signal(pthread_cond_t *cv)
  44. {
  45. if (cv == NULL)
  46. return EINVAL;
  47. if (cv->attr.type == -1)
  48. pthread_cond_init(cv, NULL);
  49. z_sched_wake(&cv->wait_q, 0, NULL);
  50. return 0;
  51. }
  52. int pthread_cond_broadcast(pthread_cond_t *cv)
  53. {
  54. if (cv == NULL)
  55. return EINVAL;
  56. if (cv->attr.type == -1)
  57. pthread_cond_init(cv, NULL);
  58. z_sched_wake_all(&cv->wait_q, 0, NULL);
  59. return 0;
  60. }
  61. int pthread_cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut)
  62. {
  63. if (cv == NULL)
  64. return EINVAL;
  65. if (cv->attr.type == -1)
  66. pthread_cond_init(cv, NULL);
  67. return cond_wait(cv, mut, K_FOREVER);
  68. }
  69. int pthread_cond_timedwait(pthread_cond_t *cv, pthread_mutex_t *mut,
  70. const struct timespec *abstime)
  71. {
  72. int32_t timeout = (int32_t)timespec_to_timeoutms(abstime);
  73. if (cv == NULL)
  74. return EINVAL;
  75. if (cv->attr.type == -1)
  76. pthread_cond_init(cv, NULL);
  77. return cond_wait(cv, mut, K_MSEC(timeout));
  78. }