device_runtime.c

/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr.h>
#include <kernel.h>
#include <device.h>
#include <sys/__assert.h>
#include <pm/device_runtime.h>
#include <spinlock.h>

#define LOG_LEVEL CONFIG_PM_LOG_LEVEL /* From power module Kconfig */
#include <logging/log.h>
LOG_MODULE_DECLARE(power);

/* Device PM request types */
#define PM_DEVICE_SYNC	BIT(0)
#define PM_DEVICE_ASYNC	BIT(1)
static void pm_device_runtime_state_set(struct pm_device *pm)
{
	const struct device *dev = pm->dev;
	int ret = 0;

	/* Clear the transitioning flag */
	atomic_clear_bit(&dev->pm->flags, PM_DEVICE_FLAG_TRANSITIONING);

	switch (dev->pm->state) {
	case PM_DEVICE_STATE_ACTIVE:
		/* Suspend only when the last reference has been dropped */
		if ((dev->pm->usage == 0) && dev->pm->enable) {
			ret = pm_device_state_set(dev, PM_DEVICE_STATE_SUSPENDED);
		}
		break;
	case PM_DEVICE_STATE_SUSPENDED:
		/* Resume when a reference is held or runtime PM is disabled */
		if ((dev->pm->usage > 0) || !dev->pm->enable) {
			ret = pm_device_state_set(dev, PM_DEVICE_STATE_ACTIVE);
		}
		break;
	default:
		LOG_ERR("Invalid state!!");
	}

	__ASSERT(ret == 0, "Set Power state error");

	/*
	 * k_condvar_broadcast() returns the number of woken threads on
	 * success. There is nothing useful to do with that information
	 * here, so it is ignored.
	 */
	(void)k_condvar_broadcast(&dev->pm->condvar);
}
static void pm_work_handler(struct k_work *work)
{
	struct pm_device *pm = CONTAINER_OF(work, struct pm_device, work);

	/* Serialize the state change with synchronous requests */
	(void)k_mutex_lock(&pm->lock, K_FOREVER);
	pm_device_runtime_state_set(pm);
	(void)k_mutex_unlock(&pm->lock);
}
static int pm_device_request(const struct device *dev,
			     enum pm_device_state state, uint32_t pm_flags)
{
	int ret = 0;

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_request, dev, state);

	__ASSERT((state == PM_DEVICE_STATE_ACTIVE) ||
		 (state == PM_DEVICE_STATE_SUSPENDED),
		 "Invalid device PM state requested");

	if (k_is_pre_kernel()) {
		if (state == PM_DEVICE_STATE_ACTIVE) {
			dev->pm->usage++;
		} else {
			dev->pm->usage--;
		}

		/* If we are being called before the kernel was initialized,
		 * we can assume that the system took care of initializing
		 * devices properly. It means that all dependencies were
		 * satisfied and this call just changed the reference count
		 * for this device.
		 */

		/* Unfortunately this is not what happens yet. There are
		 * cases, for example, like the pinmux being initialized
		 * before the gpio. Let's just power the device on/off.
		 */
		if (dev->pm->usage == 1) {
			(void)pm_device_state_set(dev, PM_DEVICE_STATE_ACTIVE);
		} else if (dev->pm->usage == 0) {
			(void)pm_device_state_set(dev, PM_DEVICE_STATE_SUSPENDED);
		}

		goto out;
	}

	(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);

	if (!dev->pm->enable) {
		ret = -ENOTSUP;
		goto out_unlock;
	}

	if (state == PM_DEVICE_STATE_ACTIVE) {
		dev->pm->usage++;
		if (dev->pm->usage > 1) {
			/* Device is already active (or activating) */
			goto out_unlock;
		}
	} else {
		/* Check if it is already 0 to avoid an underflow */
		if (dev->pm->usage == 0) {
			goto out_unlock;
		}

		dev->pm->usage--;
		if (dev->pm->usage > 0) {
			/* Other users still hold a reference */
			goto out_unlock;
		}
	}

	/* Return in case of an async request: the actual state change is
	 * deferred to the work queue.
	 */
	if (pm_flags & PM_DEVICE_ASYNC) {
		atomic_set_bit(&dev->pm->flags, PM_DEVICE_FLAG_TRANSITIONING);
		(void)k_work_schedule(&dev->pm->work, K_NO_WAIT);
		goto out_unlock;
	}

	/* Wait for any pending or in-flight transition to finish */
	while ((k_work_delayable_is_pending(&dev->pm->work)) ||
	       atomic_test_bit(&dev->pm->flags, PM_DEVICE_FLAG_TRANSITIONING)) {
		ret = k_condvar_wait(&dev->pm->condvar, &dev->pm->lock,
				     K_FOREVER);
		if (ret != 0) {
			break;
		}
	}

	pm_device_runtime_state_set(dev->pm);

	/*
	 * dev->pm->state was set in pm_device_runtime_state_set(). As the
	 * device may not have reached the requested state, or another
	 * thread may have changed it meanwhile, check it here before
	 * returning.
	 */
	ret = (state == dev->pm->state) ? 0 : -EIO;

out_unlock:
	(void)k_mutex_unlock(&dev->pm->lock);
out:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_request, dev, ret);
	return ret;
}
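
/*
 * Illustrative sketch (not part of this file): only the first "get" and
 * the last "put" trigger an actual state transition; intermediate calls
 * just adjust the usage count. Uses the public wrappers defined below;
 * "dev" is a placeholder for a device with runtime PM enabled.
 *
 *	(void)pm_device_get(dev);	// usage 0 -> 1: device resumed
 *	(void)pm_device_get(dev);	// usage 1 -> 2: no state change
 *	(void)pm_device_put(dev);	// usage 2 -> 1: no state change
 *	(void)pm_device_put(dev);	// usage 1 -> 0: device suspended
 */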
int pm_device_get(const struct device *dev)
{
	return pm_device_request(dev, PM_DEVICE_STATE_ACTIVE, 0);
}

int pm_device_get_async(const struct device *dev)
{
	return pm_device_request(dev, PM_DEVICE_STATE_ACTIVE, PM_DEVICE_ASYNC);
}

int pm_device_put(const struct device *dev)
{
	return pm_device_request(dev, PM_DEVICE_STATE_SUSPENDED, 0);
}

int pm_device_put_async(const struct device *dev)
{
	return pm_device_request(dev, PM_DEVICE_STATE_SUSPENDED, PM_DEVICE_ASYNC);
}
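
/*
 * Illustrative sketch (not part of this file): a typical driver keeps the
 * device active only for the duration of a transaction, resuming it
 * synchronously and releasing it lazily. "my_dev_send" and "do_transfer"
 * are hypothetical names.
 *
 *	static int my_dev_send(const struct device *dev, const uint8_t *buf)
 *	{
 *		int ret = pm_device_get(dev);	// resume, blocking
 *
 *		if (ret != 0) {
 *			return ret;
 *		}
 *		ret = do_transfer(dev, buf);	// hardware access
 *		(void)pm_device_put_async(dev);	// suspend via work queue
 *		return ret;
 *	}
 */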
void pm_device_enable(const struct device *dev)
{
	SYS_PORT_TRACING_FUNC_ENTER(pm, device_enable, dev);

	if (k_is_pre_kernel()) {
		dev->pm->dev = dev;
		if (dev->pm_control != NULL) {
			dev->pm->enable = true;
			dev->pm->state = PM_DEVICE_STATE_SUSPENDED;
			k_work_init_delayable(&dev->pm->work, pm_work_handler);
		}
		goto out;
	}

	(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
	if (dev->pm_control == NULL) {
		dev->pm->enable = false;
		goto out_unlock;
	}

	dev->pm->enable = true;

	/* During driver initialization, the device can set the PM state
	 * itself. For the later cases we need to check the usage count
	 * and set the device PM state accordingly.
	 */
	if (!dev->pm->dev) {
		dev->pm->dev = dev;
		dev->pm->state = PM_DEVICE_STATE_SUSPENDED;
		k_work_init_delayable(&dev->pm->work, pm_work_handler);
	} else {
		k_work_schedule(&dev->pm->work, K_NO_WAIT);
	}

out_unlock:
	(void)k_mutex_unlock(&dev->pm->lock);
out:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_enable, dev);
}
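
/*
 * Illustrative sketch (not part of this file): runtime PM is typically
 * enabled at the end of a driver's init function, leaving the device in
 * the SUSPENDED state until the first "get". "my_dev_init" is a
 * hypothetical name.
 *
 *	static int my_dev_init(const struct device *dev)
 *	{
 *		// ...configure hardware...
 *		pm_device_enable(dev);	// device starts out suspended
 *		return 0;
 *	}
 */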
void pm_device_disable(const struct device *dev)
{
	SYS_PORT_TRACING_FUNC_ENTER(pm, device_disable, dev);
	__ASSERT(k_is_pre_kernel() == false, "Device should not be disabled "
		 "before kernel is initialized");

	(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
	if (dev->pm->enable) {
		dev->pm->enable = false;
		/* Bring up the device before disabling runtime PM */
		k_work_schedule(&dev->pm->work, K_NO_WAIT);
	}
	(void)k_mutex_unlock(&dev->pm->lock);
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_disable, dev);
}
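
/*
 * Illustrative sketch (not part of this file): disabling runtime PM
 * schedules a work item that resumes a suspended device (the state
 * handler resumes when "enable" is false), so the device is usable once
 * that work has run. pm_device_wait() below can bound that wait.
 *
 *	pm_device_disable(dev);			// queue resume + disable
 *	(void)pm_device_wait(dev, K_FOREVER);	// wait until settled
 */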
int pm_device_wait(const struct device *dev, k_timeout_t timeout)
{
	int ret = 0;

	k_mutex_lock(&dev->pm->lock, K_FOREVER);
	/* Wait until no work is pending and no transition is in flight */
	while ((k_work_delayable_is_pending(&dev->pm->work)) ||
	       atomic_test_bit(&dev->pm->flags, PM_DEVICE_FLAG_TRANSITIONING)) {
		ret = k_condvar_wait(&dev->pm->condvar, &dev->pm->lock,
				     timeout);
		if (ret != 0) {
			break;
		}
	}
	k_mutex_unlock(&dev->pm->lock);

	return ret;
}
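
/*
 * Illustrative sketch (not part of this file): pairing an asynchronous
 * "put" with pm_device_wait() gives a suspend with a bounded wait.
 * K_MSEC(100) is an arbitrary example timeout.
 *
 *	(void)pm_device_put_async(dev);		// schedule suspend
 *	if (pm_device_wait(dev, K_MSEC(100)) != 0) {
 *		// timed out: transition still in progress
 *	}
 */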