/*
 * Copyright (c) 2020 Actions Technology Co., Ltd
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define LOG_MODULE_CUSTOMER

#ifndef CONFIG_UI_SERVICE
#  undef CONFIG_TRACING
#endif

#include <os_common_api.h>
#include <assert.h>
#include <string.h>
#include <mem_manager.h>
#include <memory/mem_cache.h>
#include <display/sw_draw.h>
#include <display/ui_memsetcpy.h>
#include <ui_mem.h>
#include <ui_surface.h>
#ifdef CONFIG_DMA2D_HAL
#  include <dma2d_hal.h>
#endif
#if defined(CONFIG_TRACING) && defined(CONFIG_UI_SERVICE)
#  include <tracing/tracing.h>
#  include <view_manager.h>
#endif

LOG_MODULE_REGISTER(surface, LOG_LEVEL_INF);

/**********************
 *       DEFINES
 **********************/

#ifdef CONFIG_DMA2D_HAL
#  if defined(CONFIG_LV_COLOR_DEPTH_32) || defined(CONFIG_SURFACE_TRANSFORM_UPDATE)
#    define DMA2D_OPEN_MODE HAL_DMA2D_FULL_MODES /* may require color conversion */
#  else
#    define DMA2D_OPEN_MODE HAL_DMA2D_M2M
#  endif
#endif /* CONFIG_DMA2D_HAL */

/**********************
 *      TYPEDEFS
 **********************/

#ifdef CONFIG_DMA2D_HAL
typedef struct {
	surface_t *surface;
	ui_region_t area;
	os_work work;
	os_sem sem;
	os_mutex mutex;
	uint16_t draw_seq;
	uint16_t cplt_seq;
} surface_delayed_update_t;
#endif /* CONFIG_DMA2D_HAL */

/**********************
 *  STATIC PROTOTYPES
 **********************/
static void _surface_buffer_destroy_cb(struct graphic_buffer *buffer);

#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0
static int _surface_buffer_copy(graphic_buffer_t *dstbuf, const ui_region_t *dst_region,
		const uint8_t *src_buf, uint32_t src_pixel_format, uint16_t src_stride, uint8_t flags);
#endif

#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 1
static void _surface_swapbuf(surface_t *surface);
#elif CONFIG_SURFACE_MAX_BUFFER_COUNT > 0
static inline void _surface_swapbuf(surface_t *surface) { }
#endif

#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0 && defined(CONFIG_DMA2D_HAL)
static void _surface_dma2d_init(void);
static void _surface_dma2d_poll(void);
static void _surface_draw_wait_finish(surface_t *surface);
#else
static inline void _surface_draw_wait_finish(surface_t *surface) { }
#endif

#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 1 && defined(CONFIG_DMA2D_HAL)
static void _surface_swapbuf_wait_finish(surface_t *surface);
#else
static inline void _surface_swapbuf_wait_finish(surface_t *surface) { }
#endif

static void _surface_frame_wait_end(surface_t *surface);
static int _surface_begin_draw_internal(surface_t *surface, uint8_t flags, graphic_buffer_t **drawbuf);
static int _surface_end_draw_internal(surface_t *surface, const ui_region_t *area,
		const void *buf, uint16_t stride, uint32_t pixel_format);
static void _surface_invoke_draw_ready(surface_t *surface);
static void _surface_invoke_post_start(surface_t *surface, const ui_region_t *area, uint8_t flags);

/**********************
 *  STATIC VARIABLES
 **********************/

#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0 && defined(CONFIG_DMA2D_HAL)
static bool dma2d_inited = false;
static hal_dma2d_handle_t hdma2d __in_section_unique(ram.noinit.surface);
static surface_delayed_update_t delayed_update;
#endif /* CONFIG_SURFACE_MAX_BUFFER_COUNT > 0 && CONFIG_DMA2D_HAL */

/**********************
 *   GLOBAL FUNCTIONS
 **********************/

graphic_buffer_t *surface_buffer_create(uint16_t w, uint16_t h,
		uint32_t pixel_format, uint32_t usage)
{
	graphic_buffer_t *buffer = NULL;
	uint32_t stride, data_size;
	uint8_t bits_per_pixel;
	uint8_t multiple;
	void *data;

	bits_per_pixel = hal_pixel_format_get_bits_per_pixel(pixel_format);
	if (bits_per_pixel == 0 || bits_per_pixel > 32) {
		return NULL;
	}

#if 0
	/* require the bytes per row to be 4-byte aligned */
	multiple = 32;

	uint8_t tmp_bpp = bits_per_pixel;
	while ((tmp_bpp & 0x1) == 0) {
		tmp_bpp >>= 1;
		multiple >>= 1;
	}
#else
	/* 1-byte alignment is enough */
	multiple = (bits_per_pixel >= 8) ? 1 : (8 >> (bits_per_pixel >> 1));
#endif

	stride = UI_ROUND_UP(w, multiple);
	data_size = stride * h * bits_per_pixel / 8;

	/* FIXME: if the size is small, allocate from the RES/GUI pools to save memory */
	if (data_size < CONFIG_UI_MEM_BLOCK_SIZE / 16) {
		data = ui_mem_alloc(MEM_RES, data_size, __func__);
		if (data == NULL) {
			data = ui_mem_alloc(MEM_GUI, data_size, __func__);
		}
	} else {
		data = ui_mem_alloc(MEM_FB, data_size, __func__);
	}

	if (data == NULL) {
		SYS_LOG_ERR("surface mem alloc failed");
		return NULL;
	}

	buffer = mem_malloc(sizeof(*buffer));
	if (buffer == NULL) {
		ui_mem_free2(data);
		return NULL;
	}

	if (graphic_buffer_init(buffer, w, h, pixel_format, usage, stride, data)) {
		ui_mem_free2(data);
		mem_free(buffer);
		return NULL;
	}

	graphic_buffer_set_destroy_cb(buffer, _surface_buffer_destroy_cb);
	return buffer;
}
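/*
 * Usage sketch (illustrative only, not part of this file's build): allocate
 * a standalone surface buffer and release it through its destroy callback.
 * The pixel format constant below is an assumption; use whatever
 * HAL_PIXEL_FORMAT_* value your hal_pixel_format header actually defines.
 *
 *	graphic_buffer_t *buf = surface_buffer_create(
 *			240, 240, HAL_PIXEL_FORMAT_RGB_565,
 *			GRAPHIC_BUFFER_SW_MASK | GRAPHIC_BUFFER_HW_MASK);
 *	if (buf) {
 *		// draw into graphic_buffer_get_bufptr(buf, 0, 0) ...
 *		graphic_buffer_destroy(buf);
 *	}
 */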
surface_t *surface_create(uint16_t w, uint16_t h,
		uint32_t pixel_format, uint8_t buf_count, uint8_t flags)
{
	surface_t *surface = mem_malloc(sizeof(*surface));
	if (surface == NULL) {
		return NULL;
	}

	memset(surface, 0, sizeof(*surface));
	surface->width = w;
	surface->height = h;
	surface->pixel_format = pixel_format;
	surface->create_flags = flags;
	atomic_set(&surface->refcount, 1);
	os_sem_init(&surface->post_sem, 0, 1);
	os_sem_init(&surface->frame_sem, 0, 1);

#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0
	if (surface_set_min_buffer_count(surface, buf_count)) {
		surface_destroy(surface);
		return NULL;
	}

#ifdef CONFIG_DMA2D_HAL
	_surface_dma2d_init();
#endif
#else /* CONFIG_SURFACE_MAX_BUFFER_COUNT > 0 */
	if (flags & SURFACE_POST_IN_SYNC_MODE) {
		mem_free(surface);
		return NULL;
	}

	surface_set_continuous_draw_count(surface, 2);
	if (surface->buffers == NULL) {
		mem_free(surface);
		return NULL;
	}
#endif /* CONFIG_SURFACE_MAX_BUFFER_COUNT > 0 */

	return surface;
}
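/*
 * Usage sketch (illustrative only): create a double-buffered surface and
 * tear it down again. The dimensions, format constant, and flag value are
 * assumptions, not a canonical configuration.
 *
 *	surface_t *surface = surface_create(240, 240,
 *			HAL_PIXEL_FORMAT_RGB_565, 2, 0);
 *	if (surface == NULL) {
 *		return -ENOMEM;
 *	}
 *	// ... draw frames and post them ...
 *	surface_destroy(surface);
 */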
void surface_destroy(surface_t *surface)
{
	if (surface == NULL) {
		return;
	}

#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0
	surface_set_max_buffer_count(surface, 0);
#else
	_surface_frame_wait_end(surface);

	while (atomic_get(&surface->post_cnt) > 0) {
		SYS_LOG_DBG("%p wait post", surface);
		os_sem_take(&surface->post_sem, OS_FOREVER);
	}

	mem_free(surface->buffers);
#endif /* CONFIG_SURFACE_MAX_BUFFER_COUNT > 0 */

	if (atomic_dec(&surface->refcount) == 1) {
		mem_free(surface);
	}
}

void surface_register_callback(surface_t *surface,
		int callback_id, surface_callback_t callback_fn, void *user_data)
{
	unsigned int key = os_irq_lock();
	surface->callback[callback_id] = callback_fn;
	surface->user_data[callback_id] = user_data;
	os_irq_unlock(key);
}
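/*
 * Usage sketch (illustrative only): register a post callback. The id and
 * event names (SURFACE_CB_POST, SURFACE_EVT_POST_START) are the ones this
 * file dispatches; the handler signature is sketched from the call sites
 * here and the exact surface_callback_t prototype comes from ui_surface.h.
 *
 *	static void my_post_handler(uint32_t event, void *data, void *user_data)
 *	{
 *		if (event == SURFACE_EVT_POST_START) {
 *			surface_post_data_t *post = data;
 *			// hand post->area off to the display path, then call
 *			// surface_complete_one_post() once the post is consumed
 *		}
 *	}
 *
 *	surface_register_callback(surface, SURFACE_CB_POST, my_post_handler, my_view);
 */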
void surface_set_continuous_draw_count(surface_t *surface, uint8_t count)
{
#if CONFIG_SURFACE_MAX_BUFFER_COUNT == 0
	if (surface->buf_count < count) {
		graphic_buffer_t *buffers = mem_malloc(sizeof(graphic_buffer_t) * count);
		if (buffers) {
			mem_free(surface->buffers);
			surface->buffers = buffers;
			surface->buf_count = count;
			surface->draw_idx = 0;
		}
	}
#endif
}

int surface_set_min_buffer_count(surface_t *surface, uint8_t min_count)
{
#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0
	uint8_t buf_count;
	int res = 0;

	if (min_count > CONFIG_SURFACE_MAX_BUFFER_COUNT)
		return -EDOM;

	if (min_count <= surface->buf_count)
		return 0;

	_surface_frame_wait_end(surface);
	_surface_draw_wait_finish(surface);
	_surface_swapbuf_wait_finish(surface);

	buf_count = surface->buf_count;

	for (int i = buf_count; i < min_count; i++) {
		surface->buffers[i] = surface_buffer_create(surface->width,
				surface->height, surface->pixel_format,
				GRAPHIC_BUFFER_SW_MASK | GRAPHIC_BUFFER_HW_MASK);
		if (surface->buffers[i] == NULL) {
			SYS_LOG_ERR("alloc failed %d", i);
			res = -ENOMEM;
			break;
		}

		buf_count++;
	}

	if (buf_count != surface->buf_count) {
		/* make sure the front buffer index stays the same */
		os_sched_lock();
		surface->buf_count = buf_count;
		surface->draw_idx = buf_count - 1;
		os_sched_unlock();

		/* invalidate the whole area as dirty */
		ui_region_set(&surface->dirty_area, 0, 0, surface->width - 1, surface->height - 1);

		SYS_LOG_DBG("buf count %d", surface->buf_count);
	}

	return res;
#else
	return (min_count > 0) ? -EDOM : 0;
#endif /* CONFIG_SURFACE_MAX_BUFFER_COUNT > 0 */
}

int surface_set_max_buffer_count(surface_t *surface, uint8_t max_count)
{
#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0
	uint8_t buf_count = surface->buf_count;
	uint8_t max_post_cnt = (surface->create_flags & SURFACE_POST_IN_SYNC_MODE) ? 1 : max_count;

	if (max_count >= buf_count)
		return 0;

	_surface_frame_wait_end(surface);
	_surface_draw_wait_finish(surface);
	_surface_swapbuf_wait_finish(surface);

	/* synchronize */
	while (atomic_get(&surface->post_cnt) > max_post_cnt) {
		SYS_LOG_DBG("%p wait post", surface);
		os_sem_take(&surface->post_sem, OS_FOREVER);
	}

	/* make sure the front buffer index stays the same */
	os_sched_lock();

#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 1
	if (buf_count > 1) {
		/*
		 * Keep the post buffer holding the latest content, so the result
		 * of surface_get_post_buffer(), which is used by the view manager,
		 * is unaffected.
		 */
		if (surface->draw_idx == 0) {
			graphic_buffer_t *tmp = surface->buffers[1];
			surface->buffers[1] = surface->buffers[0];
			surface->buffers[0] = tmp;
		}
	}
#endif

	surface->draw_idx = 0;
	surface->post_idx = 0;
	surface->buf_count = max_count;
	os_sched_unlock();

	for (int i = max_count; i < buf_count; i++) {
		graphic_buffer_destroy(surface->buffers[i]);
		surface->buffers[i] = NULL;
	}
#endif /* CONFIG_SURFACE_MAX_BUFFER_COUNT > 0 */

	return 0;
}

int surface_set_buffer_count(surface_t *surface, uint8_t buf_count)
{
#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0
	if (buf_count > surface->buf_count) {
		return surface_set_min_buffer_count(surface, buf_count);
	} else {
		return surface_set_max_buffer_count(surface, buf_count);
	}
#else
	return (buf_count == 0) ? 0 : -EINVAL;
#endif /* CONFIG_SURFACE_MAX_BUFFER_COUNT > 0 */
}
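/*
 * Usage sketch (illustrative only, assuming CONFIG_SURFACE_MAX_BUFFER_COUNT
 * >= 2): shrink to a single buffer to save memory while a view is hidden,
 * then restore double buffering.
 *
 *	surface_set_buffer_count(surface, 1); // frees the extra back buffer
 *	// ...
 *	surface_set_buffer_count(surface, 2); // reallocates; marks the whole surface dirty
 */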
int surface_begin_frame(surface_t *surface)
{
#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0
	bool covered = true;

	if (surface->buf_count == 0) {
		return -ENOBUFS;
	}

#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 1
	if (surface->buf_count == 2) {
		surface_cover_check_data_t cover_check_data = {
			.area = &surface->dirty_area,
			.covered = ui_region_is_empty(&surface->dirty_area),
		};

		if (!cover_check_data.covered && surface->callback[SURFACE_CB_DRAW]) {
			surface->callback[SURFACE_CB_DRAW](SURFACE_EVT_DRAW_COVER_CHECK,
					&cover_check_data, surface->user_data[SURFACE_CB_DRAW]);
		}

		covered = cover_check_data.covered;

		SYS_LOG_DBG("dirty (%d %d %d %d), covered %d",
				surface->dirty_area.x1, surface->dirty_area.y1,
				surface->dirty_area.x2, surface->dirty_area.y2, covered);
	}
#endif /* CONFIG_SURFACE_MAX_BUFFER_COUNT > 1 */

	/* surface_update() may be called in another thread, so synchronization is required */
	_surface_frame_wait_end(surface);
	_surface_draw_wait_finish(surface);

	/* does the surface need a buffer swap? */
	if (!covered) {
		/* wait for a back buffer to become available */
		while (atomic_get(&surface->post_cnt) >= surface->buf_count) {
			SYS_LOG_DBG("%p wait post", surface);
			os_sem_take(&surface->post_sem, OS_FOREVER);
		}

		_surface_swapbuf(surface);
	}

	ui_region_set(&surface->dirty_area, surface->width, surface->height, 0, 0);
#else
	/* surface_update() may be called in another thread, so synchronization is required */
	_surface_frame_wait_end(surface);
#endif /* CONFIG_SURFACE_MAX_BUFFER_COUNT > 0 */

	surface->in_frame = 1;
	return 0;
}

int surface_end_frame(surface_t *surface)
{
	surface->in_frame = 0;
	os_sem_give(&surface->frame_sem);
	return 0;
}
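/*
 * Usage sketch (illustrative only) of the frame-based drawing protocol
 * implemented above: bracket the draw passes with surface_begin_frame()/
 * surface_end_frame(), marking the first and last passes so the surface
 * knows when to swap the back buffer and when to post. A single full-frame
 * pass carries both flags, as the code above also does internally.
 *
 *	graphic_buffer_t *drawbuf;
 *	ui_region_t area = { .x1 = 0, .y1 = 0, .x2 = 239, .y2 = 239 };
 *
 *	if (surface_begin_frame(surface) == 0) {
 *		surface_begin_draw(surface, SURFACE_FIRST_DRAW | SURFACE_LAST_DRAW, &drawbuf);
 *		// ... render into drawbuf ...
 *		surface_end_draw(surface, &area);
 *		surface_end_frame(surface);
 *	}
 */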
int surface_begin_draw(surface_t *surface, uint8_t flags, graphic_buffer_t **drawbuf)
{
#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0
	return _surface_begin_draw_internal(surface, flags, drawbuf);
#else
	return -ENOBUFS;
#endif
}

int surface_end_draw(surface_t *surface, const ui_region_t *area)
{
#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0
	return _surface_end_draw_internal(surface, area, NULL, 0, 0);
#else
	return -ENOBUFS;
#endif
}

int surface_update(surface_t *surface, uint8_t flags,
		const ui_region_t *area, const void *buf,
		uint16_t stride, uint32_t pixel_format)
{
	graphic_buffer_t *drawbuf = NULL;
	int res;

	_surface_draw_wait_finish(surface);

	res = _surface_begin_draw_internal(surface, flags, &drawbuf);
	if (res) {
		return res;
	}

#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0
	res = _surface_buffer_copy(drawbuf, area, buf, pixel_format, stride, flags);

#ifdef CONFIG_DMA2D_HAL
	if (res >= 0) {
		bool completed;

		unsigned int key = os_irq_lock();
		completed = (res == delayed_update.cplt_seq);
		if (completed == false) {
			delayed_update.draw_seq = (uint16_t)res;
			delayed_update.surface = surface;
			memcpy(&delayed_update.area, area, sizeof(*area));
		}
		os_irq_unlock(key);

		if (completed) {
			res = _surface_end_draw_internal(surface, area, NULL, 0, 0);
		} else {
			res = 0;
		}
	} else {
		res = _surface_end_draw_internal(surface, area, NULL, 0, 0);
	}
#else
	res = _surface_end_draw_internal(surface, area, NULL, 0, 0);
#endif /* CONFIG_DMA2D_HAL */
#else /* CONFIG_SURFACE_MAX_BUFFER_COUNT > 0 */
	res = _surface_end_draw_internal(surface, area, buf, stride, pixel_format);
#endif /* CONFIG_SURFACE_MAX_BUFFER_COUNT > 0 */

	return res;
}
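/*
 * Usage sketch (illustrative only): push an externally rendered buffer into
 * the surface in one call. With DMA2D enabled the copy may complete
 * asynchronously, in which case the end-draw step is deferred to the
 * transfer-complete callback. The format constant is an assumption.
 *
 *	ui_region_t area = { .x1 = 0, .y1 = 0, .x2 = 239, .y2 = 239 };
 *
 *	surface_update(surface, SURFACE_FIRST_DRAW | SURFACE_LAST_DRAW,
 *			&area, pixel_data, 240, HAL_PIXEL_FORMAT_RGB_565);
 */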
int surface_wait_for_update(surface_t *surface, int timeout)
{
	_surface_frame_wait_end(surface);
	_surface_draw_wait_finish(surface);
	return 0;
}

int surface_wait_for_refresh(surface_t *surface, int timeout)
{
	uint8_t max_post_cnt = (surface->create_flags & SURFACE_POST_IN_SYNC_MODE) ? 1 : 0;

	_surface_frame_wait_end(surface);
	_surface_draw_wait_finish(surface);
	_surface_swapbuf_wait_finish(surface);

	/* synchronize */
	while (atomic_get(&surface->post_cnt) > max_post_cnt) {
		SYS_LOG_DBG("%p wait post", surface);
		os_sem_take(&surface->post_sem, OS_FOREVER);
	}

	return 0;
}

void surface_complete_one_post(surface_t *surface)
{
	atomic_dec(&surface->post_cnt);
	SYS_LOG_DBG("%p post cplt %d", surface, atomic_get(&surface->post_cnt));
	os_sem_give(&surface->post_sem);

#if CONFIG_SURFACE_MAX_BUFFER_COUNT == 0
	_surface_invoke_draw_ready(surface);
#endif

	if (atomic_dec(&surface->refcount) == 1) {
		mem_free(surface);
	}
}

graphic_buffer_t *surface_get_draw_buffer(surface_t *surface)
{
#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0
	return (surface->buf_count > 0) ? surface->buffers[surface->draw_idx] : NULL;
#else
	return NULL;
#endif
}

graphic_buffer_t *surface_get_post_buffer(surface_t *surface)
{
#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0
	return (surface->buf_count > 0) ? surface->buffers[surface->post_idx] : NULL;
#else
	return &surface->buffers[surface->draw_idx];
#endif
}

uint8_t surface_get_buffer_count(surface_t *surface)
{
#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0
	return surface->buf_count;
#else
	return 0;
#endif
}

uint8_t surface_get_max_possible_buffer_count(void)
{
	return CONFIG_SURFACE_MAX_BUFFER_COUNT;
}
/**********************
 *   STATIC FUNCTIONS
 **********************/

static void _surface_buffer_destroy_cb(struct graphic_buffer *buffer)
{
	assert(buffer != NULL && buffer->data != NULL);

	ui_mem_free2(buffer->data);
	mem_free(buffer);
}

static int _surface_begin_draw_internal(surface_t *surface, uint8_t flags, graphic_buffer_t **drawbuf)
{
#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0
	if (surface->buf_count == 0) {
		SYS_LOG_WRN("surface %p no buffer", surface);
		return -ENOBUFS;
	}

	if (flags & SURFACE_FIRST_DRAW) {
		/* wait for a back buffer to become available */
		while (atomic_get(&surface->post_cnt) >= surface->buf_count) {
			SYS_LOG_DBG("%p wait post", surface);
			os_sem_take(&surface->post_sem, OS_FOREVER);
		}

		_surface_swapbuf_wait_finish(surface);
	}

	*drawbuf = surface->buffers[surface->draw_idx];
#endif /* CONFIG_SURFACE_MAX_BUFFER_COUNT > 0 */

	surface->draw_flags = flags;
	return 0;
}

static int _surface_end_draw_internal(surface_t *surface, const ui_region_t *area,
		const void *buf, uint16_t stride, uint32_t pixel_format)
{
#if defined(CONFIG_TRACING) && defined(CONFIG_UI_SERVICE)
	ui_view_context_t *view = surface->user_data[SURFACE_CB_POST];
	os_strace_end_call_u32(SYS_TRACE_ID_VIEW_DRAW, view->entry->id);
#endif /* CONFIG_TRACING && CONFIG_UI_SERVICE */

#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0
	assert(buf == NULL);

	/* post based on frame */
	ui_region_merge(&surface->dirty_area, &surface->dirty_area, area);

	_surface_invoke_draw_ready(surface);

	if (surface->draw_flags & SURFACE_LAST_DRAW) {
		/* make sure the buffer swap is not interrupted by posting */
		os_sched_lock();

#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 1
		if (surface->buf_count > 1) {
			surface->post_idx ^= 1;
			surface->draw_idx ^= 1;
		}
#endif /* CONFIG_SURFACE_MAX_BUFFER_COUNT > 1 */

		_surface_invoke_post_start(surface, &surface->dirty_area, SURFACE_FIRST_DRAW | SURFACE_LAST_DRAW);
		os_sched_unlock();
	}
#else /* CONFIG_SURFACE_MAX_BUFFER_COUNT > 0 */
	while (atomic_get(&surface->post_cnt) >= surface->buf_count) {
		SYS_LOG_DBG("%p wait post", surface);
		os_sem_take(&surface->post_sem, OS_FOREVER);
	}

	if (++surface->draw_idx == surface->buf_count)
		surface->draw_idx = 0;

	graphic_buffer_init(&surface->buffers[surface->draw_idx],
			ui_region_get_width(area), ui_region_get_height(area), pixel_format,
			GRAPHIC_BUFFER_HW_COMPOSER, stride, (void *)buf);

	_surface_invoke_post_start(surface, area, surface->draw_flags);
#endif /* CONFIG_SURFACE_MAX_BUFFER_COUNT > 0 */

	return 0;
}

static void _surface_invoke_draw_ready(surface_t *surface)
{
	if (surface->callback[SURFACE_CB_DRAW]) {
		surface->callback[SURFACE_CB_DRAW](SURFACE_EVT_DRAW_READY,
				NULL, surface->user_data[SURFACE_CB_DRAW]);
	}
}

static void _surface_invoke_post_start(surface_t *surface, const ui_region_t *area, uint8_t flags)
{
	surface_post_data_t data = { .flags = flags, .area = area, };

	atomic_inc(&surface->refcount);
	atomic_inc(&surface->post_cnt);
	SYS_LOG_DBG("%p post inprog %d", surface, atomic_get(&surface->post_cnt));

	if (surface->callback[SURFACE_CB_POST]) {
		surface->callback[SURFACE_CB_POST](SURFACE_EVT_POST_START,
				&data, surface->user_data[SURFACE_CB_POST]);
	}
}

static void _surface_frame_wait_end(surface_t *surface)
{
	while (surface->in_frame) {
		os_sem_take(&surface->frame_sem, OS_FOREVER);
	}
}

#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 0
#ifdef CONFIG_DMA2D_HAL
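/*
 * How the DMA2D delayed update works (a summary of the code below):
 * _surface_buffer_copy() returns the DMA2D command sequence number of the
 * submitted copy, which surface_update() stores in delayed_update.draw_seq.
 * When _surface_dma2d_xfer_cb() later observes a completed cmd_seq matching
 * draw_seq, it submits delayed_update.work, whose handler performs the
 * deferred _surface_end_draw_internal() and wakes any waiter blocked in
 * _surface_draw_wait_finish().
 */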
static void _surface_draw_wait_finish(surface_t *surface)
{
	os_mutex_lock(&delayed_update.mutex, OS_FOREVER);
	while (delayed_update.surface != NULL) {
		os_sem_take(&delayed_update.sem, OS_FOREVER);
	}
	os_mutex_unlock(&delayed_update.mutex);
}

static void _surface_delay_update_work_handler(os_work *work)
{
	_surface_end_draw_internal(delayed_update.surface, &delayed_update.area, NULL, 0, 0);

	delayed_update.surface = NULL;
	os_sem_give(&delayed_update.sem);
}

static void _surface_dma2d_xfer_cb(hal_dma2d_handle_t *hdma2d, uint16_t cmd_seq, uint32_t error_code)
{
	delayed_update.cplt_seq = cmd_seq;

	if (delayed_update.surface && cmd_seq == delayed_update.draw_seq) {
		os_work_q *queue = os_get_display_work_queue();
		if (queue) {
			os_work_submit_to_queue(queue, &delayed_update.work);
		} else {
			os_work_submit(&delayed_update.work);
		}
	}
}

static void _surface_dma2d_poll(void)
{
	if (dma2d_inited) {
		os_mutex_lock(&delayed_update.mutex, OS_FOREVER);
		hal_dma2d_poll_transfer(&hdma2d, -1);
		os_mutex_unlock(&delayed_update.mutex);
	}
}

static void _surface_dma2d_init(void)
{
	if (dma2d_inited)
		return;

	if (hal_dma2d_init(&hdma2d, DMA2D_OPEN_MODE) == 0) {
		/* register hdma2d callback */
		hal_dma2d_register_callback(&hdma2d, _surface_dma2d_xfer_cb);
		hdma2d.layer_cfg[1].input_alpha = 0xff000000;

		os_work_init(&delayed_update.work, _surface_delay_update_work_handler);
		os_sem_init(&delayed_update.sem, 0, 1);
		os_mutex_init(&delayed_update.mutex);

		dma2d_inited = true;
	}
}

#endif /* CONFIG_DMA2D_HAL */
static int _surface_buffer_copy(graphic_buffer_t *dstbuf,
		const ui_region_t *dst_region, const uint8_t *src,
		uint32_t src_pixel_format, uint16_t src_stride, uint8_t flags)
{
	uint8_t *dst = (uint8_t *)graphic_buffer_get_bufptr(dstbuf, dst_region->x1, dst_region->y1);
	uint8_t dst_px_size = graphic_buffer_get_bits_per_pixel(dstbuf) / 8;
	uint16_t dst_pitch = graphic_buffer_get_stride(dstbuf) * dst_px_size;
	uint16_t dst_w = ui_region_get_width(dst_region);
	uint16_t dst_h = ui_region_get_height(dst_region);
	uint8_t src_px_size = hal_pixel_format_get_bits_per_pixel(src_pixel_format) / 8;
	uint16_t src_pitch = src_stride * src_px_size;
	int res = -EINVAL;

#ifdef CONFIG_DMA2D_HAL
	if (dma2d_inited) {
		os_mutex_lock(&delayed_update.mutex, OS_FOREVER);

		hdma2d.output_cfg.mode = (flags & SURFACE_ROTATED_MASK) ?
				HAL_DMA2D_M2M_TRANSFORM : HAL_DMA2D_M2M;
		hdma2d.output_cfg.output_pitch = dst_pitch;
		hdma2d.output_cfg.color_format = graphic_buffer_get_pixel_format(dstbuf);
		hal_dma2d_config_output(&hdma2d);

		if (flags & SURFACE_ROTATED_90) {
			hdma2d.layer_cfg[1].input_width = dst_h;
			hdma2d.layer_cfg[1].input_height = dst_w;
		} else {
			hdma2d.layer_cfg[1].input_width = dst_w;
			hdma2d.layer_cfg[1].input_height = dst_h;
		}

		hdma2d.layer_cfg[1].color_format = src_pixel_format;
		hdma2d.layer_cfg[1].input_pitch = src_pitch;
		hal_dma2d_config_layer(&hdma2d, HAL_DMA2D_FOREGROUND_LAYER);

		if (flags & SURFACE_ROTATED_MASK) {
			hdma2d.trans_cfg.mode = (flags & SURFACE_ROTATED_90) ? HAL_DMA2D_ROT_90 : 0;
			if (flags & SURFACE_ROTATED_180)
				hdma2d.trans_cfg.mode += HAL_DMA2D_ROT_180;

			hal_dma2d_config_transform(&hdma2d);

			res = hal_dma2d_transform_start(&hdma2d, (uint32_t)src,
					(uint32_t)dst, 0, 0, dst_w, dst_h);
		} else {
			res = hal_dma2d_start(&hdma2d, (uint32_t)src, (uint32_t)dst,
					hdma2d.layer_cfg[1].input_width, hdma2d.layer_cfg[1].input_height);
		}

		if (res < 0) {
			/* fall back to the CPU, so make sure previous DMA2D ops have finished */
			hal_dma2d_poll_transfer(&hdma2d, -1);
		}

		os_mutex_unlock(&delayed_update.mutex);
	}
#endif /* CONFIG_DMA2D_HAL */

	/*
	 * TODO: add software pixel format conversion
	 */
	if (res < 0) {
		if (flags & SURFACE_ROTATED_MASK) {
			SYS_LOG_ERR("no sw rotation");
			return -ENOSYS;
		}

		uint16_t copy_bytes = dst_w * src_px_size;

		dst = mem_addr_to_uncache(dst);

		if (src_pixel_format == graphic_buffer_get_pixel_format(dstbuf)) {
			if (copy_bytes == dst_pitch && copy_bytes == src_pitch) {
				ui_memcpy(dst, mem_addr_to_uncache(src), copy_bytes * dst_h);
				ui_memsetcpy_wait_finish(5000);
			} else {
				for (int j = dst_h; j > 0; j--) {
					mem_dcache_flush(src, copy_bytes);
					memcpy(dst, src, copy_bytes);
					dst += dst_pitch;
					src += src_pitch;
				}
			}
		} else {
			for (int j = dst_h; j > 0; j--) {
				mem_dcache_flush(src, copy_bytes);

				int ret = sw_convert_color_buffer(dst, graphic_buffer_get_pixel_format(dstbuf),
						src, src_pixel_format, dst_w);
				if (ret < 0) {
					SYS_LOG_ERR("sw pixel format convert failed: %x -> %x", src_pixel_format,
							graphic_buffer_get_pixel_format(dstbuf));
					break;
				}

				dst += dst_pitch;
				src += src_pitch;
			}
		}

		mem_writebuf_clean_all();
	}

	return res;
}

#endif /* CONFIG_SURFACE_MAX_BUFFER_COUNT > 0 */
#if CONFIG_SURFACE_MAX_BUFFER_COUNT > 1
static void _surface_swapbuf(surface_t *surface)
{
	graphic_buffer_t *backbuf = surface->buffers[surface->draw_idx];
	graphic_buffer_t *frontbuf = surface->buffers[surface->draw_idx ? 0 : 1];
	uint8_t *frontptr;
	uint16_t pitch, stride;
	uint8_t bytes_per_pixel;

#if defined(CONFIG_TRACING) && defined(CONFIG_UI_SERVICE)
	ui_view_context_t *view = surface->user_data[SURFACE_CB_POST];
	os_strace_u32(SYS_TRACE_ID_VIEW_SWAPBUF, view->entry->id);
#endif

	frontptr = (uint8_t *)graphic_buffer_get_bufptr(
			frontbuf, surface->dirty_area.x1, surface->dirty_area.y1);
	bytes_per_pixel = graphic_buffer_get_bits_per_pixel(frontbuf) / 8;
	stride = graphic_buffer_get_stride(frontbuf);
	pitch = stride * bytes_per_pixel;

#ifdef CONFIG_DMA2D_HAL
	surface->swapping = 1;
	SYS_LOG_DBG("%p swap pending", surface);
#endif /* CONFIG_DMA2D_HAL */

	/* copy the dirty region of the front buffer into the new back buffer */
	_surface_buffer_copy(backbuf, &surface->dirty_area, frontptr, surface->pixel_format, stride, 0);

#if defined(CONFIG_TRACING) && defined(CONFIG_UI_SERVICE)
	os_strace_end_call_u32(SYS_TRACE_ID_VIEW_SWAPBUF, view->entry->id);
#endif
}

#ifdef CONFIG_DMA2D_HAL
static void _surface_swapbuf_wait_finish(surface_t *surface)
{
	if (surface->swapping) {
		_surface_dma2d_poll();
		surface->swapping = 0;
	}
}
#endif /* CONFIG_DMA2D_HAL */
#endif /* CONFIG_SURFACE_MAX_BUFFER_COUNT > 1 */