lvgl_virtual_display.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746
  1. /*
  2. * Copyright (c) 2020 Actions Technology Co., Ltd
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. /**********************
  7. * INCLUDES
  8. **********************/
  9. #include <os_common_api.h>
  10. #include <memory/mem_cache.h>
  11. #include <display/display_hal.h>
  12. #include <ui_mem.h>
  13. #include <lvgl/lvgl.h>
  14. #include <lvgl/porting/lvgl_porting.h>
  15. #include <lvgl/lvgl_memory.h>
  16. #include <lvgl/lvgl_virtual_display.h>
  17. #if defined(CONFIG_TRACING) && defined(CONFIG_UI_SERVICE)
  18. # include <tracing/tracing.h>
  19. # include <view_manager.h>
  20. #endif
  21. /**********************
  22. * DEFINES
  23. **********************/
  24. #ifdef _WIN32
  25. # define USE_GPU_WAIT_ASYNC 0
  26. #else
  27. # define USE_GPU_WAIT_ASYNC 1
  28. #endif
  29. /**********************
  30. * TYPEDEFS
  31. **********************/
  32. typedef struct lvgl_disp_drv_data {
  33. surface_t *surface;
  34. #if CONFIG_LV_VDB_NUM == 0
  35. os_sem wait_sem;
  36. #endif
  37. uint32_t flush_pixel_format;
  38. ui_region_t flush_area;
  39. uint8_t flush_idx;
  40. uint8_t flush_flag;
  41. #if defined(CONFIG_TRACING) && defined(CONFIG_UI_SERVICE)
  42. uint32_t frame_cnt;
  43. #endif
  44. lv_disp_drv_t *disp_drv;
  45. } lvgl_disp_drv_data_t;
  46. typedef struct lvgl_async_flush_ctx {
  47. bool initialized;
  48. bool enabled;
  49. bool flushing;
  50. lv_color_t * flush_buf;
  51. os_work flush_work;
  52. os_sem flush_sem;
  53. lvgl_disp_drv_data_t *disp_data;
  54. } lvgl_async_flush_ctx_t;
  55. /**********************
  56. * STATIC PROTOTYPES
  57. **********************/
  58. #if CONFIG_LV_VDB_NUM > 0
  59. static void _lvgl_draw_buf_init_shared_vdb(lv_disp_draw_buf_t * drawbuf);
  60. #endif
  61. static int _lvgl_draw_buf_alloc(lv_disp_drv_t * disp_drv, surface_t * surface);
  62. static void _lvgl_render_start_cb(lv_disp_drv_t * disp_drv);
  63. static void _lvgl_render_area_start_cb(lv_disp_drv_t * disp_drv, const lv_area_t * area);
  64. static void _lvgl_flush_cb(lv_disp_drv_t * disp_drv, const lv_area_t * area, lv_color_t * color_p);
  65. static void _lvgl_rounder_cb(lv_disp_drv_t * disp_drv, lv_area_t * area);
  66. static void _lvgl_wait_cb(lv_disp_drv_t * disp_drv);
  67. static void _lvgl_surface_draw_cb(uint32_t event, void * data, void * user_data);
  68. #if USE_GPU_WAIT_ASYNC
  69. static void _lvgl_init_async_flush_ctx(void);
  70. static void _lvgl_flush_async_cb(void * work);
  71. static void _lvgl_flush_async_lowerhalf_cb(os_work * work);
  72. #endif
  73. static uint8_t _lvgl_rotate_flag_from_surface(uint16_t rotation);
  74. #ifdef CONFIG_SURFACE_TRANSFORM_UPDATE
  75. static uint8_t _lvgl_rotate_flag_to_surface(lv_disp_drv_t *disp_drv);
  76. static void _lvgl_rotate_area_to_surface(lv_disp_drv_t *disp_drv,
  77. ui_region_t *region, const lv_area_t *area);
  78. #endif
  79. /**********************
  80. * STATIC VARIABLES
  81. **********************/
  82. #if CONFIG_LV_VDB_NUM > 0
  83. #if CONFIG_LV_VDB_NUM >= 4
  84. # define LV_VDB_NUM 4
  85. #elif CONFIG_LV_VDB_NUM >= 2
  86. # define LV_VDB_NUM 2
  87. #else
  88. # define LV_VDB_NUM 1
  89. #endif
  90. /* NOTE:
  91. * (1) depending on chosen color depth buffer may be accessed using uint8_t *,
  92. * uint16_t * or uint32_t *, therefore buffer needs to be aligned accordingly to
  93. * prevent unaligned memory accesses.
  94. * (2) must align each buffer address and size to psram cache line size (32 bytes)
  95. * if allocated in psram.
  96. * (3) Verisilicon vg_lite buffer memory requires 64 bytes aligned
  97. */
  98. #ifdef CONFIG_VG_LITE
  99. # define BUFFER_ALIGN 64
  100. #else
  101. # define BUFFER_ALIGN 32
  102. #endif
  103. #ifndef CONFIG_UI_MEM_VDB_SHARE_SURFACE_BUFFER
  104. #if CONFIG_LV_VDB_SIZE <= 0
  105. # error CONFIG_LV_VDB_SIZE must greater than 0
  106. #endif
  107. #if LV_COLOR_SCREEN_TRANSP
  108. # define BUFFER_SIZE (((CONFIG_LV_VDB_SIZE * LV_IMG_PX_SIZE_ALPHA_BYTE) + (BUFFER_ALIGN - 1)) & ~(BUFFER_ALIGN - 1))
  109. # define NBR_PIXELS_IN_BUFFER (BUFFER_SIZE / LV_IMG_PX_SIZE_ALPHA_BYTE)
  110. #else
  111. # define BUFFER_SIZE (((CONFIG_LV_VDB_SIZE * LV_COLOR_SIZE / 8) + (BUFFER_ALIGN - 1)) & ~(BUFFER_ALIGN - 1))
  112. # define NBR_PIXELS_IN_BUFFER (BUFFER_SIZE * 8 / LV_COLOR_SIZE)
  113. #endif
  114. static uint8_t vdb_buf_0[BUFFER_SIZE] __aligned(BUFFER_ALIGN) __in_section_unique(lvgl.noinit.vdb.0);
  115. #if LV_VDB_NUM >= 2
  116. static uint8_t vdb_buf_1[BUFFER_SIZE] __aligned(BUFFER_ALIGN) __in_section_unique(lvgl.noinit.vdb.1);
  117. #endif
  118. #if LV_VDB_NUM >= 4
  119. static uint8_t vdb_buf_2[BUFFER_SIZE] __aligned(BUFFER_ALIGN) __in_section_unique(lvgl.noinit.vdb.2);
  120. static uint8_t vdb_buf_3[BUFFER_SIZE] __aligned(BUFFER_ALIGN) __in_section_unique(lvgl.noinit.vdb.3);
  121. #endif
  122. #endif /* CONFIG_UI_MEM_VDB_SHARE_SURFACE_BUFFER */
  123. static lv_disp_draw_buf_t g_disp_drawbuf;
  124. static os_sem g_drawbuf_wait_sem;
  125. #endif /* CONFIG_LV_VDB_NUM > 0*/
  126. /* active display with indev attached to */
  127. static lv_disp_t * g_act_disp = NULL;
  128. #if USE_GPU_WAIT_ASYNC
  129. static lvgl_async_flush_ctx_t g_async_ctx;
  130. #endif
  131. /**********************
  132. * GLOBAL FUNCTIONS
  133. **********************/
  134. lv_disp_t * lvgl_virtual_display_create(surface_t * surface)
  135. {
  136. lv_disp_t *disp = NULL;
  137. lv_disp_drv_t *disp_drv = NULL;
  138. lvgl_disp_drv_data_t *drv_data = NULL;
  139. disp_drv = lv_mem_alloc(sizeof(*disp_drv));
  140. if (disp_drv == NULL) {
  141. LV_LOG_ERROR("Failed to alloc driver");
  142. return NULL;
  143. }
  144. lv_disp_drv_init(disp_drv);
  145. lv_port_gpu_init(disp_drv);
  146. disp_drv->screen_transp = hal_pixel_format_is_opaque(surface->pixel_format) ? 0 : 1;
  147. #if LV_COLOR_SCREEN_TRANSP == 0
  148. if (disp_drv->screen_transp) {
  149. LV_LOG_ERROR("Must enable LV_COLOR_SCREEN_TRANSP to support screen transp");
  150. goto fail_free_drv;
  151. }
  152. #endif
  153. drv_data = lv_mem_alloc(sizeof(*drv_data));
  154. if (drv_data == NULL) {
  155. LV_LOG_ERROR("Failed to allocate dirver data");
  156. goto fail_free_drv;
  157. }
  158. memset(drv_data, 0, sizeof(*drv_data));
  159. #if CONFIG_LV_VDB_NUM == 0
  160. os_sem_init(&drv_data->wait_sem, 0, 1);
  161. #endif
  162. drv_data->surface = surface;
  163. drv_data->disp_drv = disp_drv;
  164. drv_data->flush_pixel_format = lvgl_img_cf_to_display(
  165. disp_drv->screen_transp ? LV_IMG_CF_TRUE_COLOR_ALPHA : LV_IMG_CF_TRUE_COLOR, NULL);
  166. disp_drv->user_data = drv_data;
  167. if (_lvgl_draw_buf_alloc(disp_drv, surface)) {
  168. goto fail_free_drv_data;
  169. }
  170. disp_drv->hor_res = surface_get_width(surface);
  171. disp_drv->ver_res = surface_get_height(surface);
  172. disp_drv->rotated = _lvgl_rotate_flag_from_surface(surface_get_orientation(surface));
  173. #ifndef CONFIG_SURFACE_TRANSFORM_UPDATE
  174. disp_drv->sw_rotate = (disp_drv->rotated == LV_DISP_ROT_NONE) ? 0 : 1;
  175. #endif
  176. disp_drv->flush_cb = _lvgl_flush_cb;
  177. disp_drv->wait_cb = _lvgl_wait_cb;
  178. disp_drv->render_start_cb = _lvgl_render_start_cb;
  179. disp_drv->render_area_start_cb = _lvgl_render_area_start_cb;
  180. if (surface_get_max_possible_buffer_count() == 0) {
  181. disp_drv->rounder_cb = _lvgl_rounder_cb;
  182. }
  183. disp_drv->draw_ctx = lv_mem_alloc(disp_drv->draw_ctx_size);
  184. if (disp_drv->draw_ctx == NULL) {
  185. LV_LOG_ERROR("Failed to allocate dirver ctx");
  186. goto fail_free_draw_buf;
  187. }
  188. disp_drv->draw_ctx_init(disp_drv, disp_drv->draw_ctx);
  189. disp = lv_disp_drv_register(disp_drv);
  190. if (disp == NULL) {
  191. LV_LOG_ERROR("Failed to register driver.");
  192. goto fail_free_draw_ctx;
  193. }
  194. lv_disp_set_bg_color(disp, lv_color_black());
  195. lv_disp_set_bg_opa(disp, disp_drv->screen_transp ? LV_OPA_TRANSP : LV_OPA_COVER);
  196. /* Skip drawing the empty frame */
  197. lv_timer_pause(disp->refr_timer);
  198. surface_register_callback(surface, SURFACE_CB_DRAW, _lvgl_surface_draw_cb, disp);
  199. surface_set_continuous_draw_count(surface, disp_drv->draw_buf->buf_cnt);
  200. #if USE_GPU_WAIT_ASYNC
  201. _lvgl_init_async_flush_ctx();
  202. #endif /*USE_GPU_WAIT_ASYNC*/
  203. LV_LOG_INFO("disp %p created\n", disp);
  204. return disp;
  205. fail_free_draw_ctx:
  206. if (disp_drv->draw_ctx_deinit) {
  207. disp_drv->draw_ctx_deinit(disp_drv, disp_drv->draw_ctx);
  208. }
  209. lv_mem_free(disp_drv->draw_ctx);
  210. fail_free_draw_buf:
  211. #if CONFIG_LV_VDB_NUM == 0
  212. lv_mem_free(disp_drv->draw_buf);
  213. #endif
  214. fail_free_drv_data:
  215. lv_mem_free(disp_drv->user_data);
  216. fail_free_drv:
  217. lv_mem_free(disp_drv);
  218. return NULL;
  219. }
/**
 * Destroy a virtual display created by lvgl_virtual_display_create().
 *
 * Teardown order matters:
 *  1. block until any in-flight flush completes,
 *  2. detach the surface draw callback so no further events arrive,
 *  3. drop input focus if this display currently holds it,
 *  4. unregister from LVGL, then release driver resources.
 */
void lvgl_virtual_display_destroy(lv_disp_t * disp)
{
	lv_disp_drv_t *disp_drv = disp->driver;
	lvgl_disp_drv_data_t *drv_data = disp_drv->user_data;

	/* wait for any pending flush before tearing anything down */
	while (lv_disp_flush_is_finished(disp_drv) == false) {
		_lvgl_wait_cb(disp_drv);
	}

	/* unregister surface callback */
	surface_register_callback(drv_data->surface, SURFACE_CB_DRAW, NULL, NULL);

	/* detach the input devices if this display had focus */
	if (g_act_disp == disp) {
		lvgl_virtual_display_set_focus(NULL, true);
	}

	lv_disp_remove(disp);

	if (disp_drv->draw_ctx_deinit) {
		disp_drv->draw_ctx_deinit(disp_drv, disp_drv->draw_ctx);
	}

#if CONFIG_LV_VDB_NUM == 0
	/* draw_buf was lv_mem_alloc'ed per display in _lvgl_draw_buf_alloc();
	 * with VDBs the shared g_disp_drawbuf is static and must not be freed */
	lv_mem_free(disp_drv->draw_buf);
#endif
	lv_mem_free(disp_drv->draw_ctx);
	lv_mem_free(disp_drv->user_data);
	lv_mem_free(disp_drv);

	LV_LOG_INFO("disp %p destroyed\n", disp);
}
  244. int lvgl_virtual_display_set_default(lv_disp_t * disp)
  245. {
  246. /* Set default display */
  247. lv_disp_set_default(disp);
  248. return 0;
  249. }
  250. int lvgl_virtual_display_set_focus(lv_disp_t * disp, bool reset_indev)
  251. {
  252. if (disp == g_act_disp) {
  253. return 0;
  254. }
  255. if (disp == NULL) {
  256. LV_LOG_INFO("no active display\n");
  257. }
  258. /* Retach the input devices */
  259. lv_indev_t *indev = lv_indev_get_next(NULL);
  260. while (indev) {
  261. /* Reset the indev when focus changed */
  262. if (reset_indev) {
  263. lv_indev_reset(indev, NULL);
  264. } else {
  265. lv_indev_wait_release(indev);
  266. }
  267. indev->driver->disp = disp;
  268. indev = lv_indev_get_next(indev);
  269. LV_LOG_INFO("indev %p attached to disp %p\n", indev, disp);
  270. }
  271. /* Set default display */
  272. lv_disp_set_default(disp);
  273. g_act_disp = disp;
  274. return 0;
  275. }
  276. int lvgl_virtual_display_update(lv_disp_t * disp, uint16_t rotation)
  277. {
  278. lv_disp_drv_t * disp_drv = disp->driver;
  279. disp_drv->rotated = _lvgl_rotate_flag_from_surface(rotation);
  280. #ifndef CONFIG_SURFACE_TRANSFORM_UPDATE
  281. disp_drv->sw_rotate = (disp_drv->rotated == LV_DISP_ROT_NONE) ? 0 : 1;
  282. #endif
  283. lv_disp_drv_update(disp, disp_drv);
  284. return 0;
  285. }
  286. /**********************
  287. * STATIC FUNCTIONS
  288. **********************/
  289. #if CONFIG_LV_VDB_NUM > 0
/*
 * Initialize the shared LVGL draw-buffer (VDB) set used by all displays.
 *
 * Two layouts:
 *  - CONFIG_UI_MEM_VDB_SHARE_SURFACE_BUFFER: carve LV_VDB_NUM equal,
 *    BUFFER_ALIGN-aligned slices out of the shared surface buffer. A
 *    non-zero CONFIG_LV_VDB_SIZE (in pixels) caps the per-buffer pixel
 *    count when smaller than a full slice.
 *  - otherwise: use the statically allocated vdb_buf_N arrays sized by
 *    NBR_PIXELS_IN_BUFFER.
 */
static void _lvgl_draw_buf_init_shared_vdb(lv_disp_draw_buf_t * drawbuf)
{
#ifdef CONFIG_UI_MEM_VDB_SHARE_SURFACE_BUFFER
	uint8_t *bufs[LV_VDB_NUM];
	unsigned int total_size = ui_mem_get_share_surface_buffer_size();
	/* per-buffer slice, rounded down to the cache/GPU alignment */
	unsigned int max_buf_size = ((total_size / LV_VDB_NUM) & ~(BUFFER_ALIGN - 1));
	/* bytes per pixel depends on whether alpha is rendered */
	uint8_t px_size = LV_COLOR_SCREEN_TRANSP ? LV_IMG_PX_SIZE_ALPHA_BYTE : (LV_COLOR_SIZE / 8);
	unsigned int max_nbr_pixels = (max_buf_size / px_size);
	unsigned int buf_size;
	unsigned int nbr_pixels;
	int i;

	if (CONFIG_LV_VDB_SIZE > 0 && CONFIG_LV_VDB_SIZE < max_nbr_pixels) {
		nbr_pixels = CONFIG_LV_VDB_SIZE;
		/* round the byte size back up to the alignment */
		buf_size = (nbr_pixels * px_size + BUFFER_ALIGN - 1) & ~(BUFFER_ALIGN - 1);
	} else {
		nbr_pixels = max_nbr_pixels; /* auto compute LV_VDB_SIZE if 0 */
		buf_size = max_buf_size;
	}

	/* lay the buffers out back-to-back inside the shared surface buffer */
	bufs[0] = ui_mem_get_share_surface_buffer();
	for (i = 1; i < LV_VDB_NUM; i++) {
		bufs[i] = bufs[i - 1] + buf_size;
	}
#else /* CONFIG_UI_MEM_VDB_SHARE_SURFACE_BUFFER */
	uint8_t *bufs[LV_VDB_NUM] = {
		vdb_buf_0,
#if LV_VDB_NUM >= 2
		vdb_buf_1,
#endif
#if LV_VDB_NUM >= 4
		vdb_buf_2,
		vdb_buf_3,
#endif
	};
	unsigned int nbr_pixels = NBR_PIXELS_IN_BUFFER;
#endif /* CONFIG_UI_MEM_VDB_SHARE_SURFACE_BUFFER */

	LV_LOG_INFO("LVGL VDB: size %u, num %u\n", nbr_pixels, LV_VDB_NUM);

	lv_disp_draw_buf_init2(drawbuf, (void **)bufs, LV_VDB_NUM, nbr_pixels);
}
  328. #endif /* CONFIG_LV_VDB_NUM > 0 */
/*
 * Attach a draw buffer to the display driver.
 *
 * With VDBs (CONFIG_LV_VDB_NUM > 0): lazily initialize and share the
 * static g_disp_drawbuf between all displays.
 *
 * Without VDBs: direct mode — render straight into the surface's own
 * graphic buffers, which requires the surface to actually own buffers
 * and to match the driver's flush pixel format.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int _lvgl_draw_buf_alloc(lv_disp_drv_t * disp_drv, surface_t * surface)
{
#if CONFIG_LV_VDB_NUM > 0
	/* first display triggers the one-time shared VDB setup */
	if (g_disp_drawbuf.buf_cnt == 0) {
		_lvgl_draw_buf_init_shared_vdb(&g_disp_drawbuf);
	}

	disp_drv->draw_buf = &g_disp_drawbuf;
	os_sem_init(&g_drawbuf_wait_sem, 0, 1);
	return 0;
#else
	lvgl_disp_drv_data_t *drv_data = disp_drv->user_data;

	if (surface_get_max_possible_buffer_count() == 0) {
		LV_LOG_ERROR("no vdb, must increase CONFIG_SURFACE_MAX_BUFFER_COUNT to use direct mode");
		return -EINVAL;
	}

	if (surface->pixel_format != drv_data->flush_pixel_format) {
		LV_LOG_ERROR("pixel format %x must match in direct mode", surface->pixel_format);
		return -EINVAL;
	}

	disp_drv->draw_buf = lv_mem_alloc(sizeof(lv_disp_draw_buf_t));
	if (disp_drv->draw_buf == NULL) {
		LV_LOG_ERROR("draw buf alloc failed");
		return -ENOMEM;
	}

	LV_LOG_INFO("no vdb, use direct mode");

	graphic_buffer_t *buf1 = surface_get_draw_buffer(surface);
	graphic_buffer_t *buf2 = surface_get_post_buffer(surface);
	LV_ASSERT(buf1 != NULL);

	/* NOTE(review): pixel count comes from hor_res * ver_res — the caller
	 * must have set these before invoking this function; verify call order
	 * in lvgl_virtual_display_create(). */
	lv_disp_draw_buf_init(disp_drv->draw_buf,
			buf1->data, (buf2 == NULL || buf2 == buf1) ? NULL : buf2->data,
			disp_drv->hor_res * disp_drv->ver_res);

	disp_drv->direct_mode = 1;
	return 0;
#endif /* CONFIG_LV_VDB_NUM > 0*/
}
  364. static void _lvgl_render_start_cb(lv_disp_drv_t * disp_drv)
  365. {
  366. lvgl_disp_drv_data_t *drv_data = disp_drv->user_data;
  367. surface_begin_frame(drv_data->surface);
  368. if (disp_drv->direct_mode) {
  369. graphic_buffer_t *drawbuf = NULL;
  370. surface_begin_draw(drv_data->surface, SURFACE_FIRST_DRAW | SURFACE_LAST_DRAW, &drawbuf);
  371. LV_ASSERT(drawbuf != NULL && drawbuf->data == disp_drv->draw_buf->buf_act);
  372. }
  373. }
  374. static void _lvgl_render_area_start_cb(lv_disp_drv_t * disp_drv, const lv_area_t * area)
  375. {
  376. lvgl_disp_drv_data_t *drv_data = disp_drv->user_data;
  377. #if defined(CONFIG_TRACING) && defined(CONFIG_UI_SERVICE)
  378. ui_view_context_t *view = drv_data->surface->user_data[SURFACE_CB_POST];
  379. os_strace_u32x7(SYS_TRACE_ID_VIEW_DRAW, view->entry->id, drv_data->frame_cnt,
  380. drv_data->flush_idx, area->x1, area->y1, area->x2, area->y2);
  381. #endif /* CONFIG_TRACING */
  382. if (disp_drv->direct_mode) {
  383. if (drv_data->flush_idx == 0) {
  384. drv_data->flush_area.x1 = area->x1;
  385. drv_data->flush_area.y1 = area->y1;
  386. drv_data->flush_area.x2 = area->x2;
  387. drv_data->flush_area.y2 = area->y2;
  388. } else {
  389. drv_data->flush_area.x1 = LV_MIN(drv_data->flush_area.x1, area->x1);
  390. drv_data->flush_area.y1 = LV_MIN(drv_data->flush_area.y1, area->y1);
  391. drv_data->flush_area.x2 = LV_MAX(drv_data->flush_area.x2, area->x2);
  392. drv_data->flush_area.y2 = LV_MAX(drv_data->flush_area.y2, area->y2);
  393. }
  394. }
  395. }
/*
 * LVGL flush callback: hand one rendered area to the surface.
 *
 * Responsibilities:
 *  - clean the dcache for CPU-written (sw-rotated) buffers,
 *  - serialize with a still-running async flush of the previous area,
 *  - tag the flush with FIRST_DRAW/LAST_DRAW (and rotation) flags,
 *  - direct mode: end the draw/frame once on the last flush,
 *  - VDB mode: push the area via surface_update(), synchronously or
 *    through the async flush work queue.
 *
 * On surface failure lv_disp_flush_ready() is called immediately so LVGL
 * does not deadlock waiting for a completion that will never arrive.
 */
static void _lvgl_flush_cb(lv_disp_drv_t * disp_drv,
		const lv_area_t * area, lv_color_t * color_p)
{
	lvgl_disp_drv_data_t *drv_data = disp_drv->user_data;
	int res = 0;

	/* sw-rotated output is written by the CPU: make it visible to HW */
	if (disp_drv->sw_rotate) {
		mem_dcache_clean(color_p, lv_area_get_size(area) * sizeof(lv_color_t));
		mem_dcache_sync();
	}

	/*Flush the memory dcache*/
	if (disp_drv->clean_dcache_cb)
		disp_drv->clean_dcache_cb(disp_drv);

	if (!disp_drv->direct_mode) {
#if USE_GPU_WAIT_ASYNC
		if (g_async_ctx.enabled) {
			/* wait until the previous async flush released the VDB */
			while (g_async_ctx.flushing)
				os_sem_take(&g_async_ctx.flush_sem, OS_FOREVER);
		} else
#endif
		{
			/*Flush the rendered content to the display*/
			lv_draw_wait_for_finish(disp_drv->draw_ctx);
		}
	}

#ifdef CONFIG_SURFACE_TRANSFORM_UPDATE
	/* let the surface apply the rotation during the update */
	drv_data->flush_flag = _lvgl_rotate_flag_to_surface(disp_drv);
	if (drv_data->flush_idx == 0)
		drv_data->flush_flag |= SURFACE_FIRST_DRAW;
#else
	drv_data->flush_flag = (drv_data->flush_idx == 0) ? SURFACE_FIRST_DRAW : 0;
#endif

	if (lv_disp_flush_is_last(disp_drv)) {
		drv_data->flush_flag |= SURFACE_LAST_DRAW;
		drv_data->flush_idx = 0;   /* next flush starts a new frame */
#if defined(CONFIG_TRACING) && defined(CONFIG_UI_SERVICE)
		drv_data->frame_cnt++;
#endif
	} else {
		drv_data->flush_idx++;
	}

	LV_LOG_TRACE("lvgl flush: flag %x, buf %p, area (%d %d %d %d), render %u, flush %u\n",
			drv_data->flush_flag, color_p, area->x1, area->y1, area->x2, area->y2,
			disp_drv->draw_buf->rendering_idx, disp_drv->draw_buf->flushing_idx);

	/* FIXME:
	 * wait_for_finish() is already called in LVGL for the last flush for compability.
	 */
	if (disp_drv->direct_mode) {
		/* direct mode: the surface already owns the buffer; only the last
		 * flush of the frame triggers the actual update */
		if (drv_data->flush_flag & SURFACE_LAST_DRAW) {
			res = surface_end_draw(drv_data->surface, &drv_data->flush_area);
			surface_end_frame(drv_data->surface);
			if (res) {
				LV_LOG_ERROR("surface update failed");
				lv_disp_flush_ready(disp_drv);
			}
		}
	} else {
#ifdef CONFIG_SURFACE_TRANSFORM_UPDATE
		/* translate the area into post-rotation surface coordinates */
		_lvgl_rotate_area_to_surface(disp_drv, &drv_data->flush_area, area);
#else
		ui_region_set(&drv_data->flush_area, area->x1, area->y1, area->x2, area->y2);
#endif

#if USE_GPU_WAIT_ASYNC
		if (g_async_ctx.enabled) {
			/* defer surface_update() until the GPU signals completion */
			g_async_ctx.flushing = true;
			g_async_ctx.flush_buf = color_p;
			g_async_ctx.disp_data = drv_data;
			lv_port_gpu_insert_event(_lvgl_flush_async_cb, &g_async_ctx.flush_work);
		} else
#endif
		{
			res = surface_update(drv_data->surface, drv_data->flush_flag,
					&drv_data->flush_area, color_p, lv_area_get_width(area),
					drv_data->flush_pixel_format);
			if (drv_data->flush_flag & SURFACE_LAST_DRAW) {
				surface_end_frame(drv_data->surface);
			}
		}

		if (res) {
			LV_LOG_ERROR("surface update failed");
			lv_disp_flush_ready(disp_drv);
		}
	}
}
  479. #if USE_GPU_WAIT_ASYNC
  480. static void _lvgl_init_async_flush_ctx(void)
  481. {
  482. if (g_async_ctx.initialized == false) {
  483. g_async_ctx.initialized = true;
  484. g_async_ctx.enabled = (lvgl_display_get_flush_workq() != NULL);
  485. os_sem_init(&g_async_ctx.flush_sem, 0, 1);
  486. os_work_init(&g_async_ctx.flush_work, _lvgl_flush_async_lowerhalf_cb);
  487. }
  488. }
/*
 * GPU-event upper half: runs when the GPU reaches the inserted event and
 * hands the queued flush work (the context's flush_work) to the display
 * flush work queue, where _lvgl_flush_async_lowerhalf_cb() executes it.
 */
static void _lvgl_flush_async_cb(void * work)
{
	os_work_submit_to_queue(lvgl_display_get_flush_workq(), work);
}
  493. static void _lvgl_flush_async_lowerhalf_cb(os_work * work)
  494. {
  495. lvgl_disp_drv_data_t *drv_data = g_async_ctx.disp_data;
  496. #ifdef CONFIG_SURFACE_TRANSFORM_UPDATE
  497. uint16_t stride = (drv_data->flush_flag & SURFACE_ROTATED_90) ?
  498. ui_region_get_height(&drv_data->flush_area) :
  499. ui_region_get_width(&drv_data->flush_area);
  500. #else
  501. uint16_t stride = ui_region_get_width(&drv_data->flush_area);
  502. #endif
  503. LV_LOG_TRACE("async flush (%d): flag %x, buf %p, stride %u, area (%d %d %d %d)\n",
  504. res, drv_data->flush_flag, g_async_ctx.flush_buf, stride,
  505. drv_data->flush_area.x1, drv_data->flush_area.y1,
  506. drv_data->flush_area.x2, drv_data->flush_area.y2);
  507. int res = surface_update(drv_data->surface, drv_data->flush_flag, &drv_data->flush_area,
  508. g_async_ctx.flush_buf, stride, drv_data->flush_pixel_format);
  509. if (res < 0)
  510. lv_disp_flush_ready(drv_data->disp_drv);
  511. if (drv_data->flush_flag & SURFACE_LAST_DRAW) {
  512. surface_end_frame(drv_data->surface);
  513. }
  514. g_async_ctx.flushing = false;
  515. os_sem_give(&g_async_ctx.flush_sem);
  516. }
  517. #endif /* USE_GPU_WAIT_ASYNC */
  518. static uint8_t _lvgl_rotate_flag_from_surface(uint16_t rotation)
  519. {
  520. LV_ASSERT(rotation <= 270);
  521. switch (rotation) {
  522. case 90:
  523. return LV_DISP_ROT_270;
  524. case 180:
  525. return LV_DISP_ROT_180;
  526. case 270:
  527. return LV_DISP_ROT_90;
  528. default:
  529. return LV_DISP_ROT_NONE;
  530. }
  531. }
  532. #ifdef CONFIG_SURFACE_TRANSFORM_UPDATE
  533. static uint8_t _lvgl_rotate_flag_to_surface(lv_disp_drv_t * disp_drv)
  534. {
  535. static const uint8_t flags[] = {
  536. 0, SURFACE_ROTATED_270, SURFACE_ROTATED_180, SURFACE_ROTATED_90,
  537. };
  538. return flags[disp_drv->rotated];
  539. }
/*
 * Transform a rendered area from LVGL (rotated) coordinates into surface
 * coordinates, per the driver's rotation flag.
 *
 * Coordinates are inclusive, hence the "- 1" when mirroring against the
 * hor_res/ver_res edge. The comments give the equivalent clockwise
 * rotation applied to the area.
 */
static void _lvgl_rotate_area_to_surface(lv_disp_drv_t * disp_drv,
		ui_region_t * region, const lv_area_t * area)
{
	switch (disp_drv->rotated) {
	case LV_DISP_ROT_90: /* 270 degree clockwise rotation */
		region->x1 = area->y1;
		region->y1 = disp_drv->hor_res - area->x2 - 1;
		region->x2 = area->y2;
		region->y2 = disp_drv->hor_res - area->x1 - 1;
		break;
	case LV_DISP_ROT_180: /* 180 degree clockwise rotation */
		region->x1 = disp_drv->hor_res - area->x2 - 1;
		region->y1 = disp_drv->ver_res - area->y2 - 1;
		region->x2 = disp_drv->hor_res - area->x1 - 1;
		region->y2 = disp_drv->ver_res - area->y1 - 1;
		break;
	case LV_DISP_ROT_270: /* 90 degree clockwise rotation */
		region->x1 = disp_drv->ver_res - area->y2 - 1;
		region->y1 = area->x1;
		region->x2 = disp_drv->ver_res - area->y1 - 1;
		region->y2 = area->x2;
		break;
	case LV_DISP_ROT_NONE:
	default:
		/* no rotation: straight copy */
		ui_region_set(region, area->x1, area->y1, area->x2, area->y2);
		break;
	}
}
  568. #endif /* CONFIG_SURFACE_TRANSFORM_UPDATE */
  569. static void _lvgl_rounder_cb(lv_disp_drv_t * disp_drv, lv_area_t * area)
  570. {
  571. /*
  572. * (1) Some LCD DDIC require even pixel alignment, so set even area if possible
  573. * for framebuffer-less refresh mode.
  574. * (2) Haraware may run faster on aligned pixels, so set horizontal even if possible
  575. */
  576. if (!(disp_drv->hor_res & 0x1)) {
  577. area->x1 &= ~0x1;
  578. area->x2 |= 0x1;
  579. }
  580. /* Framebuffer-less refresh mode must meet LCD DDIC pixel algnment */
  581. if (!(disp_drv->ver_res & 0x1)) {
  582. area->y1 &= ~0x1;
  583. area->y2 |= 0x1;
  584. }
  585. }
  586. static void _lvgl_wait_cb(lv_disp_drv_t * disp_drv)
  587. {
  588. LV_LOG_TRACE("lvgl wait: render %u, flush %u\n",
  589. disp_drv->draw_buf->rendering_idx, disp_drv->draw_buf->flushing_idx);
  590. #if CONFIG_LV_VDB_NUM == 0
  591. lvgl_disp_drv_data_t *drv_data = disp_drv->user_data;
  592. os_sem_take(&drv_data->wait_sem, OS_FOREVER);
  593. #else
  594. os_sem_take(&g_drawbuf_wait_sem, OS_FOREVER);
  595. #endif
  596. }
  597. static void _lvgl_surface_draw_cb(uint32_t event, void * data, void * user_data)
  598. {
  599. lv_disp_t *disp = user_data;
  600. lv_disp_drv_t *disp_drv = disp->driver;
  601. if (event == SURFACE_EVT_DRAW_READY) {
  602. lv_disp_flush_ready(disp_drv);
  603. LV_LOG_TRACE("lvgl ready: render %u, flush %u\n",
  604. disp_drv->draw_buf->rendering_idx, disp_drv->draw_buf->flushing_idx);
  605. #if CONFIG_LV_VDB_NUM == 0
  606. lvgl_disp_drv_data_t *drv_data = disp_drv->user_data;
  607. os_sem_give(&drv_data->wait_sem);
  608. #else
  609. os_sem_give(&g_drawbuf_wait_sem);
  610. #endif
  611. } else if (event == SURFACE_EVT_DRAW_COVER_CHECK) {
  612. surface_cover_check_data_t *cover_check = data;
  613. /* direct mode: buffer copy will be done in lv_refr.c */
  614. if (disp_drv->direct_mode || (
  615. disp->inv_areas[0].x1 <= cover_check->area->x1 &&
  616. disp->inv_areas[0].y1 <= cover_check->area->y1 &&
  617. disp->inv_areas[0].x2 >= cover_check->area->x2 &&
  618. disp->inv_areas[0].y2 >= cover_check->area->y2)) {
  619. cover_check->covered = true;
  620. } else {
  621. cover_check->covered = false;
  622. }
  623. }
  624. }