feat(gpu): improve NXP's PXP and VGLite accelerators (#3952)

Signed-off-by: Nicușor Cîțu <nicusor.citu@nxp.com>
Signed-off-by: Stefan Babatie <stefan.babatie@nxp.com>
Signed-off-by: Wenbin Yuan <wenbin.yuan@nxp.com>
Co-authored-by: Stefan Babatie <stefan.babatie@nxp.com>
Co-authored-by: Wenbin Yuan <wenbin.yuan@nxp.com>
Authored by nicusorcitu on 2023-02-01 11:35:24 +02:00, committed by GitHub
parent 39f424767f
commit 361ee79611
33 changed files with 2647 additions and 1696 deletions
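
For context, this refactor splits the former combined NXP draw context (lv_gpu_nxp.c) into dedicated PXP and VG-Lite draw contexts. The sketch below shows how an application might hook the new PXP context into its display driver; it is illustrative only, not part of this commit, and assumes the LVGL v8 lv_disp_drv_t fields draw_ctx_init, draw_ctx_deinit and draw_ctx_size:

    /* Register the PXP-accelerated draw context (illustrative sketch) */
    static lv_disp_drv_t disp_drv;
    lv_disp_drv_init(&disp_drv);
    disp_drv.draw_ctx_init   = lv_draw_pxp_ctx_init;
    disp_drv.draw_ctx_deinit = lv_draw_pxp_ctx_deinit;
    disp_drv.draw_ctx_size   = sizeof(lv_draw_pxp_ctx_t);
    /* ...then set the draw buffers, flush_cb, etc. and call lv_disp_drv_register(&disp_drv) */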

View File

@@ -1,5 +1,3 @@
CSRCS += lv_gpu_nxp.c
DEPPATH += --dep-path $(LVGL_DIR)/$(LVGL_DIR_NAME)/src/draw/nxp
VPATH += :$(LVGL_DIR)/$(LVGL_DIR_NAME)/src/draw/nxp

View File

@@ -1,418 +0,0 @@
/**
* @file lv_gpu_nxp.c
*
*/
/**
* MIT License
*
* Copyright 2022 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next paragraph)
* shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
/*********************
* INCLUDES
*********************/
#include "lv_gpu_nxp.h"
#if LV_USE_GPU_NXP_PXP || LV_USE_GPU_NXP_VG_LITE
/*
* allow to use both PXP and VGLITE
* both 2D accelerators can be used at the same time:
* thus VGLITE can be used to accelerate widget drawing
* while PXP accelerates Blit & Fill operations.
*/
#if LV_USE_GPU_NXP_PXP
#include "pxp/lv_draw_pxp_blend.h"
#endif
#if LV_USE_GPU_NXP_VG_LITE
#include "vglite/lv_draw_vglite_blend.h"
#include "vglite/lv_draw_vglite_rect.h"
#include "vglite/lv_draw_vglite_arc.h"
#endif
#if LV_COLOR_DEPTH != 32
#include "../../core/lv_refr.h"
#endif
/*********************
* DEFINES
*********************/
/**********************
* TYPEDEFS
**********************/
/**********************
* STATIC PROTOTYPES
**********************/
static void lv_draw_nxp_img_decoded(lv_draw_ctx_t * draw_ctx, const lv_draw_img_dsc_t * dsc,
const lv_area_t * coords, const uint8_t * map_p, lv_img_cf_t cf);
static void lv_draw_nxp_blend(lv_draw_ctx_t * draw_ctx, const lv_draw_sw_blend_dsc_t * dsc);
static void lv_draw_nxp_rect(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_dsc_t * dsc, const lv_area_t * coords);
static lv_res_t draw_nxp_bg(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_dsc_t * dsc, const lv_area_t * coords);
static void lv_draw_nxp_arc(lv_draw_ctx_t * draw_ctx, const lv_draw_arc_dsc_t * dsc, const lv_point_t * center,
uint16_t radius, uint16_t start_angle, uint16_t end_angle);
/**********************
* STATIC VARIABLES
**********************/
/**********************
* MACROS
**********************/
/**********************
* GLOBAL FUNCTIONS
**********************/
void lv_draw_nxp_ctx_init(lv_disp_drv_t * drv, lv_draw_ctx_t * draw_ctx)
{
lv_draw_sw_init_ctx(drv, draw_ctx);
lv_draw_nxp_ctx_t * nxp_draw_ctx = (lv_draw_sw_ctx_t *)draw_ctx;
nxp_draw_ctx->base_draw.draw_arc = lv_draw_nxp_arc;
nxp_draw_ctx->base_draw.draw_rect = lv_draw_nxp_rect;
nxp_draw_ctx->base_draw.draw_img_decoded = lv_draw_nxp_img_decoded;
nxp_draw_ctx->blend = lv_draw_nxp_blend;
//nxp_draw_ctx->base_draw.wait_for_finish = lv_draw_nxp_wait_cb;
}
void lv_draw_nxp_ctx_deinit(lv_disp_drv_t * drv, lv_draw_ctx_t * draw_ctx)
{
lv_draw_sw_deinit_ctx(drv, draw_ctx);
}
/**********************
* STATIC FUNCTIONS
**********************/
/**
* During rendering, LVGL might initialize new draw_ctxs and start drawing into
* a separate buffer (called layer). If the content to be rendered has "holes",
* e.g. rounded corner, LVGL temporarily sets the disp_drv.screen_transp flag.
* It means the renderers should draw into an ARGB buffer.
* With 32 bit color depth it's not a big problem but with 16 bit color depth
* the target pixel format is ARGB8565 which is not supported by the GPU.
* In this case, the NXP callbacks should fall back to SW rendering.
*/
static inline bool need_argb8565_support()
{
#if LV_COLOR_DEPTH != 32
lv_disp_t * disp = _lv_refr_get_disp_refreshing();
if(disp->driver->screen_transp == 1)
return true;
#endif
return false;
}
static void lv_draw_nxp_blend(lv_draw_ctx_t * draw_ctx, const lv_draw_sw_blend_dsc_t * dsc)
{
lv_area_t blend_area;
/*Let's get the blend area which is the intersection of the area to fill and the clip area.*/
if(!_lv_area_intersect(&blend_area, dsc->blend_area, draw_ctx->clip_area))
return; /*Fully clipped, nothing to do*/
/*Make the blend area relative to the buffer*/
lv_area_move(&blend_area, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
bool done = false;
/*Fill/Blend only non masked, normal blended*/
if(dsc->mask_buf == NULL && dsc->blend_mode == LV_BLEND_MODE_NORMAL && !need_argb8565_support()) {
lv_color_t * dest_buf = draw_ctx->buf;
lv_coord_t dest_stride = lv_area_get_width(draw_ctx->buf_area);
#if LV_USE_GPU_NXP_VG_LITE
lv_coord_t dest_width = lv_area_get_width(draw_ctx->buf_area);
lv_coord_t dest_height = lv_area_get_height(draw_ctx->buf_area);
#endif
const lv_color_t * src_buf = dsc->src_buf;
if(src_buf == NULL) {
#if LV_USE_GPU_NXP_PXP
done = (lv_gpu_nxp_pxp_fill(dest_buf, dest_stride, &blend_area,
dsc->color, dsc->opa) == LV_RES_OK);
if(!done)
PXP_LOG_TRACE("PXP fill failed. Fallback.");
#endif
#if LV_USE_GPU_NXP_VG_LITE
if(!done) {
done = (lv_gpu_nxp_vglite_fill(dest_buf, dest_width, dest_height, &blend_area,
dsc->color, dsc->opa) == LV_RES_OK);
if(!done)
VG_LITE_LOG_TRACE("VG-Lite fill failed. Fallback.");
}
#endif
}
else {
#if LV_USE_GPU_NXP_PXP
done = (lv_gpu_nxp_pxp_blit(dest_buf, &blend_area, dest_stride, src_buf, dsc->blend_area,
dsc->opa, LV_DISP_ROT_NONE) == LV_RES_OK);
if(!done)
PXP_LOG_TRACE("PXP blit failed. Fallback.");
#endif
#if LV_USE_GPU_NXP_VG_LITE
if(!done) {
lv_gpu_nxp_vglite_blit_info_t blit;
lv_coord_t src_stride = lv_area_get_width(dsc->blend_area);
blit.src = src_buf;
blit.src_width = lv_area_get_width(dsc->blend_area);
blit.src_height = lv_area_get_height(dsc->blend_area);
blit.src_stride = src_stride * (int32_t)sizeof(lv_color_t);
blit.src_area.x1 = (blend_area.x1 - (dsc->blend_area->x1 - draw_ctx->buf_area->x1));
blit.src_area.y1 = (blend_area.y1 - (dsc->blend_area->y1 - draw_ctx->buf_area->y1));
blit.src_area.x2 = blit.src_area.x1 + blit.src_width - 1;
blit.src_area.y2 = blit.src_area.y1 + blit.src_height - 1;
blit.dst = dest_buf;
blit.dst_width = dest_width;
blit.dst_height = dest_height;
blit.dst_stride = dest_stride * (int32_t)sizeof(lv_color_t);
blit.dst_area.x1 = blend_area.x1;
blit.dst_area.y1 = blend_area.y1;
blit.dst_area.x2 = blend_area.x2;
blit.dst_area.y2 = blend_area.y2;
blit.opa = dsc->opa;
blit.zoom = LV_IMG_ZOOM_NONE;
blit.angle = 0;
done = (lv_gpu_nxp_vglite_blit(&blit) == LV_RES_OK);
if(!done)
VG_LITE_LOG_TRACE("VG-Lite blit failed. Fallback.");
}
#endif
}
}
if(!done)
lv_draw_sw_blend_basic(draw_ctx, dsc);
}
static void lv_draw_nxp_img_decoded(lv_draw_ctx_t * draw_ctx, const lv_draw_img_dsc_t * dsc,
const lv_area_t * coords, const uint8_t * map_p, lv_img_cf_t cf)
{
/*Use the clip area as draw area*/
lv_area_t draw_area;
lv_area_copy(&draw_area, draw_ctx->clip_area);
bool mask_any = lv_draw_mask_is_any(&draw_area);
#if LV_USE_GPU_NXP_VG_LITE
bool recolor = (dsc->recolor_opa != LV_OPA_TRANSP);
#endif
#if LV_USE_GPU_NXP_PXP
bool scale = (dsc->zoom != LV_IMG_ZOOM_NONE);
#endif
bool done = false;
lv_area_t blend_area;
/*Let's get the blend area which is the intersection of the area to fill and the clip area.*/
if(!_lv_area_intersect(&blend_area, coords, draw_ctx->clip_area))
return; /*Fully clipped, nothing to do*/
/*Make the blend area relative to the buffer*/
lv_area_move(&blend_area, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
const lv_color_t * src_buf = (const lv_color_t *)map_p;
if(!src_buf) {
lv_draw_sw_img_decoded(draw_ctx, dsc, coords, map_p, cf);
return;
}
lv_color_t * dest_buf = draw_ctx->buf;
lv_coord_t dest_stride = lv_area_get_width(draw_ctx->buf_area);
#if LV_USE_GPU_NXP_PXP
if(!mask_any && !scale && !need_argb8565_support()
#if LV_COLOR_DEPTH!=32
&& !lv_img_cf_has_alpha(cf)
#endif
) {
done = (lv_gpu_nxp_pxp_blit_transform(dest_buf, &blend_area, dest_stride, src_buf, coords,
dsc, cf) == LV_RES_OK);
if(!done)
PXP_LOG_TRACE("PXP blit transform failed. Fallback.");
}
#endif
#if LV_USE_GPU_NXP_VG_LITE
if(!done && !mask_any && !need_argb8565_support() &&
!lv_img_cf_is_chroma_keyed(cf) && !recolor
#if LV_COLOR_DEPTH!=32
&& !lv_img_cf_has_alpha(cf)
#endif
) {
lv_gpu_nxp_vglite_blit_info_t blit;
lv_coord_t src_stride = lv_area_get_width(coords);
blit.src = src_buf;
blit.src_width = lv_area_get_width(coords);
blit.src_height = lv_area_get_height(coords);
blit.src_stride = src_stride * (int32_t)sizeof(lv_color_t);
blit.src_area.x1 = (blend_area.x1 - (coords->x1 - draw_ctx->buf_area->x1));
blit.src_area.y1 = (blend_area.y1 - (coords->y1 - draw_ctx->buf_area->y1));
blit.src_area.x2 = blit.src_area.x1 + blit.src_width - 1;
blit.src_area.y2 = blit.src_area.y1 + blit.src_height - 1;
blit.dst = dest_buf;
blit.dst_width = lv_area_get_width(draw_ctx->buf_area);
blit.dst_height = lv_area_get_height(draw_ctx->buf_area);
blit.dst_stride = dest_stride * (int32_t)sizeof(lv_color_t);
blit.dst_area.x1 = blend_area.x1;
blit.dst_area.y1 = blend_area.y1;
blit.dst_area.x2 = blend_area.x2;
blit.dst_area.y2 = blend_area.y2;
blit.opa = dsc->opa;
blit.angle = dsc->angle;
blit.pivot = dsc->pivot;
blit.zoom = dsc->zoom;
done = (lv_gpu_nxp_vglite_blit_transform(&blit) == LV_RES_OK);
if(!done)
VG_LITE_LOG_TRACE("VG-Lite blit transform failed. Fallback.");
}
#endif
if(!done)
lv_draw_sw_img_decoded(draw_ctx, dsc, coords, map_p, cf);
}
static void lv_draw_nxp_rect(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_dsc_t * dsc, const lv_area_t * coords)
{
bool done = false;
lv_draw_rect_dsc_t nxp_dsc;
lv_memcpy(&nxp_dsc, dsc, sizeof(nxp_dsc));
#if LV_DRAW_COMPLEX
/* Draw only the shadow */
nxp_dsc.bg_opa = 0;
nxp_dsc.bg_img_opa = 0;
nxp_dsc.border_opa = 0;
nxp_dsc.outline_opa = 0;
lv_draw_sw_rect(draw_ctx, &nxp_dsc, coords);
/* Draw the background */
nxp_dsc.shadow_opa = 0;
nxp_dsc.bg_opa = dsc->bg_opa;
done = (draw_nxp_bg(draw_ctx, &nxp_dsc, coords) == LV_RES_OK);
#endif /*LV_DRAW_COMPLEX*/
/* Draw the remaining parts */
nxp_dsc.shadow_opa = 0;
if(done)
nxp_dsc.bg_opa = 0;
nxp_dsc.bg_img_opa = dsc->bg_img_opa;
nxp_dsc.border_opa = dsc->border_opa;
nxp_dsc.outline_opa = dsc->outline_opa;
lv_draw_sw_rect(draw_ctx, &nxp_dsc, coords);
}
static lv_res_t draw_nxp_bg(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_dsc_t * dsc, const lv_area_t * coords)
{
if(dsc->bg_opa <= LV_OPA_MIN)
return LV_RES_INV;
lv_area_t bg_coords;
lv_area_copy(&bg_coords, coords);
/*If the border fully covers make the bg area 1px smaller to avoid artifacts on the corners*/
if(dsc->border_width > 1 && dsc->border_opa >= (lv_opa_t)LV_OPA_MAX && dsc->radius != 0) {
bg_coords.x1 += (dsc->border_side & LV_BORDER_SIDE_LEFT) ? 1 : 0;
bg_coords.y1 += (dsc->border_side & LV_BORDER_SIDE_TOP) ? 1 : 0;
bg_coords.x2 -= (dsc->border_side & LV_BORDER_SIDE_RIGHT) ? 1 : 0;
bg_coords.y2 -= (dsc->border_side & LV_BORDER_SIDE_BOTTOM) ? 1 : 0;
}
lv_area_t clipped_coords;
if(!_lv_area_intersect(&clipped_coords, &bg_coords, draw_ctx->clip_area))
return LV_RES_INV;
lv_grad_dir_t grad_dir = dsc->bg_grad.dir;
lv_color_t bg_color = grad_dir == LV_GRAD_DIR_NONE ? dsc->bg_color : dsc->bg_grad.stops[0].color;
if(bg_color.full == dsc->bg_grad.stops[1].color.full) grad_dir = LV_GRAD_DIR_NONE;
bool mask_any = lv_draw_mask_is_any(&bg_coords);
/*
* Simplest case: just a plain rectangle (no mask, no radius, no gradient)
* shall fall back to lv_draw_sw_blend().
*
* Complex case: gradient or radius but no mask.
*/
if(!mask_any && ((dsc->radius != 0) || (grad_dir != LV_GRAD_DIR_NONE)) && !need_argb8565_support()) {
#if LV_USE_GPU_NXP_VG_LITE
lv_res_t res = lv_gpu_nxp_vglite_draw_bg(draw_ctx, dsc, &bg_coords);
if(res != LV_RES_OK)
VG_LITE_LOG_TRACE("VG-Lite draw bg failed. Fallback.");
return res;
#endif
}
return LV_RES_INV;
}
static void lv_draw_nxp_arc(lv_draw_ctx_t * draw_ctx, const lv_draw_arc_dsc_t * dsc, const lv_point_t * center,
uint16_t radius, uint16_t start_angle, uint16_t end_angle)
{
bool done = false;
#if LV_DRAW_COMPLEX
if(dsc->opa <= LV_OPA_MIN)
return;
if(dsc->width == 0)
return;
if(start_angle == end_angle)
return;
#if LV_USE_GPU_NXP_VG_LITE
if(!need_argb8565_support()) {
done = (lv_gpu_nxp_vglite_draw_arc(draw_ctx, dsc, center, (int32_t)radius,
(int32_t)start_angle, (int32_t)end_angle) == LV_RES_OK);
if(!done)
VG_LITE_LOG_TRACE("VG-Lite draw arc failed. Fallback.");
}
#endif
#endif/*LV_DRAW_COMPLEX*/
if(!done)
lv_draw_sw_arc(draw_ctx, dsc, center, radius, start_angle, end_angle);
}
#endif /*LV_USE_GPU_NXP_PXP || LV_USE_GPU_NXP_VG_LITE*/

View File

@@ -1,3 +1,4 @@
CSRCS += lv_draw_pxp.c
CSRCS += lv_draw_pxp_blend.c
CSRCS += lv_gpu_nxp_pxp_osa.c
CSRCS += lv_gpu_nxp_pxp.c

View File

@@ -0,0 +1,250 @@
/**
* @file lv_draw_pxp.c
*
*/
/**
* MIT License
*
* Copyright 2022, 2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next paragraph)
* shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
/*********************
* INCLUDES
*********************/
#include "lv_draw_pxp.h"
#if LV_USE_GPU_NXP_PXP
#include "lv_draw_pxp_blend.h"
#if LV_COLOR_DEPTH != 32
#include "../../../core/lv_refr.h"
#endif
/*********************
* DEFINES
*********************/
/* Minimum area (in pixels) for PXP blit/fill processing. */
#ifndef LV_GPU_NXP_PXP_SIZE_LIMIT
#define LV_GPU_NXP_PXP_SIZE_LIMIT 5000
#endif
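/*
 * Editorial note (not part of this file): thanks to the #ifndef guard above,
 * LV_GPU_NXP_PXP_SIZE_LIMIT can typically be overridden from the build system
 * or from lv_conf.h, e.g. `#define LV_GPU_NXP_PXP_SIZE_LIMIT 10000`. With the
 * default of 5000 px, a 64 x 64 blend area (4096 px) stays in software, while
 * an 80 x 80 area (6400 px) is offloaded to the PXP, provided the other
 * conditions (no mask, normal blend mode, supported format) hold.
 */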
/**********************
* TYPEDEFS
**********************/
/**********************
* STATIC PROTOTYPES
**********************/
static void lv_draw_pxp_wait_for_finish(lv_draw_ctx_t * draw_ctx);
static void lv_draw_pxp_img_decoded(lv_draw_ctx_t * draw_ctx, const lv_draw_img_dsc_t * dsc,
const lv_area_t * coords, const uint8_t * map_p, lv_img_cf_t cf);
static void lv_draw_pxp_blend(lv_draw_ctx_t * draw_ctx, const lv_draw_sw_blend_dsc_t * dsc);
/**********************
* STATIC VARIABLES
**********************/
/**********************
* MACROS
**********************/
/**********************
* GLOBAL FUNCTIONS
**********************/
void lv_draw_pxp_ctx_init(lv_disp_drv_t * drv, lv_draw_ctx_t * draw_ctx)
{
lv_draw_sw_init_ctx(drv, draw_ctx);
lv_draw_pxp_ctx_t * pxp_draw_ctx = (lv_draw_sw_ctx_t *)draw_ctx;
pxp_draw_ctx->base_draw.draw_img_decoded = lv_draw_pxp_img_decoded;
pxp_draw_ctx->blend = lv_draw_pxp_blend;
pxp_draw_ctx->base_draw.wait_for_finish = lv_draw_pxp_wait_for_finish;
}
void lv_draw_pxp_ctx_deinit(lv_disp_drv_t * drv, lv_draw_ctx_t * draw_ctx)
{
lv_draw_sw_deinit_ctx(drv, draw_ctx);
}
/**********************
* STATIC FUNCTIONS
**********************/
/**
* During rendering, LVGL might initialize new draw_ctxs and start drawing into
* a separate buffer (called layer). If the content to be rendered has "holes",
* e.g. rounded corner, LVGL temporarily sets the disp_drv.screen_transp flag.
* It means the renderers should draw into an ARGB buffer.
* With 32 bit color depth it's not a big problem but with 16 bit color depth
* the target pixel format is ARGB8565 which is not supported by the GPU.
* In this case, the PXP callbacks should fall back to SW rendering.
*/
static inline bool need_argb8565_support()
{
#if LV_COLOR_DEPTH != 32
lv_disp_t * disp = _lv_refr_get_disp_refreshing();
if(disp->driver->screen_transp == 1)
return true;
#endif
return false;
}
static void lv_draw_pxp_wait_for_finish(lv_draw_ctx_t * draw_ctx)
{
lv_gpu_nxp_pxp_wait();
lv_draw_sw_wait_for_finish(draw_ctx);
}
static void lv_draw_pxp_blend(lv_draw_ctx_t * draw_ctx, const lv_draw_sw_blend_dsc_t * dsc)
{
if(dsc->opa <= (lv_opa_t)LV_OPA_MIN)
return;
if(need_argb8565_support()) {
lv_draw_sw_blend_basic(draw_ctx, dsc);
return;
}
lv_area_t blend_area;
/*Let's get the blend area which is the intersection of the area to draw and the clip area*/
if(!_lv_area_intersect(&blend_area, dsc->blend_area, draw_ctx->clip_area))
return; /*Fully clipped, nothing to do*/
/*Make the blend area relative to the buffer*/
lv_area_move(&blend_area, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
if(dsc->mask_buf != NULL || dsc->blend_mode != LV_BLEND_MODE_NORMAL ||
lv_area_get_size(&blend_area) < LV_GPU_NXP_PXP_SIZE_LIMIT) {
lv_draw_sw_blend_basic(draw_ctx, dsc);
return;
}
/*Fill/Blend only non masked, normal blended*/
lv_color_t * dest_buf = draw_ctx->buf;
lv_coord_t dest_stride = lv_area_get_width(draw_ctx->buf_area);
const lv_color_t * src_buf = dsc->src_buf;
if(src_buf == NULL) {
lv_gpu_nxp_pxp_fill(dest_buf, &blend_area, dest_stride, dsc->color, dsc->opa);
}
else {
lv_area_t src_area;
src_area.x1 = blend_area.x1 - (dsc->blend_area->x1 - draw_ctx->buf_area->x1);
src_area.y1 = blend_area.y1 - (dsc->blend_area->y1 - draw_ctx->buf_area->y1);
src_area.x2 = src_area.x1 + lv_area_get_width(dsc->blend_area) - 1;
src_area.y2 = src_area.y1 + lv_area_get_height(dsc->blend_area) - 1;
lv_coord_t src_stride = lv_area_get_width(dsc->blend_area);
lv_gpu_nxp_pxp_blit(dest_buf, &blend_area, dest_stride, src_buf, &src_area, src_stride,
dsc->opa, LV_DISP_ROT_NONE);
}
}
static void lv_draw_pxp_img_decoded(lv_draw_ctx_t * draw_ctx, const lv_draw_img_dsc_t * dsc,
const lv_area_t * coords, const uint8_t * map_p, lv_img_cf_t cf)
{
if(dsc->opa <= (lv_opa_t)LV_OPA_MIN)
return;
if(need_argb8565_support()) {
lv_draw_sw_img_decoded(draw_ctx, dsc, coords, map_p, cf);
return;
}
const lv_color_t * src_buf = (const lv_color_t *)map_p;
if(!src_buf) {
lv_draw_sw_img_decoded(draw_ctx, dsc, coords, map_p, cf);
return;
}
lv_area_t blend_area;
/*Let's get the blend area which is the intersection of the area to draw and the clip area.*/
if(!_lv_area_intersect(&blend_area, coords, draw_ctx->clip_area))
return; /*Fully clipped, nothing to do*/
/*Make the blend area relative to the buffer*/
lv_area_move(&blend_area, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
lv_coord_t src_width = lv_area_get_width(coords);
lv_coord_t src_height = lv_area_get_height(coords);
bool has_mask = lv_draw_mask_is_any(&blend_area);
bool has_scale = (dsc->zoom != LV_IMG_ZOOM_NONE);
bool has_rotation = (dsc->angle != 0);
bool unsup_rotation = false;
if(has_rotation) {
/*
* PXP can only rotate at 90x angles.
*/
if(dsc->angle % 900) {
PXP_LOG_TRACE("Rotation angle %d is not supported. PXP can rotate only 90x angle.", dsc->angle);
unsup_rotation = true;
}
/*
* PXP is set to process 16x16 blocks to optimize the system for memory
* bandwidth and image processing time.
* The output engine essentially truncates any output pixels after the
* desired number of pixels has been written.
* When rotating a source image and the output is not divisible by the block
* size, the incorrect pixels could be truncated and the final output image
* can look shifted.
*/
if(src_width % 16 || src_height % 16) {
PXP_LOG_TRACE("Rotation is not supported for image w/o alignment to block size 16x16.");
unsup_rotation = true;
}
}
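/*
 * Editorial example (not in the original source): with the checks above, a
 * 96 x 64 source rotated by 900 (i.e. 90 degrees, since angles are given in
 * 0.1-degree units) can stay on the PXP, while a 100 x 60 source
 * (100 % 16 != 0) or a non-multiple-of-90 angle such as 450 forces the
 * software fallback.
 */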
if(has_mask || has_scale || unsup_rotation || lv_area_get_size(&blend_area) < LV_GPU_NXP_PXP_SIZE_LIMIT
#if LV_COLOR_DEPTH != 32
|| lv_img_cf_has_alpha(cf)
#endif
) {
lv_draw_sw_img_decoded(draw_ctx, dsc, coords, map_p, cf);
return;
}
lv_color_t * dest_buf = draw_ctx->buf;
lv_coord_t dest_stride = lv_area_get_width(draw_ctx->buf_area);
lv_area_t src_area;
src_area.x1 = blend_area.x1 - (coords->x1 - draw_ctx->buf_area->x1);
src_area.y1 = blend_area.y1 - (coords->y1 - draw_ctx->buf_area->y1);
src_area.x2 = src_area.x1 + src_width - 1;
src_area.y2 = src_area.y1 + src_height - 1;
lv_coord_t src_stride = lv_area_get_width(coords);
lv_gpu_nxp_pxp_blit_transform(dest_buf, &blend_area, dest_stride, src_buf, &src_area, src_stride,
dsc, cf);
}
#endif /*LV_USE_GPU_NXP_PXP*/

View File

@@ -1,12 +1,12 @@
/**
* @file lv_gpu_nxp.h
* @file lv_draw_pxp.h
*
*/
/**
* MIT License
*
* Copyright 2022 NXP
* Copyright 2022, 2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -27,8 +27,8 @@
*
*/
#ifndef LV_GPU_NXP_H
#define LV_GPU_NXP_H
#ifndef LV_DRAW_PXP_H
#define LV_DRAW_PXP_H
#ifdef __cplusplus
extern "C" {
@@ -38,9 +38,10 @@ extern "C" {
* INCLUDES
*********************/
#include "../../lv_conf_internal.h"
#if LV_USE_GPU_NXP_PXP || LV_USE_GPU_NXP_VG_LITE
#include "../sw/lv_draw_sw.h"
#include "../../../lv_conf_internal.h"
#if LV_USE_GPU_NXP_PXP
#include "../../sw/lv_draw_sw.h"
/*********************
* DEFINES
@@ -49,23 +50,23 @@ extern "C" {
/**********************
* TYPEDEFS
**********************/
typedef lv_draw_sw_ctx_t lv_draw_nxp_ctx_t;
typedef lv_draw_sw_ctx_t lv_draw_pxp_ctx_t;
/**********************
* GLOBAL PROTOTYPES
**********************/
void lv_draw_nxp_ctx_init(struct _lv_disp_drv_t * drv, lv_draw_ctx_t * draw_ctx);
void lv_draw_pxp_ctx_init(struct _lv_disp_drv_t * drv, lv_draw_ctx_t * draw_ctx);
void lv_draw_nxp_ctx_deinit(struct _lv_disp_drv_t * drv, lv_draw_ctx_t * draw_ctx);
void lv_draw_pxp_ctx_deinit(struct _lv_disp_drv_t * drv, lv_draw_ctx_t * draw_ctx);
/**********************
* MACROS
**********************/
#endif /*LV_USE_GPU_NXP_PXP || LV_USE_GPU_NXP_VG_LITE*/
#endif /*LV_USE_GPU_NXP_PXP*/
#ifdef __cplusplus
} /*extern "C"*/
#endif
#endif /*LV_GPU_NXP_H*/
#endif /*LV_DRAW_PXP_H*/

View File

@@ -6,7 +6,7 @@
/**
* MIT License
*
* Copyright 2020-2022 NXP
* Copyright 2020-2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -34,20 +34,23 @@
#include "lv_draw_pxp_blend.h"
#if LV_USE_GPU_NXP_PXP
#include "lvgl_support.h"
/*********************
* DEFINES
*********************/
#define PXP_TEMP_BUF_SIZE LCD_WIDTH * LCD_HEIGHT * LCD_FB_BYTE_PER_PIXEL
#if LV_COLOR_16_SWAP
#error Color swap not implemented. Disable LV_COLOR_16_SWAP feature.
#endif
#if LV_COLOR_DEPTH==16
#if LV_COLOR_DEPTH == 16
#define PXP_OUT_PIXEL_FORMAT kPXP_OutputPixelFormatRGB565
#define PXP_AS_PIXEL_FORMAT kPXP_AsPixelFormatRGB565
#define PXP_PS_PIXEL_FORMAT kPXP_PsPixelFormatRGB565
#elif LV_COLOR_DEPTH==32
#elif LV_COLOR_DEPTH == 32
#define PXP_OUT_PIXEL_FORMAT kPXP_OutputPixelFormatARGB8888
#define PXP_AS_PIXEL_FORMAT kPXP_AsPixelFormatARGB8888
#define PXP_PS_PIXEL_FORMAT kPXP_PsPixelFormatRGB888
@@ -55,13 +58,6 @@
#error Only 16bit and 32bit color depth are supported. Set LV_COLOR_DEPTH to 16 or 32.
#endif
#if defined (__alpha__) || defined (__ia64__) || defined (__x86_64__) \
|| defined (_WIN64) || defined (__LP64__) || defined (__LLP64__)
#define ALIGN_SIZE 8
#else
#define ALIGN_SIZE 4
#endif
/**********************
* TYPEDEFS
**********************/
@@ -70,63 +66,60 @@
* STATIC PROTOTYPES
**********************/
static LV_ATTRIBUTE_MEM_ALIGN uint8_t temp_buf[PXP_TEMP_BUF_SIZE];
/**
* BLock Image Transfer - copy rectangular image from src buffer to dst buffer
* with combination of transformation (rotation, scale, recolor) and opacity, alpha channel
* or color keying. This requires two steps. First step is used for transformation into
* a temporary buffer and the second one will handle the color format or opacity.
*
* @param[in/out] dest_buf destination buffer
* @param[in] dest_area area to be copied from src_buf to dst_buf
* @param[in] dest_stride width (stride) of destination buffer in pixels
* @param[in] src_buf source buffer
* @param[in] src_area source area with absolute coordinates to draw on destination buffer
* @param[in] dsc image descriptor
* @param[in] cf color format
* @retval LV_RES_OK Fill completed
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_PXP_LOG_ERRORS)
* @param[in/out] dest_buf Destination buffer
* @param[in] dest_area Area with relative coordinates of destination buffer
* @param[in] dest_stride Stride of destination buffer in pixels
* @param[in] src_buf Source buffer
* @param[in] src_area Area with relative coordinates of source buffer
* @param[in] src_stride Stride of source buffer in pixels
* @param[in] dsc Image descriptor
* @param[in] cf Color format
*/
static lv_res_t lv_gpu_nxp_pxp_blit_opa(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area,
const lv_draw_img_dsc_t * dsc, lv_img_cf_t cf);
static void lv_pxp_blit_opa(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area, lv_coord_t src_stride,
const lv_draw_img_dsc_t * dsc, lv_img_cf_t cf);
/**
* BLock Image Transfer - copy rectangular image from src buffer to dst buffer
* with transformation and full opacity.
*
* @param[in/out] dest_buf destination buffer
* @param[in] dest_area area to be copied from src_buf to dst_buf
* @param[in] dest_stride width (stride) of destination buffer in pixels
* @param[in] src_buf source buffer
* @param[in] src_area source area with absolute coordinates to draw on destination buffer
* @param[in] dsc image descriptor
* @param[in] cf color format
* @retval LV_RES_OK Fill completed
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_PXP_LOG_ERRORS)
* @param[in/out] dest_buf Destination buffer
* @param[in] dest_area Area with relative coordinates of destination buffer
* @param[in] dest_stride Stride of destination buffer in pixels
* @param[in] src_buf Source buffer
* @param[in] src_area Area with relative coordinates of source buffer
* @param[in] src_stride Stride of source buffer in pixels
* @param[in] dsc Image descriptor
* @param[in] cf Color format
*/
static lv_res_t lv_gpu_nxp_pxp_blit_cover(lv_color_t * dest_buf, const lv_area_t * dest_area,
lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area,
const lv_draw_img_dsc_t * dsc, lv_img_cf_t cf);
static void lv_pxp_blit_cover(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area, lv_coord_t src_stride,
const lv_draw_img_dsc_t * dsc, lv_img_cf_t cf);
/**
* BLock Image Transfer - copy rectangular image from src buffer to dst buffer
* without transformation but handling color format or opacity.
*
* @param[in/out] dest_buf destination buffer
* @param[in] dest_area area to be copied from src_buf to dst_buf
* @param[in] dest_stride width (stride) of destination buffer in pixels
* @param[in] src_buf source buffer
* @param[in] src_area source area with absolute coordinates to draw on destination buffer
* @param[in] dsc image descriptor
* @param[in] cf color format
* @retval LV_RES_OK Fill completed
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_PXP_LOG_ERRORS)
* @param[in/out] dest_buf Destination buffer
* @param[in] dest_area Area with relative coordinates of destination buffer
* @param[in] dest_stride Stride of destination buffer in pixels
* @param[in] src_buf Source buffer
* @param[in] src_area Area with relative coordinates of source buffer
* @param[in] src_stride Stride of source buffer in pixels
* @param[in] dsc Image descriptor
* @param[in] cf Color format
*/
static lv_res_t lv_gpu_nxp_pxp_blit_cf(lv_color_t * dest_buf, const lv_area_t * dest_area,
lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area,
const lv_draw_img_dsc_t * dsc, lv_img_cf_t cf);
static void lv_pxp_blit_cf(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area, lv_coord_t src_stride,
const lv_draw_img_dsc_t * dsc, lv_img_cf_t cf);
/**********************
* STATIC VARIABLES
@@ -136,45 +129,27 @@ static lv_res_t lv_gpu_nxp_pxp_blit_cf(lv_color_t * dest_buf, const lv_area_t *
* MACROS
**********************/
#define ROUND_UP(x, align) ((x + (align - 1)) & ~(align - 1))
/**********************
* GLOBAL FUNCTIONS
**********************/
lv_res_t lv_gpu_nxp_pxp_fill(lv_color_t * dest_buf, lv_coord_t dest_stride, const lv_area_t * fill_area,
lv_color_t color, lv_opa_t opa)
void lv_gpu_nxp_pxp_fill(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
lv_color_t color, lv_opa_t opa)
{
uint32_t area_size = lv_area_get_size(fill_area);
lv_coord_t area_w = lv_area_get_width(fill_area);
lv_coord_t area_h = lv_area_get_height(fill_area);
lv_coord_t dest_w = lv_area_get_width(dest_area);
lv_coord_t dest_h = lv_area_get_height(dest_area);
if(opa >= (lv_opa_t)LV_OPA_MAX) {
if(area_size < LV_GPU_NXP_PXP_FILL_SIZE_LIMIT) {
PXP_LOG_TRACE("Area size %d smaller than limit %d.", area_size, LV_GPU_NXP_PXP_FILL_SIZE_LIMIT);
return LV_RES_INV;
}
}
else {
if(area_size < LV_GPU_NXP_PXP_FILL_OPA_SIZE_LIMIT) {
PXP_LOG_TRACE("Area size %d smaller than limit %d.", area_size, LV_GPU_NXP_PXP_FILL_OPA_SIZE_LIMIT);
return LV_RES_INV;
}
}
PXP_Init(LV_GPU_NXP_PXP_ID);
PXP_EnableCsc1(LV_GPU_NXP_PXP_ID, false); /*Disable CSC1, it is enabled by default.*/
PXP_SetProcessBlockSize(LV_GPU_NXP_PXP_ID, kPXP_BlockSize16); /*Block size 16x16 for higher performance*/
lv_gpu_nxp_pxp_reset();
/*OUT buffer configure*/
pxp_output_buffer_config_t outputConfig = {
.pixelFormat = PXP_OUT_PIXEL_FORMAT,
.interlacedMode = kPXP_OutputProgressive,
.buffer0Addr = (uint32_t)(dest_buf + dest_stride * fill_area->y1 + fill_area->x1),
.buffer0Addr = (uint32_t)(dest_buf + dest_stride * dest_area->y1 + dest_area->x1),
.buffer1Addr = (uint32_t)NULL,
.pitchBytes = dest_stride * sizeof(lv_color_t),
.width = area_w,
.height = area_h
.width = dest_w,
.height = dest_h
};
PXP_SetOutputBufferConfig(LV_GPU_NXP_PXP_ID, &outputConfig);
@@ -193,7 +168,7 @@ lv_res_t lv_gpu_nxp_pxp_fill(lv_color_t * dest_buf, lv_coord_t dest_stride, cons
};
PXP_SetAlphaSurfaceBufferConfig(LV_GPU_NXP_PXP_ID, &asBufferConfig);
PXP_SetAlphaSurfacePosition(LV_GPU_NXP_PXP_ID, 0U, 0U, area_w, area_h);
PXP_SetAlphaSurfacePosition(LV_GPU_NXP_PXP_ID, 0U, 0U, dest_w - 1U, dest_h - 1U);
}
/*Disable PS, use as color generator*/
@@ -223,34 +198,19 @@ lv_res_t lv_gpu_nxp_pxp_fill(lv_color_t * dest_buf, lv_coord_t dest_stride, cons
PXP_SetPorterDuffConfig(LV_GPU_NXP_PXP_ID, &pdConfig);
lv_gpu_nxp_pxp_run(); /*Start PXP task*/
return LV_RES_OK;
lv_gpu_nxp_pxp_run();
}
lv_res_t lv_gpu_nxp_pxp_blit(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area, lv_opa_t opa, lv_disp_rot_t angle)
void lv_gpu_nxp_pxp_blit(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area, lv_coord_t src_stride,
lv_opa_t opa, lv_disp_rot_t angle)
{
uint32_t dest_size = lv_area_get_size(dest_area);
lv_coord_t dest_w = lv_area_get_width(dest_area);
lv_coord_t dest_h = lv_area_get_height(dest_area);
lv_coord_t src_w = lv_area_get_width(src_area);
lv_coord_t src_h = lv_area_get_height(src_area);
if(opa >= (lv_opa_t)LV_OPA_MAX) {
if(dest_size < LV_GPU_NXP_PXP_BLIT_SIZE_LIMIT) {
PXP_LOG_TRACE("Area size %d smaller than limit %d.", dest_size, LV_GPU_NXP_PXP_BLIT_SIZE_LIMIT);
return LV_RES_INV;
}
}
else {
if(dest_size < LV_GPU_NXP_PXP_BLIT_OPA_SIZE_LIMIT) {
PXP_LOG_TRACE("Area size %d smaller than limit %d.", dest_size, LV_GPU_NXP_PXP_BLIT_OPA_SIZE_LIMIT);
return LV_RES_INV;
}
}
PXP_Init(LV_GPU_NXP_PXP_ID);
PXP_EnableCsc1(LV_GPU_NXP_PXP_ID, false); /*Disable CSC1, it is enabled by default.*/
PXP_SetProcessBlockSize(LV_GPU_NXP_PXP_ID, kPXP_BlockSize16); /*block size 16x16 for higher performance*/
lv_gpu_nxp_pxp_reset();
/* convert rotation angle */
pxp_rotate_degree_t pxp_rot;
@@ -297,19 +257,17 @@ lv_res_t lv_gpu_nxp_pxp_blit(lv_color_t * dest_buf, const lv_area_t * dest_area,
asBlendConfig.alphaMode = kPXP_AlphaOverride;
PXP_SetProcessSurfaceBufferConfig(LV_GPU_NXP_PXP_ID, &psBufferConfig);
PXP_SetProcessSurfacePosition(LV_GPU_NXP_PXP_ID, 0U, 0U, dest_w - 1, dest_h - 1);
PXP_SetProcessSurfacePosition(LV_GPU_NXP_PXP_ID, 0U, 0U, dest_w - 1U, dest_h - 1U);
}
lv_coord_t src_stride = lv_area_get_width(src_area);
/*AS buffer - source image*/
pxp_as_buffer_config_t asBufferConfig = {
.pixelFormat = PXP_AS_PIXEL_FORMAT,
.bufferAddr = (uint32_t)src_buf,
.bufferAddr = (uint32_t)(src_buf + src_stride * src_area->y1 + src_area->x1),
.pitchBytes = src_stride * sizeof(lv_color_t)
};
PXP_SetAlphaSurfaceBufferConfig(LV_GPU_NXP_PXP_ID, &asBufferConfig);
PXP_SetAlphaSurfacePosition(LV_GPU_NXP_PXP_ID, 0U, 0U, dest_w - 1U, dest_h - 1U);
PXP_SetAlphaSurfacePosition(LV_GPU_NXP_PXP_ID, 0U, 0U, src_w - 1U, src_h - 1U);
PXP_SetAlphaSurfaceBlendConfig(LV_GPU_NXP_PXP_ID, &asBlendConfig);
PXP_EnableAlphaSurfaceOverlayColorKey(LV_GPU_NXP_PXP_ID, false);
@@ -325,162 +283,102 @@ lv_res_t lv_gpu_nxp_pxp_blit(lv_color_t * dest_buf, const lv_area_t * dest_area,
};
PXP_SetOutputBufferConfig(LV_GPU_NXP_PXP_ID, &outputBufferConfig);
lv_gpu_nxp_pxp_run(); /* Start PXP task */
return LV_RES_OK;
lv_gpu_nxp_pxp_run();
}
lv_res_t lv_gpu_nxp_pxp_blit_transform(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area,
const lv_draw_img_dsc_t * dsc, lv_img_cf_t cf)
void lv_gpu_nxp_pxp_blit_transform(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area, lv_coord_t src_stride,
const lv_draw_img_dsc_t * dsc, lv_img_cf_t cf)
{
uint32_t dest_size = lv_area_get_size(dest_area);
bool has_recolor = (dsc->recolor_opa != LV_OPA_TRANSP);
bool has_rotation = (dsc->angle != 0);
if(dsc->opa >= (lv_opa_t)LV_OPA_MAX) {
if(dest_size < LV_GPU_NXP_PXP_BLIT_SIZE_LIMIT) {
PXP_LOG_TRACE("Area size %d smaller than limit %d.", dest_size, LV_GPU_NXP_PXP_BLIT_SIZE_LIMIT);
return LV_RES_INV;
if(has_recolor || has_rotation) {
if(dsc->opa >= (lv_opa_t)LV_OPA_MAX && !lv_img_cf_has_alpha(cf) && !lv_img_cf_is_chroma_keyed(cf)) {
lv_pxp_blit_cover(dest_buf, dest_area, dest_stride, src_buf, src_area, src_stride, dsc, cf);
return;
}
}
else {
if(dest_size < LV_GPU_NXP_PXP_BLIT_OPA_SIZE_LIMIT) {
PXP_LOG_TRACE("Area size %d smaller than limit %d.", dest_size, LV_GPU_NXP_PXP_BLIT_OPA_SIZE_LIMIT);
return LV_RES_INV;
}
}
bool recolor = (dsc->recolor_opa != LV_OPA_TRANSP);
bool rotation = (dsc->angle != 0);
if(rotation) {
if(dsc->angle != 0 && dsc->angle != 900 && dsc->angle != 1800 && dsc->angle != 2700) {
PXP_LOG_TRACE("Rotation angle %d is not supported. PXP can rotate only 90x angle.", dsc->angle);
return LV_RES_INV;
}
}
if(recolor || rotation) {
if(dsc->opa >= (lv_opa_t)LV_OPA_MAX && !lv_img_cf_has_alpha(cf) && !lv_img_cf_is_chroma_keyed(cf))
return lv_gpu_nxp_pxp_blit_cover(dest_buf, dest_area, dest_stride, src_buf, src_area, dsc, cf);
else
else {
/*Recolor and/or rotation with alpha or opacity is done in two steps.*/
return lv_gpu_nxp_pxp_blit_opa(dest_buf, dest_area, dest_stride, src_buf, src_area, dsc, cf);
lv_pxp_blit_opa(dest_buf, dest_area, dest_stride, src_buf, src_area, src_stride, dsc, cf);
return;
}
}
return lv_gpu_nxp_pxp_blit_cf(dest_buf, dest_area, dest_stride, src_buf, src_area, dsc, cf);
lv_pxp_blit_cf(dest_buf, dest_area, dest_stride, src_buf, src_area, src_stride, dsc, cf);
}
/**********************
* STATIC FUNCTIONS
**********************/
static lv_res_t lv_gpu_nxp_pxp_blit_opa(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area,
const lv_draw_img_dsc_t * dsc, lv_img_cf_t cf)
static void lv_pxp_blit_opa(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area, lv_coord_t src_stride,
const lv_draw_img_dsc_t * dsc, lv_img_cf_t cf)
{
lv_coord_t dest_w = lv_area_get_width(dest_area);
lv_coord_t dest_h = lv_area_get_height(dest_area);
lv_res_t res;
uint32_t size = dest_w * dest_h * sizeof(lv_color_t);
if(ROUND_UP(size, ALIGN_SIZE) >= LV_MEM_SIZE)
PXP_RETURN_INV("Insufficient memory for temporary buffer. Please increase LV_MEM_SIZE.");
lv_color_t * tmp_buf = (lv_color_t *)lv_mem_buf_get(size);
if(!tmp_buf)
PXP_RETURN_INV("Allocating temporary buffer failed.");
const lv_area_t tmp_area = {
lv_coord_t temp_area_w = lv_area_get_width(dest_area);
lv_coord_t temp_area_h = lv_area_get_height(dest_area);
const lv_area_t temp_area = {
.x1 = 0,
.y1 = 0,
.x2 = dest_w - 1,
.y2 = dest_h - 1
.x2 = temp_area_w - 1,
.y2 = temp_area_h - 1
};
/*Step 1: Transform with full opacity to temporary buffer*/
res = lv_gpu_nxp_pxp_blit_cover(tmp_buf, &tmp_area, dest_w, src_buf, src_area, dsc, cf);
if(res != LV_RES_OK) {
PXP_LOG_TRACE("Blit cover with full opacity failed.");
lv_mem_buf_release(tmp_buf);
lv_pxp_blit_cover((lv_color_t *)temp_buf, &temp_area, temp_area_w, src_buf, src_area, src_stride, dsc, cf);
return res;
}
/*Step 2: Blit temporary results with required opacity to output*/
res = lv_gpu_nxp_pxp_blit_cf(dest_buf, dest_area, dest_stride, tmp_buf, &tmp_area, dsc, cf);
/*Clean-up memory*/
lv_mem_buf_release(tmp_buf);
return res;
/*Step 2: Blit temporary result with required opacity to output*/
lv_pxp_blit_cf(dest_buf, dest_area, dest_stride, (lv_color_t *)temp_buf, &temp_area, temp_area_w, dsc, cf);
}
static lv_res_t lv_gpu_nxp_pxp_blit_cover(lv_color_t * dest_buf, const lv_area_t * dest_area,
lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area,
const lv_draw_img_dsc_t * dsc, lv_img_cf_t cf)
static void lv_pxp_blit_cover(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area, lv_coord_t src_stride,
const lv_draw_img_dsc_t * dsc, lv_img_cf_t cf)
{
lv_coord_t dest_w = lv_area_get_width(dest_area);
lv_coord_t dest_h = lv_area_get_height(dest_area);
lv_coord_t src_w = lv_area_get_width(src_area);
lv_coord_t src_h = lv_area_get_height(src_area);
bool recolor = (dsc->recolor_opa != LV_OPA_TRANSP);
bool rotation = (dsc->angle != 0);
bool has_recolor = (dsc->recolor_opa != LV_OPA_TRANSP);
bool has_rotation = (dsc->angle != 0);
PXP_Init(LV_GPU_NXP_PXP_ID);
PXP_EnableCsc1(LV_GPU_NXP_PXP_ID, false); /*Disable CSC1, it is enabled by default.*/
PXP_SetProcessBlockSize(LV_GPU_NXP_PXP_ID, kPXP_BlockSize16); /*block size 16x16 for higher performance*/
if(rotation) {
/*
* PXP is set to process 16x16 blocks to optimize the system for memory
* bandwidth and image processing time.
* The output engine essentially truncates any output pixels after the
* desired number of pixels has been written.
* When rotating a source image and the output is not divisible by the block
* size, the incorrect pixels could be truncated and the final output image
* can look shifted.
*/
if(lv_area_get_width(src_area) % 16 || lv_area_get_height(src_area) % 16) {
PXP_LOG_TRACE("Rotation is not supported for image w/o alignment to block size 16x16.");
return LV_RES_INV;
}
lv_gpu_nxp_pxp_reset();
if(has_rotation) {
/*Convert rotation angle*/
pxp_rotate_degree_t pxp_rot;
pxp_rotate_degree_t pxp_angle;
switch(dsc->angle) {
case 0:
pxp_rot = kPXP_Rotate0;
pxp_angle = kPXP_Rotate0;
break;
case 900:
pxp_rot = kPXP_Rotate90;
pxp_angle = kPXP_Rotate90;
break;
case 1800:
pxp_rot = kPXP_Rotate180;
pxp_angle = kPXP_Rotate180;
break;
case 2700:
pxp_rot = kPXP_Rotate270;
pxp_angle = kPXP_Rotate270;
break;
default:
PXP_LOG_TRACE("Rotation angle %d is not supported. PXP can rotate only 90x angle.", dsc->angle);
return LV_RES_INV;
pxp_angle = kPXP_Rotate0;
}
PXP_SetRotateConfig(LV_GPU_NXP_PXP_ID, kPXP_RotateOutputBuffer, pxp_rot, kPXP_FlipDisable);
PXP_SetRotateConfig(LV_GPU_NXP_PXP_ID, kPXP_RotateOutputBuffer, pxp_angle, kPXP_FlipDisable);
}
lv_coord_t src_stride = lv_area_get_width(src_area);
/*AS buffer - source image*/
pxp_as_buffer_config_t asBufferConfig = {
.pixelFormat = PXP_AS_PIXEL_FORMAT,
.bufferAddr = (uint32_t)src_buf,
.bufferAddr = (uint32_t)(src_buf + src_stride * src_area->y1 + src_area->x1),
.pitchBytes = src_stride * sizeof(lv_color_t)
};
PXP_SetAlphaSurfaceBufferConfig(LV_GPU_NXP_PXP_ID, &asBufferConfig);
PXP_SetAlphaSurfacePosition(LV_GPU_NXP_PXP_ID, 0U, 0U, dest_w - 1U, dest_h - 1U);
PXP_SetAlphaSurfacePosition(LV_GPU_NXP_PXP_ID, 0U, 0U, src_w - 1U, src_h - 1U);
/*Disable PS buffer*/
PXP_SetProcessSurfacePosition(LV_GPU_NXP_PXP_ID, 0xFFFFU, 0xFFFFU, 0U, 0U);
if(recolor)
if(has_recolor)
/*Use as color generator*/
PXP_SetProcessSurfaceBackGroundColor(LV_GPU_NXP_PXP_ID, lv_color_to32(dsc->recolor));
@@ -496,7 +394,7 @@ static lv_res_t lv_gpu_nxp_pxp_blit_cover(lv_color_t * dest_buf, const lv_area_t
};
PXP_SetOutputBufferConfig(LV_GPU_NXP_PXP_ID, &outputBufferConfig);
if(recolor || lv_img_cf_has_alpha(cf)) {
if(has_recolor || lv_img_cf_has_alpha(cf)) {
/**
* Configure Porter-Duff blending.
*
@@ -512,7 +410,7 @@ static lv_res_t lv_gpu_nxp_pxp_blit_cover(lv_color_t * dest_buf, const lv_area_t
.srcGlobalAlphaMode = lv_img_cf_has_alpha(cf) ? kPXP_PorterDuffLocalAlpha : kPXP_PorterDuffGlobalAlpha,
.dstFactorMode = kPXP_PorterDuffFactorStraight,
.srcFactorMode = kPXP_PorterDuffFactorInversed,
.dstGlobalAlpha = recolor ? dsc->recolor_opa : 0x00,
.dstGlobalAlpha = has_recolor ? dsc->recolor_opa : 0x00,
.srcGlobalAlpha = 0xff,
.dstAlphaMode = kPXP_PorterDuffAlphaStraight, /*don't care*/
.srcAlphaMode = kPXP_PorterDuffAlphaStraight
@@ -520,22 +418,19 @@ static lv_res_t lv_gpu_nxp_pxp_blit_cover(lv_color_t * dest_buf, const lv_area_t
PXP_SetPorterDuffConfig(LV_GPU_NXP_PXP_ID, &pdConfig);
}
lv_gpu_nxp_pxp_run(); /*Start PXP task*/
return LV_RES_OK;
lv_gpu_nxp_pxp_run();
}
static lv_res_t lv_gpu_nxp_pxp_blit_cf(lv_color_t * dest_buf, const lv_area_t * dest_area,
lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area,
const lv_draw_img_dsc_t * dsc, lv_img_cf_t cf)
static void lv_pxp_blit_cf(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area, lv_coord_t src_stride,
const lv_draw_img_dsc_t * dsc, lv_img_cf_t cf)
{
lv_coord_t dest_w = lv_area_get_width(dest_area);
lv_coord_t dest_h = lv_area_get_height(dest_area);
lv_coord_t src_w = lv_area_get_width(src_area);
lv_coord_t src_h = lv_area_get_height(src_area);
PXP_Init(LV_GPU_NXP_PXP_ID);
PXP_EnableCsc1(LV_GPU_NXP_PXP_ID, false); /*Disable CSC1, it is enabled by default.*/
PXP_SetProcessBlockSize(LV_GPU_NXP_PXP_ID, kPXP_BlockSize16); /*block size 16x16 for higher performance*/
lv_gpu_nxp_pxp_reset();
pxp_as_blend_config_t asBlendConfig = {
.alpha = dsc->opa,
@@ -566,28 +461,26 @@ static lv_res_t lv_gpu_nxp_pxp_blit_cf(lv_color_t * dest_buf, const lv_area_t *
asBlendConfig.alphaMode = lv_img_cf_has_alpha(cf) ? kPXP_AlphaMultiply : kPXP_AlphaOverride;
}
PXP_SetProcessSurfaceBufferConfig(LV_GPU_NXP_PXP_ID, &psBufferConfig);
PXP_SetProcessSurfacePosition(LV_GPU_NXP_PXP_ID, 0U, 0U, dest_w - 1, dest_h - 1);
PXP_SetProcessSurfacePosition(LV_GPU_NXP_PXP_ID, 0U, 0U, dest_w - 1U, dest_h - 1U);
}
lv_coord_t src_stride = lv_area_get_width(src_area);
/*AS buffer - source image*/
pxp_as_buffer_config_t asBufferConfig = {
.pixelFormat = PXP_AS_PIXEL_FORMAT,
.bufferAddr = (uint32_t)src_buf,
.bufferAddr = (uint32_t)(src_buf + src_stride * src_area->y1 + src_area->x1),
.pitchBytes = src_stride * sizeof(lv_color_t)
};
PXP_SetAlphaSurfaceBufferConfig(LV_GPU_NXP_PXP_ID, &asBufferConfig);
PXP_SetAlphaSurfacePosition(LV_GPU_NXP_PXP_ID, 0U, 0U, dest_w - 1U, dest_h - 1U);
PXP_SetAlphaSurfacePosition(LV_GPU_NXP_PXP_ID, 0U, 0U, src_w - 1U, src_h - 1U);
PXP_SetAlphaSurfaceBlendConfig(LV_GPU_NXP_PXP_ID, &asBlendConfig);
if(lv_img_cf_is_chroma_keyed(cf)) {
lv_color_t colorKeyLow = LV_COLOR_CHROMA_KEY;
lv_color_t colorKeyHigh = LV_COLOR_CHROMA_KEY;
bool recolor = (dsc->recolor_opa != LV_OPA_TRANSP);
bool has_recolor = (dsc->recolor_opa != LV_OPA_TRANSP);
if(recolor) {
if(has_recolor) {
/* New color key after recoloring */
lv_color_t colorKey = lv_color_mix(dsc->recolor, LV_COLOR_CHROMA_KEY, dsc->recolor_opa);
@@ -595,11 +488,11 @@ static lv_res_t lv_gpu_nxp_pxp_blit_cf(lv_color_t * dest_buf, const lv_area_t *
LV_COLOR_SET_G(colorKeyLow, colorKey.ch.green != 0 ? colorKey.ch.green - 1 : 0);
LV_COLOR_SET_B(colorKeyLow, colorKey.ch.blue != 0 ? colorKey.ch.blue - 1 : 0);
#if LV_COLOR_DEPTH==16
#if LV_COLOR_DEPTH == 16
LV_COLOR_SET_R(colorKeyHigh, colorKey.ch.red != 0x1f ? colorKey.ch.red + 1 : 0x1f);
LV_COLOR_SET_G(colorKeyHigh, colorKey.ch.green != 0x3f ? colorKey.ch.green + 1 : 0x3f);
LV_COLOR_SET_B(colorKeyHigh, colorKey.ch.blue != 0x1f ? colorKey.ch.blue + 1 : 0x1f);
#else /*LV_COLOR_DEPTH==32*/
#else /*LV_COLOR_DEPTH == 32*/
LV_COLOR_SET_R(colorKeyHigh, colorKey.ch.red != 0xff ? colorKey.ch.red + 1 : 0xff);
LV_COLOR_SET_G(colorKeyHigh, colorKey.ch.green != 0xff ? colorKey.ch.green + 1 : 0xff);
LV_COLOR_SET_B(colorKeyHigh, colorKey.ch.blue != 0xff ? colorKey.ch.blue + 1 : 0xff);
@@ -624,9 +517,7 @@ static lv_res_t lv_gpu_nxp_pxp_blit_cf(lv_color_t * dest_buf, const lv_area_t *
};
PXP_SetOutputBufferConfig(LV_GPU_NXP_PXP_ID, &outputBufferConfig);
lv_gpu_nxp_pxp_run(); /* Start PXP task */
return LV_RES_OK;
lv_gpu_nxp_pxp_run();
}
#endif /*LV_USE_GPU_NXP_PXP*/

View File

@@ -6,7 +6,7 @@
/**
* MIT License
*
* Copyright 2020-2022 NXP
* Copyright 2020-2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -48,31 +48,6 @@ extern "C" {
* DEFINES
*********************/
#ifndef LV_GPU_NXP_PXP_BLIT_SIZE_LIMIT
/** Minimum area (in pixels) for image copy with 100% opacity to be handled by PXP*/
#define LV_GPU_NXP_PXP_BLIT_SIZE_LIMIT 5000
#endif
#ifndef LV_GPU_NXP_PXP_BLIT_OPA_SIZE_LIMIT
/** Minimum area (in pixels) for image copy with transparency to be handled by PXP*/
#define LV_GPU_NXP_PXP_BLIT_OPA_SIZE_LIMIT 5000
#endif
#ifndef LV_GPU_NXP_PXP_BUFF_SYNC_BLIT_SIZE_LIMIT
/** Minimum invalidated area (in pixels) to be synchronized by PXP during buffer sync */
#define LV_GPU_NXP_PXP_BUFF_SYNC_BLIT_SIZE_LIMIT 5000
#endif
#ifndef LV_GPU_NXP_PXP_FILL_SIZE_LIMIT
/** Minimum area (in pixels) to be filled by PXP with 100% opacity*/
#define LV_GPU_NXP_PXP_FILL_SIZE_LIMIT 5000
#endif
#ifndef LV_GPU_NXP_PXP_FILL_OPA_SIZE_LIMIT
/** Minimum area (in pixels) to be filled by PXP with transparency*/
#define LV_GPU_NXP_PXP_FILL_OPA_SIZE_LIMIT 5000
#endif
/**********************
* TYPEDEFS
**********************/
@@ -84,51 +59,49 @@ extern "C" {
/**
* Fill area, with optional opacity.
*
* @param[in/out] dest_buf destination buffer
* @param[in] dest_stride width (stride) of destination buffer in pixels
* @param[in] fill_area area to fill
* @param[in] color color
* @param[in] opa transparency of the color
* @retval LV_RES_OK Fill completed
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_PXP_LOG_ERRORS)
* @param[in/out] dest_buf Destination buffer
* @param[in] dest_area Area with relative coordinates of destination buffer
* @param[in] dest_stride Stride of destination buffer in pixels
* @param[in] color Color
* @param[in] opa Opacity
*/
lv_res_t lv_gpu_nxp_pxp_fill(lv_color_t * dest_buf, lv_coord_t dest_stride, const lv_area_t * fill_area,
lv_color_t color, lv_opa_t opa);
void lv_gpu_nxp_pxp_fill(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
lv_color_t color, lv_opa_t opa);
/**
* BLock Image Transfer - copy rectangular image from src_buf to dst_buf with effects.
* By default, image is copied directly, with optional opacity. This function can also
* rotate the display output buffer to a specified angle (90x step).
*
* @param[in/out] dest_buf destination buffer
* @param[in] dest_area destination area
* @param[in] dest_stride width (stride) of destination buffer in pixels
* @param[in] src_buf source buffer
* @param[in] src_area source area with absolute coordinates to draw on destination buffer
* @param[in] opa opacity of the result
* @param[in] angle display rotation angle (90x)
* @retval LV_RES_OK Fill completed
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_PXP_LOG_ERRORS)
* @param[in/out] dest_buf Destination buffer
* @param[in] dest_area Area with relative coordinates of destination buffer
* @param[in] dest_stride Stride of destination buffer in pixels
* @param[in] src_buf Source buffer
* @param[in] src_area Source area with relative coordinates of source buffer
* @param[in] src_stride Stride of source buffer in pixels
* @param[in] opa Opacity
* @param[in] angle Display rotation angle (90x)
*/
lv_res_t lv_gpu_nxp_pxp_blit(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area, lv_opa_t opa, lv_disp_rot_t angle);
void lv_gpu_nxp_pxp_blit(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area, lv_coord_t src_stride,
lv_opa_t opa, lv_disp_rot_t angle);
/**
* BLock Image Transfer - copy rectangular image from src_buf to dst_buf with transformation.
*
*
* @param[in/out] dest_buf destination buffer
* @param[in] dest_area destination area
* @param[in] dest_stride width (stride) of destination buffer in pixels
* @param[in] src_buf source buffer
* @param[in] src_area source area with absolute coordinates to draw on destination buffer
* @param[in] dsc image descriptor
* @param[in] cf color format
* @retval LV_RES_OK Fill completed
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_PXP_LOG_ERRORS)
* @param[in/out] dest_buf Destination buffer
* @param[in] dest_area Area with relative coordinates of destination buffer
* @param[in] dest_stride Stride of destination buffer in pixels
* @param[in] src_buf Source buffer
* @param[in] src_area Area with relative coordinates of source buffer
* @param[in] src_stride Stride of source buffer in pixels
* @param[in] dsc Image descriptor
* @param[in] cf Color format
*/
lv_res_t lv_gpu_nxp_pxp_blit_transform(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area, const lv_draw_img_dsc_t * dsc, lv_img_cf_t cf);
void lv_gpu_nxp_pxp_blit_transform(lv_color_t * dest_buf, const lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, const lv_area_t * src_area, lv_coord_t src_stride,
const lv_draw_img_dsc_t * dsc, lv_img_cf_t cf);
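/*
 * Usage sketch (editorial, not part of this header). Buffer and size names are
 * assumptions; the area uses coordinates relative to the destination buffer,
 * and the caller is expected to synchronize with lv_gpu_nxp_pxp_wait() before
 * using the result:
 *
 *     lv_area_t area = { .x1 = 0, .y1 = 0, .x2 = 79, .y2 = 79 };
 *     lv_gpu_nxp_pxp_fill(frame_buf, &area, FRAME_WIDTH, lv_color_black(), LV_OPA_COVER);
 *     lv_gpu_nxp_pxp_wait();
 */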
/**********************
* MACROS

View File

@@ -6,7 +6,7 @@
/**
* MIT License
*
* Copyright 2020-2022 NXP
* Copyright 2020-2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -50,9 +50,9 @@
**********************/
/**
* Clean & invalidate cache.
* Clean and invalidate cache.
*/
static void invalidate_cache(void);
static inline void invalidate_cache(void);
/**********************
* STATIC VARIABLES
@@ -70,16 +70,23 @@ static lv_nxp_pxp_cfg_t * pxp_cfg;
lv_res_t lv_gpu_nxp_pxp_init(void)
{
#if LV_USE_GPU_NXP_PXP_AUTO_INIT
pxp_cfg = lv_gpu_nxp_pxp_get_cfg();
#endif
if(!pxp_cfg || !pxp_cfg->pxp_interrupt_deinit || !pxp_cfg->pxp_interrupt_init || !pxp_cfg->pxp_run)
if(!pxp_cfg || !pxp_cfg->pxp_interrupt_deinit || !pxp_cfg->pxp_interrupt_init ||
!pxp_cfg->pxp_run || !pxp_cfg->pxp_wait)
PXP_RETURN_INV("PXP configuration error.");
PXP_Init(LV_GPU_NXP_PXP_ID);
PXP_EnableCsc1(LV_GPU_NXP_PXP_ID, false); /*Disable CSC1, it is enabled by default.*/
PXP_SetProcessBlockSize(LV_GPU_NXP_PXP_ID, kPXP_BlockSize16); /*Block size 16x16 for higher performance*/
PXP_EnableInterrupts(LV_GPU_NXP_PXP_ID, kPXP_CompleteInterruptEnable);
if(pxp_cfg->pxp_interrupt_init() != LV_RES_OK) {
PXP_DisableInterrupts(LV_GPU_NXP_PXP_ID, kPXP_CompleteInterruptEnable);
PXP_Deinit(LV_GPU_NXP_PXP_ID);
PXP_RETURN_INV("PXP interrupt init failed.");
}
@@ -90,23 +97,38 @@ lv_res_t lv_gpu_nxp_pxp_init(void)
void lv_gpu_nxp_pxp_deinit(void)
{
pxp_cfg->pxp_interrupt_deinit();
PXP_DisableInterrupts(PXP, kPXP_CompleteInterruptEnable);
PXP_DisableInterrupts(LV_GPU_NXP_PXP_ID, kPXP_CompleteInterruptEnable);
PXP_Deinit(LV_GPU_NXP_PXP_ID);
}
void lv_gpu_nxp_pxp_reset(void)
{
/* Wait for previous command to complete before resetting the registers. */
lv_gpu_nxp_pxp_wait();
PXP_ResetControl(LV_GPU_NXP_PXP_ID);
PXP_EnableCsc1(LV_GPU_NXP_PXP_ID, false); /*Disable CSC1, it is enabled by default.*/
PXP_SetProcessBlockSize(LV_GPU_NXP_PXP_ID, kPXP_BlockSize16); /*Block size 16x16 for higher performance*/
}
void lv_gpu_nxp_pxp_run(void)
{
/*Clean & invalidate cache*/
invalidate_cache();
pxp_cfg->pxp_run();
}
void lv_gpu_nxp_pxp_wait(void)
{
pxp_cfg->pxp_wait();
}
/**********************
* STATIC FUNCTIONS
**********************/
static void invalidate_cache(void)
static inline void invalidate_cache(void)
{
lv_disp_t * disp = _lv_refr_get_disp_refreshing();
if(disp->driver->clean_dcache_cb)

View File

@@ -6,7 +6,7 @@
/**
* MIT License
*
* Copyright 2020-2022 NXP
* Copyright 2020-2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -81,8 +81,11 @@ typedef struct {
/** Callback for PXP interrupt de-initialization*/
void (*pxp_interrupt_deinit)(void);
/** Callback that should start PXP and wait for operation complete*/
/** Callback for PXP start*/
void (*pxp_run)(void);
/** Callback for waiting for PXP completion*/
void (*pxp_wait)(void);
} lv_nxp_pxp_cfg_t;
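/*
 * Editorial sketch: a platform integration provides all four callbacks,
 * mirroring pxp_default_cfg in lv_gpu_nxp_pxp_osa.c. The function names below
 * are placeholders, not part of this commit:
 *
 *     static lv_nxp_pxp_cfg_t my_pxp_cfg = {
 *         .pxp_interrupt_init   = my_pxp_irq_init,
 *         .pxp_interrupt_deinit = my_pxp_irq_deinit,
 *         .pxp_run              = my_pxp_start,
 *         .pxp_wait             = my_pxp_wait_done,
 *     };
 */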
/**********************
@@ -104,10 +107,20 @@ lv_res_t lv_gpu_nxp_pxp_init(void);
void lv_gpu_nxp_pxp_deinit(void);
/**
* Start PXP job and wait for completion.
* Reset PXP device.
*/
void lv_gpu_nxp_pxp_reset(void);
/**
* Clear cache and start PXP.
*/
void lv_gpu_nxp_pxp_run(void);
/**
* Wait for PXP completion.
*/
void lv_gpu_nxp_pxp_wait(void);
/**********************
* MACROS
**********************/

View File

@@ -6,7 +6,7 @@
/**
* MIT License
*
* Copyright 2020, 2022 NXP
* Copyright 2020, 2022, 2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -65,24 +65,29 @@ static lv_res_t _lv_gpu_nxp_pxp_interrupt_init(void);
static void _lv_gpu_nxp_pxp_interrupt_deinit(void);
/**
* Start the PXP job and wait for task completion.
* Start the PXP job.
*/
static void _lv_gpu_nxp_pxp_run(void);
/**
* Wait for PXP completion.
*/
static void _lv_gpu_nxp_pxp_wait(void);
/**********************
* STATIC VARIABLES
**********************/
#if defined(SDK_OS_FREE_RTOS)
static SemaphoreHandle_t s_pxpIdle;
#else
static volatile bool s_pxpIdle;
static SemaphoreHandle_t s_pxpIdleSem;
#endif
static volatile bool s_pxpIdle;
static lv_nxp_pxp_cfg_t pxp_default_cfg = {
.pxp_interrupt_init = _lv_gpu_nxp_pxp_interrupt_init,
.pxp_interrupt_deinit = _lv_gpu_nxp_pxp_interrupt_deinit,
.pxp_run = _lv_gpu_nxp_pxp_run
.pxp_run = _lv_gpu_nxp_pxp_run,
.pxp_wait = _lv_gpu_nxp_pxp_wait,
};
/**********************
@@ -102,7 +107,7 @@ void PXP_IRQHandler(void)
if(kPXP_CompleteFlag & PXP_GetStatusFlags(LV_GPU_NXP_PXP_ID)) {
PXP_ClearStatusFlags(LV_GPU_NXP_PXP_ID, kPXP_CompleteFlag);
#if defined(SDK_OS_FREE_RTOS)
xSemaphoreGiveFromISR(s_pxpIdle, &taskAwake);
xSemaphoreGiveFromISR(s_pxpIdleSem, &taskAwake);
portYIELD_FROM_ISR(taskAwake);
#else
s_pxpIdle = true;
@@ -122,14 +127,13 @@ lv_nxp_pxp_cfg_t * lv_gpu_nxp_pxp_get_cfg(void)
static lv_res_t _lv_gpu_nxp_pxp_interrupt_init(void)
{
#if defined(SDK_OS_FREE_RTOS)
s_pxpIdle = xSemaphoreCreateBinary();
if(s_pxpIdle == NULL)
s_pxpIdleSem = xSemaphoreCreateBinary();
if(s_pxpIdleSem == NULL)
return LV_RES_INV;
NVIC_SetPriority(LV_GPU_NXP_PXP_IRQ_ID, configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY + 1);
#else
s_pxpIdle = true;
#endif
s_pxpIdle = true;
NVIC_EnableIRQ(LV_GPU_NXP_PXP_IRQ_ID);
@@ -140,21 +144,33 @@ static void _lv_gpu_nxp_pxp_interrupt_deinit(void)
{
NVIC_DisableIRQ(LV_GPU_NXP_PXP_IRQ_ID);
#if defined(SDK_OS_FREE_RTOS)
vSemaphoreDelete(s_pxpIdle);
vSemaphoreDelete(s_pxpIdleSem);
#endif
}
/**
* Function to start PXP job.
*/
static void _lv_gpu_nxp_pxp_run(void)
{
#if !defined(SDK_OS_FREE_RTOS)
s_pxpIdle = false;
#endif
PXP_EnableInterrupts(LV_GPU_NXP_PXP_ID, kPXP_CompleteInterruptEnable);
PXP_Start(LV_GPU_NXP_PXP_ID);
}
/**
* Function to wait for PXP completion.
*/
static void _lv_gpu_nxp_pxp_wait(void)
{
#if defined(SDK_OS_FREE_RTOS)
PXP_COND_STOP(!xSemaphoreTake(s_pxpIdle, portMAX_DELAY), "xSemaphoreTake failed.");
/* Return if PXP was never started, otherwise the semaphore will lock forever. */
if(s_pxpIdle == true)
return;
if(xSemaphoreTake(s_pxpIdleSem, portMAX_DELAY) == pdTRUE)
s_pxpIdle = true;
#else
while(s_pxpIdle == false) {
}

View File

@@ -1,7 +1,10 @@
CSRCS += lv_draw_vglite.c
CSRCS += lv_draw_vglite_arc.c
CSRCS += lv_draw_vglite_blend.c
CSRCS += lv_draw_vglite_line.c
CSRCS += lv_draw_vglite_rect.c
CSRCS += lv_gpu_nxp_vglite.c
CSRCS += lv_vglite_buf.c
CSRCS += lv_vglite_utils.c
DEPPATH += --dep-path $(LVGL_DIR)/$(LVGL_DIR_NAME)/src/draw/nxp/vglite
VPATH += :$(LVGL_DIR)/$(LVGL_DIR_NAME)/src/draw/nxp/vglite

View File

@@ -0,0 +1,508 @@
/**
* @file lv_draw_vglite.c
*
*/
/**
* MIT License
*
* Copyright 2022, 2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next paragraph)
* shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
/*********************
* INCLUDES
*********************/
#include "lv_draw_vglite.h"
#if LV_USE_GPU_NXP_VG_LITE
#include <math.h>
#include "lv_draw_vglite_blend.h"
#include "lv_draw_vglite_line.h"
#include "lv_draw_vglite_rect.h"
#include "lv_draw_vglite_arc.h"
#include "lv_vglite_buf.h"
#if LV_COLOR_DEPTH != 32
#include "../../../core/lv_refr.h"
#endif
/*********************
* DEFINES
*********************/
/* Minimum area (in pixels) for VG-Lite blit/fill processing. */
#ifndef LV_GPU_NXP_VG_LITE_SIZE_LIMIT
#define LV_GPU_NXP_VG_LITE_SIZE_LIMIT 5000
#endif
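/*
 * Example: with the default limit of 5000 px, a 100 x 50 px area is the smallest
 * fill/blit handed to the GPU; smaller areas fall back to the SW renderer
 * (see the size checks in lv_draw_vglite_blend() and lv_draw_vglite_img_decoded() below).
 */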
/**********************
* TYPEDEFS
**********************/
/**********************
* STATIC PROTOTYPES
**********************/
static void lv_draw_vglite_init_buf(lv_draw_ctx_t * draw_ctx);
static void lv_draw_vglite_wait_for_finish(lv_draw_ctx_t * draw_ctx);
static void lv_draw_vglite_img_decoded(lv_draw_ctx_t * draw_ctx, const lv_draw_img_dsc_t * dsc,
const lv_area_t * coords, const uint8_t * map_p, lv_img_cf_t cf);
static void lv_draw_vglite_blend(lv_draw_ctx_t * draw_ctx, const lv_draw_sw_blend_dsc_t * dsc);
static void lv_draw_vglite_line(lv_draw_ctx_t * draw_ctx, const lv_draw_line_dsc_t * dsc, const lv_point_t * point1,
const lv_point_t * point2);
static void lv_draw_vglite_rect(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_dsc_t * dsc, const lv_area_t * coords);
static lv_res_t lv_draw_vglite_bg(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_dsc_t * dsc, const lv_area_t * coords);
static lv_res_t lv_draw_vglite_border(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_dsc_t * dsc,
const lv_area_t * coords);
static lv_res_t lv_draw_vglite_outline(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_dsc_t * dsc,
const lv_area_t * coords);
static void lv_draw_vglite_arc(lv_draw_ctx_t * draw_ctx, const lv_draw_arc_dsc_t * dsc, const lv_point_t * center,
uint16_t radius, uint16_t start_angle, uint16_t end_angle);
/**********************
* STATIC VARIABLES
**********************/
/**********************
* MACROS
**********************/
/**********************
* GLOBAL FUNCTIONS
**********************/
void lv_draw_vglite_ctx_init(lv_disp_drv_t * drv, lv_draw_ctx_t * draw_ctx)
{
lv_draw_sw_init_ctx(drv, draw_ctx);
lv_draw_vglite_ctx_t * vglite_draw_ctx = (lv_draw_sw_ctx_t *)draw_ctx;
vglite_draw_ctx->base_draw.init_buf = lv_draw_vglite_init_buf;
vglite_draw_ctx->base_draw.draw_line = lv_draw_vglite_line;
vglite_draw_ctx->base_draw.draw_arc = lv_draw_vglite_arc;
vglite_draw_ctx->base_draw.draw_rect = lv_draw_vglite_rect;
vglite_draw_ctx->base_draw.draw_img_decoded = lv_draw_vglite_img_decoded;
vglite_draw_ctx->blend = lv_draw_vglite_blend;
vglite_draw_ctx->base_draw.wait_for_finish = lv_draw_vglite_wait_for_finish;
}
void lv_draw_vglite_ctx_deinit(lv_disp_drv_t * drv, lv_draw_ctx_t * draw_ctx)
{
lv_draw_sw_deinit_ctx(drv, draw_ctx);
}
/**********************
* STATIC FUNCTIONS
**********************/
/**
 * During rendering, LVGL might initialize new draw_ctxs and start drawing into
 * a separate buffer (called a layer). If the content to be rendered has "holes",
 * e.g. a rounded corner, LVGL temporarily sets the disp_drv.screen_transp flag.
 * This means the renderers should draw into an ARGB buffer.
 * With 32 bit color depth this is not a big problem, but with 16 bit color depth
 * the target pixel format is ARGB8565, which is not supported by the GPU.
 * In this case, the VG-Lite callbacks should fall back to SW rendering.
*/
static inline bool need_argb8565_support()
{
#if LV_COLOR_DEPTH != 32
lv_disp_t * disp = _lv_refr_get_disp_refreshing();
if(disp->driver->screen_transp == 1)
return true;
#endif
return false;
}
static void lv_draw_vglite_init_buf(lv_draw_ctx_t * draw_ctx)
{
lv_gpu_nxp_vglite_init_buf(draw_ctx->buf, draw_ctx->buf_area, lv_area_get_width(draw_ctx->buf_area));
}
static void lv_draw_vglite_wait_for_finish(lv_draw_ctx_t * draw_ctx)
{
vg_lite_finish();
lv_draw_sw_wait_for_finish(draw_ctx);
}
static void lv_draw_vglite_blend(lv_draw_ctx_t * draw_ctx, const lv_draw_sw_blend_dsc_t * dsc)
{
if(dsc->opa <= (lv_opa_t)LV_OPA_MIN)
return;
if(need_argb8565_support()) {
lv_draw_sw_blend_basic(draw_ctx, dsc);
return;
}
lv_area_t blend_area;
/*Let's get the blend area which is the intersection of the area to draw and the clip area*/
if(!_lv_area_intersect(&blend_area, dsc->blend_area, draw_ctx->clip_area))
return; /*Fully clipped, nothing to do*/
/*Make the blend area relative to the buffer*/
lv_area_move(&blend_area, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
bool done = false;
/*Fill/Blend only non masked, normal blended*/
if(dsc->mask_buf == NULL && dsc->blend_mode == LV_BLEND_MODE_NORMAL &&
lv_area_get_size(&blend_area) >= LV_GPU_NXP_VG_LITE_SIZE_LIMIT) {
const lv_color_t * src_buf = dsc->src_buf;
if(src_buf == NULL) {
done = (lv_gpu_nxp_vglite_fill(&blend_area, dsc->color, dsc->opa) == LV_RES_OK);
if(!done)
VG_LITE_LOG_TRACE("VG-Lite fill failed. Fallback.");
}
else {
lv_color_t * dest_buf = draw_ctx->buf;
lv_coord_t dest_stride = lv_area_get_width(draw_ctx->buf_area);
lv_area_t src_area;
src_area.x1 = blend_area.x1 - (dsc->blend_area->x1 - draw_ctx->buf_area->x1);
src_area.y1 = blend_area.y1 - (dsc->blend_area->y1 - draw_ctx->buf_area->y1);
src_area.x2 = src_area.x1 + lv_area_get_width(dsc->blend_area) - 1;
src_area.y2 = src_area.y1 + lv_area_get_height(dsc->blend_area) - 1;
lv_coord_t src_stride = lv_area_get_width(dsc->blend_area);
done = (lv_gpu_nxp_vglite_blit(dest_buf, &blend_area, dest_stride,
src_buf, &src_area, src_stride, dsc->opa) == LV_RES_OK);
if(!done)
VG_LITE_LOG_TRACE("VG-Lite blit failed. Fallback.");
}
}
if(!done)
lv_draw_sw_blend_basic(draw_ctx, dsc);
}
static void lv_draw_vglite_img_decoded(lv_draw_ctx_t * draw_ctx, const lv_draw_img_dsc_t * dsc,
const lv_area_t * coords, const uint8_t * map_p, lv_img_cf_t cf)
{
if(dsc->opa <= (lv_opa_t)LV_OPA_MIN)
return;
if(need_argb8565_support()) {
lv_draw_sw_img_decoded(draw_ctx, dsc, coords, map_p, cf);
return;
}
const lv_color_t * src_buf = (const lv_color_t *)map_p;
if(!src_buf) {
lv_draw_sw_img_decoded(draw_ctx, dsc, coords, map_p, cf);
return;
}
lv_area_t blend_area;
/*Let's get the blend area which is the intersection of the area to draw and the clip area*/
if(!_lv_area_intersect(&blend_area, coords, draw_ctx->clip_area))
return; /*Fully clipped, nothing to do*/
/*Make the blend area relative to the buffer*/
lv_area_move(&blend_area, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
bool has_mask = lv_draw_mask_is_any(&blend_area);
bool has_recolor = (dsc->recolor_opa != LV_OPA_TRANSP);
bool done = false;
if(!has_mask && !has_recolor && !lv_img_cf_is_chroma_keyed(cf) &&
lv_area_get_size(&blend_area) >= LV_GPU_NXP_VG_LITE_SIZE_LIMIT
#if LV_COLOR_DEPTH != 32
&& !lv_img_cf_has_alpha(cf)
#endif
) {
lv_color_t * dest_buf = draw_ctx->buf;
lv_coord_t dest_stride = lv_area_get_width(draw_ctx->buf_area);
lv_area_t src_area;
src_area.x1 = blend_area.x1 - (coords->x1 - draw_ctx->buf_area->x1);
src_area.y1 = blend_area.y1 - (coords->y1 - draw_ctx->buf_area->y1);
src_area.x2 = src_area.x1 + lv_area_get_width(coords) - 1;
src_area.y2 = src_area.y1 + lv_area_get_height(coords) - 1;
lv_coord_t src_stride = lv_area_get_width(coords);
done = (lv_gpu_nxp_vglite_blit_transform(dest_buf, &blend_area, dest_stride,
src_buf, &src_area, src_stride, dsc) == LV_RES_OK);
if(!done)
VG_LITE_LOG_TRACE("VG-Lite blit transform failed. Fallback.");
}
if(!done)
lv_draw_sw_img_decoded(draw_ctx, dsc, coords, map_p, cf);
}
static void lv_draw_vglite_line(lv_draw_ctx_t * draw_ctx, const lv_draw_line_dsc_t * dsc, const lv_point_t * point1,
const lv_point_t * point2)
{
if(dsc->width == 0)
return;
if(dsc->opa <= (lv_opa_t)LV_OPA_MIN)
return;
if(point1->x == point2->x && point1->y == point2->y)
return;
if(need_argb8565_support()) {
lv_draw_sw_line(draw_ctx, dsc, point1, point2);
return;
}
lv_area_t rel_clip_area;
rel_clip_area.x1 = LV_MIN(point1->x, point2->x) - dsc->width / 2;
rel_clip_area.x2 = LV_MAX(point1->x, point2->x) + dsc->width / 2;
rel_clip_area.y1 = LV_MIN(point1->y, point2->y) - dsc->width / 2;
rel_clip_area.y2 = LV_MAX(point1->y, point2->y) + dsc->width / 2;
bool is_common;
is_common = _lv_area_intersect(&rel_clip_area, &rel_clip_area, draw_ctx->clip_area);
if(!is_common)
return;
/* Make coordinates relative to the draw buffer */
lv_area_move(&rel_clip_area, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
lv_point_t rel_point1 = { point1->x - draw_ctx->buf_area->x1, point1->y - draw_ctx->buf_area->y1 };
lv_point_t rel_point2 = { point2->x - draw_ctx->buf_area->x1, point2->y - draw_ctx->buf_area->y1 };
bool done = false;
bool mask_any = lv_draw_mask_is_any(&rel_clip_area);
if(!mask_any) {
done = (lv_gpu_nxp_vglite_draw_line(&rel_point1, &rel_point2, &rel_clip_area, dsc) == LV_RES_OK);
if(!done)
VG_LITE_LOG_TRACE("VG-Lite draw line failed. Fallback.");
}
if(!done)
lv_draw_sw_line(draw_ctx, dsc, point1, point2);
}
static void lv_draw_vglite_rect(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_dsc_t * dsc, const lv_area_t * coords)
{
if(need_argb8565_support()) {
lv_draw_sw_rect(draw_ctx, dsc, coords);
return;
}
lv_draw_rect_dsc_t vglite_dsc;
lv_memcpy(&vglite_dsc, dsc, sizeof(vglite_dsc));
vglite_dsc.bg_opa = 0;
vglite_dsc.bg_img_opa = 0;
vglite_dsc.border_opa = 0;
vglite_dsc.outline_opa = 0;
#if LV_DRAW_COMPLEX
/* Draw the shadow with CPU */
lv_draw_sw_rect(draw_ctx, &vglite_dsc, coords);
vglite_dsc.shadow_opa = 0;
#endif /*LV_DRAW_COMPLEX*/
/* Draw the background */
vglite_dsc.bg_opa = dsc->bg_opa;
if(lv_draw_vglite_bg(draw_ctx, &vglite_dsc, coords) != LV_RES_OK)
lv_draw_sw_rect(draw_ctx, &vglite_dsc, coords);
vglite_dsc.bg_opa = 0;
    /* Draw the background image.
     * It will be drawn when the draw_ctx->draw_img_decoded()
     * callback is called from lv_draw_sw_rect().
     */
vglite_dsc.bg_img_opa = dsc->bg_img_opa;
lv_draw_sw_rect(draw_ctx, &vglite_dsc, coords);
vglite_dsc.bg_img_opa = 0;
/* Draw the border */
vglite_dsc.border_opa = dsc->border_opa;
if(lv_draw_vglite_border(draw_ctx, &vglite_dsc, coords) != LV_RES_OK)
lv_draw_sw_rect(draw_ctx, &vglite_dsc, coords);
vglite_dsc.border_opa = 0;
/* Draw the outline */
vglite_dsc.outline_opa = dsc->outline_opa;
if(lv_draw_vglite_outline(draw_ctx, &vglite_dsc, coords) != LV_RES_OK)
lv_draw_sw_rect(draw_ctx, &vglite_dsc, coords);
}
static lv_res_t lv_draw_vglite_bg(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_dsc_t * dsc, const lv_area_t * coords)
{
if(dsc->bg_opa <= (lv_opa_t)LV_OPA_MIN)
return LV_RES_INV;
lv_area_t rel_coords;
lv_area_copy(&rel_coords, coords);
/*If the border fully covers make the bg area 1px smaller to avoid artifacts on the corners*/
if(dsc->border_width > 1 && dsc->border_opa >= (lv_opa_t)LV_OPA_MAX && dsc->radius != 0) {
rel_coords.x1 += (dsc->border_side & LV_BORDER_SIDE_LEFT) ? 1 : 0;
rel_coords.y1 += (dsc->border_side & LV_BORDER_SIDE_TOP) ? 1 : 0;
rel_coords.x2 -= (dsc->border_side & LV_BORDER_SIDE_RIGHT) ? 1 : 0;
rel_coords.y2 -= (dsc->border_side & LV_BORDER_SIDE_BOTTOM) ? 1 : 0;
}
/* Make coordinates relative to draw buffer */
lv_area_move(&rel_coords, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
lv_area_t rel_clip_area;
lv_area_copy(&rel_clip_area, draw_ctx->clip_area);
lv_area_move(&rel_clip_area, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
lv_area_t clipped_coords;
if(!_lv_area_intersect(&clipped_coords, &rel_coords, &rel_clip_area))
return LV_RES_INV;
bool mask_any = lv_draw_mask_is_any(&rel_coords);
lv_grad_dir_t grad_dir = dsc->bg_grad.dir;
lv_color_t bg_color = (grad_dir == (lv_grad_dir_t)LV_GRAD_DIR_NONE) ?
dsc->bg_color : dsc->bg_grad.stops[0].color;
if(bg_color.full == dsc->bg_grad.stops[1].color.full)
grad_dir = LV_GRAD_DIR_NONE;
/*
     * Simplest case: a plain rectangle (no mask, no radius, no gradient)
     * is handled by draw_ctx->blend().
     *
     * Complex case: gradient or radius, but no mask.
*/
if(!mask_any && ((dsc->radius != 0) || (grad_dir != (lv_grad_dir_t)LV_GRAD_DIR_NONE))) {
lv_res_t res = lv_gpu_nxp_vglite_draw_bg(&rel_coords, &rel_clip_area, dsc);
if(res != LV_RES_OK)
VG_LITE_LOG_TRACE("VG-Lite draw bg failed. Fallback.");
return res;
}
return LV_RES_INV;
}
static lv_res_t lv_draw_vglite_border(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_dsc_t * dsc,
const lv_area_t * coords)
{
if(dsc->border_opa <= (lv_opa_t)LV_OPA_MIN)
return LV_RES_INV;
if(dsc->border_width == 0)
return LV_RES_INV;
if(dsc->border_post)
return LV_RES_INV;
if(dsc->border_side != (lv_border_side_t)LV_BORDER_SIDE_FULL)
return LV_RES_INV;
lv_area_t rel_coords;
lv_coord_t border_width = dsc->border_width;
/* Move border inwards to align with software rendered border */
rel_coords.x1 = coords->x1 + ceil(border_width / 2.0f);
rel_coords.x2 = coords->x2 - floor(border_width / 2.0f);
rel_coords.y1 = coords->y1 + ceil(border_width / 2.0f);
rel_coords.y2 = coords->y2 - floor(border_width / 2.0f);
/* Make coordinates relative to the draw buffer */
lv_area_move(&rel_coords, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
lv_area_t rel_clip_area;
lv_area_copy(&rel_clip_area, draw_ctx->clip_area);
lv_area_move(&rel_clip_area, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
lv_res_t res = lv_gpu_nxp_vglite_draw_border_generic(&rel_coords, &rel_clip_area, dsc, true);
if(res != LV_RES_OK)
VG_LITE_LOG_TRACE("VG-Lite draw border failed. Fallback.");
return res;
}
static lv_res_t lv_draw_vglite_outline(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_dsc_t * dsc,
const lv_area_t * coords)
{
if(dsc->outline_opa <= (lv_opa_t)LV_OPA_MIN)
return LV_RES_INV;
if(dsc->outline_width == 0)
return LV_RES_INV;
/* Move outline outwards to align with software rendered outline */
lv_coord_t outline_pad = dsc->outline_pad - 1;
lv_area_t rel_coords;
rel_coords.x1 = coords->x1 - outline_pad - floor(dsc->outline_width / 2.0f);
rel_coords.x2 = coords->x2 + outline_pad + ceil(dsc->outline_width / 2.0f);
rel_coords.y1 = coords->y1 - outline_pad - floor(dsc->outline_width / 2.0f);
rel_coords.y2 = coords->y2 + outline_pad + ceil(dsc->outline_width / 2.0f);
/* Make coordinates relative to the draw buffer */
lv_area_move(&rel_coords, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
lv_area_t rel_clip_area;
lv_area_copy(&rel_clip_area, draw_ctx->clip_area);
lv_area_move(&rel_clip_area, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
lv_res_t res = lv_gpu_nxp_vglite_draw_border_generic(&rel_coords, &rel_clip_area, dsc, false);
if(res != LV_RES_OK)
VG_LITE_LOG_TRACE("VG-Lite draw outline failed. Fallback.");
return res;
}
static void lv_draw_vglite_arc(lv_draw_ctx_t * draw_ctx, const lv_draw_arc_dsc_t * dsc, const lv_point_t * center,
uint16_t radius, uint16_t start_angle, uint16_t end_angle)
{
bool done = false;
#if LV_DRAW_COMPLEX
if(dsc->opa <= (lv_opa_t)LV_OPA_MIN)
return;
if(dsc->width == 0)
return;
if(start_angle == end_angle)
return;
if(need_argb8565_support()) {
lv_draw_sw_arc(draw_ctx, dsc, center, radius, start_angle, end_angle);
return;
}
/* Make coordinates relative to the draw buffer */
lv_point_t rel_center = {center->x - draw_ctx->buf_area->x1, center->y - draw_ctx->buf_area->y1};
lv_area_t rel_clip_area;
lv_area_copy(&rel_clip_area, draw_ctx->clip_area);
lv_area_move(&rel_clip_area, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
done = (lv_gpu_nxp_vglite_draw_arc(&rel_center, (int32_t)radius, (int32_t)start_angle, (int32_t)end_angle,
&rel_clip_area, dsc) == LV_RES_OK);
if(!done)
VG_LITE_LOG_TRACE("VG-Lite draw arc failed. Fallback.");
#endif/*LV_DRAW_COMPLEX*/
if(!done)
lv_draw_sw_arc(draw_ctx, dsc, center, radius, start_angle, end_angle);
}
#endif /*LV_USE_GPU_NXP_VG_LITE*/

View File

@@ -0,0 +1,72 @@
/**
* @file lv_draw_vglite.h
*
*/
/**
* MIT License
*
* Copyright 2022, 2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next paragraph)
* shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef LV_DRAW_VGLITE_H
#define LV_DRAW_VGLITE_H
#ifdef __cplusplus
extern "C" {
#endif
/*********************
* INCLUDES
*********************/
#include "../../../lv_conf_internal.h"
#if LV_USE_GPU_NXP_VG_LITE
#include "../../sw/lv_draw_sw.h"
/*********************
* DEFINES
*********************/
/**********************
* TYPEDEFS
**********************/
typedef lv_draw_sw_ctx_t lv_draw_vglite_ctx_t;
/**********************
* GLOBAL PROTOTYPES
**********************/
void lv_draw_vglite_ctx_init(struct _lv_disp_drv_t * drv, lv_draw_ctx_t * draw_ctx);
void lv_draw_vglite_ctx_deinit(struct _lv_disp_drv_t * drv, lv_draw_ctx_t * draw_ctx);
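/*
 * Usage sketch (assumes the standard LVGL v8 draw_ctx hooks on lv_disp_drv_t; it is not
 * part of this change): a display driver plugs in the VG-Lite draw context with
 *
 *     disp_drv.draw_ctx_init   = lv_draw_vglite_ctx_init;
 *     disp_drv.draw_ctx_deinit = lv_draw_vglite_ctx_deinit;
 *     disp_drv.draw_ctx_size   = sizeof(lv_draw_vglite_ctx_t);
 */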
/**********************
* MACROS
**********************/
#endif /*LV_USE_GPU_NXP_VG_LITE*/
#ifdef __cplusplus
} /*extern "C"*/
#endif
#endif /*LV_DRAW_VGLITE_H*/

View File

@@ -6,7 +6,7 @@
/**
* MIT License
*
* Copyright 2021, 2022 NXP
* Copyright 2021-2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -34,7 +34,8 @@
#include "lv_draw_vglite_arc.h"
#if LV_USE_GPU_NXP_VG_LITE
#include "math.h"
#include "lv_vglite_buf.h"
#include <math.h>
/*********************
* DEFINES
@@ -88,7 +89,7 @@ typedef struct _cubic_cont_pt {
static void rotate_point(int32_t angle, int32_t * x, int32_t * y);
static void add_arc_path(int32_t * arc_path, int * pidx, int32_t radius,
int32_t start_angle, int32_t end_angle, lv_point_t center, bool cw);
int32_t start_angle, int32_t end_angle, const lv_point_t * center, bool cw);
/**********************
* STATIC VARIABLES
@@ -102,31 +103,20 @@ static void add_arc_path(int32_t * arc_path, int * pidx, int32_t radius,
* GLOBAL FUNCTIONS
**********************/
lv_res_t lv_gpu_nxp_vglite_draw_arc(lv_draw_ctx_t * draw_ctx, const lv_draw_arc_dsc_t * dsc, const lv_point_t * center,
int32_t radius, int32_t start_angle, int32_t end_angle)
lv_res_t lv_gpu_nxp_vglite_draw_arc(const lv_point_t * center, int32_t radius, int32_t start_angle, int32_t end_angle,
const lv_area_t * clip_area, const lv_draw_arc_dsc_t * dsc)
{
vg_lite_buffer_t vgbuf;
vg_lite_error_t err = VG_LITE_SUCCESS;
lv_color32_t col32 = {.full = lv_color_to32(dsc->color)}; /*Convert color to RGBA8888*/
lv_coord_t dest_width = lv_area_get_width(draw_ctx->buf_area);
lv_coord_t dest_height = lv_area_get_height(draw_ctx->buf_area);
vg_lite_path_t path;
vg_lite_color_t vgcol; /* vglite takes ABGR */
vg_lite_matrix_t matrix;
lv_opa_t opa = dsc->opa;
bool donut = ((end_angle - start_angle) % 360 == 0) ? true : false;
lv_point_t clip_center = {center->x - draw_ctx->buf_area->x1, center->y - draw_ctx->buf_area->y1};
vg_lite_buffer_t * vgbuf = lv_vglite_get_dest_buf();
/* path: max size = 16 cubic bezier (7 words each) */
int32_t arc_path[16 * 7];
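    /* A VLC_OP_CUBIC entry takes 7 words (1 opcode + 3 points x 2 coordinates),
     * so the worst case of 16 cubic beziers fits in 16 * 7 = 112 words. */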
lv_memset_00(arc_path, sizeof(arc_path));
/*** Init destination buffer ***/
if(lv_vglite_init_buf(&vgbuf, (uint32_t)dest_width, (uint32_t)dest_height, (uint32_t)dest_width * sizeof(lv_color_t),
(const lv_color_t *)draw_ctx->buf, false) != LV_RES_OK)
VG_LITE_RETURN_INV("Init buffer failed.");
/*** Init path ***/
lv_coord_t width = dsc->width; /* inner arc radius = outer arc radius - width */
if(width > (lv_coord_t)radius)
@@ -140,11 +130,11 @@ lv_res_t lv_gpu_nxp_vglite_draw_arc(lv_draw_ctx_t * draw_ctx, const lv_draw_arc_
cp_y = 0;
rotate_point(start_angle, &cp_x, &cp_y);
arc_path[pidx++] = VLC_OP_MOVE;
arc_path[pidx++] = clip_center.x + cp_x;
arc_path[pidx++] = clip_center.y + cp_y;
arc_path[pidx++] = center->x + cp_x;
arc_path[pidx++] = center->y + cp_y;
/* draw 1-5 outer quarters */
add_arc_path(arc_path, &pidx, radius, start_angle, end_angle, clip_center, true);
add_arc_path(arc_path, &pidx, radius, start_angle, end_angle, center, true);
if(donut) {
/* close outer circle */
@@ -152,24 +142,24 @@ lv_res_t lv_gpu_nxp_vglite_draw_arc(lv_draw_ctx_t * draw_ctx, const lv_draw_arc_
cp_y = 0;
rotate_point(start_angle, &cp_x, &cp_y);
arc_path[pidx++] = VLC_OP_LINE;
arc_path[pidx++] = clip_center.x + cp_x;
arc_path[pidx++] = clip_center.y + cp_y;
arc_path[pidx++] = center->x + cp_x;
arc_path[pidx++] = center->y + cp_y;
/* start inner circle */
cp_x = radius - width;
cp_y = 0;
rotate_point(start_angle, &cp_x, &cp_y);
arc_path[pidx++] = VLC_OP_MOVE;
arc_path[pidx++] = clip_center.x + cp_x;
arc_path[pidx++] = clip_center.y + cp_y;
arc_path[pidx++] = center->x + cp_x;
arc_path[pidx++] = center->y + cp_y;
}
else if(dsc->rounded != 0U) { /* 1st rounded arc ending */
cp_x = radius - width / 2;
cp_y = 0;
rotate_point(end_angle, &cp_x, &cp_y);
lv_point_t round_center = {clip_center.x + cp_x, clip_center.y + cp_y};
lv_point_t round_center = {center->x + cp_x, center->y + cp_y};
add_arc_path(arc_path, &pidx, width / 2, end_angle, (end_angle + 180),
round_center, true);
&round_center, true);
}
else { /* 1st flat ending */
@@ -177,12 +167,12 @@ lv_res_t lv_gpu_nxp_vglite_draw_arc(lv_draw_ctx_t * draw_ctx, const lv_draw_arc_
cp_y = 0;
rotate_point(end_angle, &cp_x, &cp_y);
arc_path[pidx++] = VLC_OP_LINE;
arc_path[pidx++] = clip_center.x + cp_x;
arc_path[pidx++] = clip_center.y + cp_y;
arc_path[pidx++] = center->x + cp_x;
arc_path[pidx++] = center->y + cp_y;
}
/* draw 1-5 inner quarters */
add_arc_path(arc_path, &pidx, radius - width, start_angle, end_angle, clip_center, false);
add_arc_path(arc_path, &pidx, radius - width, start_angle, end_angle, center, false);
/* last control point of curve */
if(donut) { /* close the loop */
@@ -190,17 +180,17 @@ lv_res_t lv_gpu_nxp_vglite_draw_arc(lv_draw_ctx_t * draw_ctx, const lv_draw_arc_
cp_y = 0;
rotate_point(start_angle, &cp_x, &cp_y);
arc_path[pidx++] = VLC_OP_LINE;
arc_path[pidx++] = clip_center.x + cp_x;
arc_path[pidx++] = clip_center.y + cp_y;
arc_path[pidx++] = center->x + cp_x;
arc_path[pidx++] = center->y + cp_y;
}
else if(dsc->rounded != 0U) { /* 2nd rounded arc ending */
cp_x = radius - width / 2;
cp_y = 0;
rotate_point(start_angle, &cp_x, &cp_y);
lv_point_t round_center = {clip_center.x + cp_x, clip_center.y + cp_y};
lv_point_t round_center = {center->x + cp_x, center->y + cp_y};
add_arc_path(arc_path, &pidx, width / 2, (start_angle + 180), (start_angle + 360),
round_center, true);
&round_center, true);
}
else { /* 2nd flat ending */
@@ -208,46 +198,30 @@ lv_res_t lv_gpu_nxp_vglite_draw_arc(lv_draw_ctx_t * draw_ctx, const lv_draw_arc_
cp_y = 0;
rotate_point(start_angle, &cp_x, &cp_y);
arc_path[pidx++] = VLC_OP_LINE;
arc_path[pidx++] = clip_center.x + cp_x;
arc_path[pidx++] = clip_center.y + cp_y;
arc_path[pidx++] = center->x + cp_x;
arc_path[pidx++] = center->y + cp_y;
}
arc_path[pidx++] = VLC_OP_END;
err = vg_lite_init_path(&path, VG_LITE_S32, VG_LITE_HIGH, (uint32_t)pidx * sizeof(int32_t), arc_path,
(vg_lite_float_t) draw_ctx->clip_area->x1, (vg_lite_float_t) draw_ctx->clip_area->y1,
((vg_lite_float_t) draw_ctx->clip_area->x2) + 1.0f, ((vg_lite_float_t) draw_ctx->clip_area->y2) + 1.0f);
(vg_lite_float_t)clip_area->x1, (vg_lite_float_t)clip_area->y1,
((vg_lite_float_t)clip_area->x2) + 1.0f, ((vg_lite_float_t)clip_area->y2) + 1.0f);
VG_LITE_ERR_RETURN_INV(err, "Init path failed.");
/* set rotation angle */
vg_lite_buffer_format_t color_format = LV_COLOR_DEPTH == 16 ? VG_LITE_BGRA8888 : VG_LITE_ABGR8888;
if(lv_vglite_premult_and_swizzle(&vgcol, col32, dsc->opa, color_format) != LV_RES_OK)
VG_LITE_RETURN_INV("Premultiplication and swizzle failed.");
vg_lite_matrix_t matrix;
vg_lite_identity(&matrix);
if(opa <= (lv_opa_t)LV_OPA_MAX) {
/* Only pre-multiply color if hardware pre-multiplication is not present */
if(!vg_lite_query_feature(gcFEATURE_BIT_VG_PE_PREMULTIPLY)) {
col32.ch.red = (uint8_t)(((uint16_t)col32.ch.red * opa) >> 8);
col32.ch.green = (uint8_t)(((uint16_t)col32.ch.green * opa) >> 8);
col32.ch.blue = (uint8_t)(((uint16_t)col32.ch.blue * opa) >> 8);
}
col32.ch.alpha = opa;
}
#if LV_COLOR_DEPTH==16
vgcol = col32.full;
#else /*LV_COLOR_DEPTH==32*/
vgcol = ((uint32_t)col32.ch.alpha << 24) | ((uint32_t)col32.ch.blue << 16) | ((uint32_t)col32.ch.green << 8) |
(uint32_t)col32.ch.red;
#endif
/*Clean & invalidate cache*/
lv_vglite_invalidate_cache();
/*** Draw arc ***/
err = vg_lite_draw(&vgbuf, &path, VG_LITE_FILL_NON_ZERO, &matrix, VG_LITE_BLEND_SRC_OVER, vgcol);
err = vg_lite_draw(vgbuf, &path, VG_LITE_FILL_NON_ZERO, &matrix, VG_LITE_BLEND_SRC_OVER, vgcol);
VG_LITE_ERR_RETURN_INV(err, "Draw arc failed.");
err = vg_lite_finish();
VG_LITE_ERR_RETURN_INV(err, "Finish failed.");
if(lv_vglite_run() != LV_RES_OK)
VG_LITE_RETURN_INV("Run failed.");
err = vg_lite_clear_path(&path);
VG_LITE_ERR_RETURN_INV(err, "Clear path failed.");
@@ -564,50 +538,50 @@ static void get_arc_control_points(vg_arc * arc, bool start)
* center: (in) the center of the circle in draw coordinates
* cw: (in) true if arc is clockwise
*/
static void add_split_arc_path(int32_t * arc_path, int * pidx, vg_arc * q_arc, lv_point_t center, bool cw)
static void add_split_arc_path(int32_t * arc_path, int * pidx, vg_arc * q_arc, const lv_point_t * center, bool cw)
{
/* assumes first control point already in array arc_path[] */
int idx = *pidx;
if(cw) {
#if BEZIER_DBG_CONTROL_POINTS
arc_path[idx++] = VLC_OP_LINE;
arc_path[idx++] = q_arc->p1x + center.x;
arc_path[idx++] = q_arc->p1y + center.y;
arc_path[idx++] = q_arc->p1x + center->x;
arc_path[idx++] = q_arc->p1y + center->y;
arc_path[idx++] = VLC_OP_LINE;
arc_path[idx++] = q_arc->p2x + center.x;
arc_path[idx++] = q_arc->p2y + center.y;
arc_path[idx++] = q_arc->p2x + center->x;
arc_path[idx++] = q_arc->p2y + center->y;
arc_path[idx++] = VLC_OP_LINE;
arc_path[idx++] = q_arc->p3x + center.x;
arc_path[idx++] = q_arc->p3y + center.y;
arc_path[idx++] = q_arc->p3x + center->x;
arc_path[idx++] = q_arc->p3y + center->y;
#else
arc_path[idx++] = VLC_OP_CUBIC;
arc_path[idx++] = q_arc->p1x + center.x;
arc_path[idx++] = q_arc->p1y + center.y;
arc_path[idx++] = q_arc->p2x + center.x;
arc_path[idx++] = q_arc->p2y + center.y;
arc_path[idx++] = q_arc->p3x + center.x;
arc_path[idx++] = q_arc->p3y + center.y;
arc_path[idx++] = q_arc->p1x + center->x;
arc_path[idx++] = q_arc->p1y + center->y;
arc_path[idx++] = q_arc->p2x + center->x;
arc_path[idx++] = q_arc->p2y + center->y;
arc_path[idx++] = q_arc->p3x + center->x;
arc_path[idx++] = q_arc->p3y + center->y;
#endif
}
else { /* reverse points order when counter-clockwise */
#if BEZIER_DBG_CONTROL_POINTS
arc_path[idx++] = VLC_OP_LINE;
arc_path[idx++] = q_arc->p2x + center.x;
arc_path[idx++] = q_arc->p2y + center.y;
arc_path[idx++] = q_arc->p2x + center->x;
arc_path[idx++] = q_arc->p2y + center->y;
arc_path[idx++] = VLC_OP_LINE;
arc_path[idx++] = q_arc->p1x + center.x;
arc_path[idx++] = q_arc->p1y + center.y;
arc_path[idx++] = q_arc->p1x + center->x;
arc_path[idx++] = q_arc->p1y + center->y;
arc_path[idx++] = VLC_OP_LINE;
arc_path[idx++] = q_arc->p0x + center.x;
arc_path[idx++] = q_arc->p0y + center.y;
arc_path[idx++] = q_arc->p0x + center->x;
arc_path[idx++] = q_arc->p0y + center->y;
#else
arc_path[idx++] = VLC_OP_CUBIC;
arc_path[idx++] = q_arc->p2x + center.x;
arc_path[idx++] = q_arc->p2y + center.y;
arc_path[idx++] = q_arc->p1x + center.x;
arc_path[idx++] = q_arc->p1y + center.y;
arc_path[idx++] = q_arc->p0x + center.x;
arc_path[idx++] = q_arc->p0y + center.y;
arc_path[idx++] = q_arc->p2x + center->x;
arc_path[idx++] = q_arc->p2y + center->y;
arc_path[idx++] = q_arc->p1x + center->x;
arc_path[idx++] = q_arc->p1y + center->y;
arc_path[idx++] = q_arc->p0x + center->x;
arc_path[idx++] = q_arc->p0y + center->y;
#endif
}
    /* update index in path array */
@@ -615,7 +589,7 @@ static void add_split_arc_path(int32_t * arc_path, int * pidx, vg_arc * q_arc, l
}
static void add_arc_path(int32_t * arc_path, int * pidx, int32_t radius,
int32_t start_angle, int32_t end_angle, lv_point_t center, bool cw)
int32_t start_angle, int32_t end_angle, const lv_point_t * center, bool cw)
{
/* set number of arcs to draw */
vg_arc q_arc;

View File

@@ -6,7 +6,7 @@
/**
* MIT License
*
* Copyright 2021, 2022 NXP
* Copyright 2021-2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -40,7 +40,7 @@ extern "C" {
#include "../../../lv_conf_internal.h"
#if LV_USE_GPU_NXP_VG_LITE
#include "lv_gpu_nxp_vglite.h"
#include "lv_vglite_utils.h"
/*********************
* DEFINES
@@ -54,17 +54,21 @@ extern "C" {
* GLOBAL PROTOTYPES
**********************/
/***
/**
* Draw arc shape with effects
* @param draw_ctx drawing context
* @param dsc the arc description structure (width, rounded ending, opacity)
* @param center the coordinates of the arc center
* @param radius the radius of external arc
* @param start_angle the starting angle in degrees
* @param end_angle the ending angle in degrees
*
* @param[in] center Arc center with relative coordinates
* @param[in] radius Radius of external arc
* @param[in] start_angle Starting angle in degrees
* @param[in] end_angle Ending angle in degrees
* @param[in] clip_area Clipping area with relative coordinates to dest buff
* @param[in] dsc Arc description structure (width, rounded ending, opacity)
*
* @retval LV_RES_OK Draw completed
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_VG_LITE_LOG_ERRORS)
*/
lv_res_t lv_gpu_nxp_vglite_draw_arc(lv_draw_ctx_t * draw_ctx, const lv_draw_arc_dsc_t * dsc, const lv_point_t * center,
int32_t radius, int32_t start_angle, int32_t end_angle);
lv_res_t lv_gpu_nxp_vglite_draw_arc(const lv_point_t * center, int32_t radius, int32_t start_angle, int32_t end_angle,
const lv_area_t * clip_area, const lv_draw_arc_dsc_t * dsc);
/**********************
* MACROS

View File

@@ -6,7 +6,7 @@
/**
* MIT License
*
* Copyright 2020-2022 NXP
* Copyright 2020-2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -34,12 +34,19 @@
#include "lv_draw_vglite_blend.h"
#if LV_USE_GPU_NXP_VG_LITE
#include "lv_vglite_buf.h"
/*********************
* DEFINES
*********************/
/* Enable BLIT quality degradation workaround for RT595, recommended for screen's dimension > 352 pixels */
/** Stride in px required by VG-Lite HW*/
#define LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX 16U
/**
* Enable BLIT quality degradation workaround for RT595,
 * recommended when a screen dimension is greater than 352 pixels.
*/
#define RT595_BLIT_WRKRND_ENABLED 1
/* Internal compound symbol */
@@ -51,12 +58,13 @@
#define VG_LITE_BLIT_SPLIT_ENABLED 0
#endif
/**
* BLIT split threshold - BLITs with width or height higher than this value will be done
* in multiple steps. Value must be 16-aligned. Don't change.
*/
#define LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR 352
#if VG_LITE_BLIT_SPLIT_ENABLED
/**
* BLIT split threshold - BLITs with width or height higher than this value will be done
* in multiple steps. Value must be 16-aligned. Don't change.
*/
#define LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR 352
#endif
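/*
 * Worked example for lv_vglite_blit_split() below: a 480 px wide source starting at
 * x1 = 8 needs (8 + 480 + 352 - 1) / 352 = 2 horizontal tiles, following the tile-count
 * formula (src_area->x1 + width + LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1) / LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR.
 */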
/**********************
* TYPEDEFS
@@ -67,63 +75,87 @@
**********************/
/**
* BLock Image Transfer - single direct BLIT.
* Blit single image, with optional opacity.
*
* @param[in] dest_area Area with relative coordinates of destination buffer
* @param[in] src_area Source area with relative coordinates of source buffer
* @param[in] opa Opacity
*
* @param[in] blit Description of the transfer
* @retval LV_RES_OK Transfer complete
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_VG_LITE_LOG_ERRORS)
*/
static lv_res_t _lv_gpu_nxp_vglite_blit_single(lv_gpu_nxp_vglite_blit_info_t * blit);
static lv_res_t lv_vglite_blit_single(const lv_area_t * dest_area, const lv_area_t * src_area, lv_opa_t opa);
/**
* Check source memory and stride alignment.
*
* @param[in] src_buf Source buffer
* @param[in] src_stride Stride of source buffer in pixels
*
* @retval LV_RES_OK Alignment OK
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_VG_LITE_LOG_ERRORS)
*/
static lv_res_t check_src_alignment(const lv_color_t * src_buf, lv_coord_t src_stride);
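/*
 * For example, with LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX == 16, a source stride of 240 px
 * would satisfy the stride requirement while 250 px would not (assumed behavior of the
 * check; its implementation is not part of this hunk).
 */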
/**
* Creates matrix that translates to origin of given destination area.
*
* @param[in] dest_area Area with relative coordinates of destination buffer
*/
static inline void lv_vglite_set_translation_matrix(const lv_area_t * dest_area);
/**
* Creates matrix that translates to origin of given destination area with transformation (scale or rotate).
*
* @param[in] dest_area Area with relative coordinates of destination buffer
* @param[in] dsc Image descriptor
*/
static inline void lv_vglite_set_transformation_matrix(const lv_area_t * dest_area, const lv_draw_img_dsc_t * dsc);
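/*
 * Sketch of the expected matrix setup in 'vgmatrix' (it mirrors the matrix code removed
 * from lv_vglite_blit_single(); the static definitions are not shown in this hunk):
 *
 *     vg_lite_identity(&vgmatrix);
 *     vg_lite_translate(dest_area->x1, dest_area->y1, &vgmatrix);
 *     // transformation variant only:
 *     vg_lite_translate(dsc->pivot.x, dsc->pivot.y, &vgmatrix);
 *     vg_lite_rotate(dsc->angle / 10.0f, &vgmatrix);   // angle is in 0.1 degree units
 *     vg_lite_scale(dsc->zoom / (float)LV_IMG_ZOOM_NONE,
 *                   dsc->zoom / (float)LV_IMG_ZOOM_NONE, &vgmatrix);
 *     vg_lite_translate(-dsc->pivot.x, -dsc->pivot.y, &vgmatrix);
 */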
#if VG_LITE_BLIT_SPLIT_ENABLED
/**
* Move buffer pointer as close as possible to area, but with respect to alignment requirements. X-axis only.
*
* @param[in,out] area Area to be updated
* @param[in,out] buf Pointer to be updated
*/
static void _align_x(lv_area_t * area, lv_color_t ** buf);
/**
* Move buffer pointer as close as possible to area, but with respect to alignment requirements. X-axis only.
*
* @param[in/out] area Area to be updated
* @param[in/out] buf Pointer to be updated
*/
static void align_x(lv_area_t * area, lv_color_t ** buf);
/**
* Move buffer pointer to the area start and update variables, Y-axis only.
*
* @param[in,out] area Area to be updated
* @param[in,out] buf Pointer to be updated
* @param[in] stridePx Buffer stride in pixels
*/
static void _align_y(lv_area_t * area, lv_color_t ** buf, uint32_t stridePx);
/**
* Move buffer pointer to the area start and update variables, Y-axis only.
*
* @param[in/out] area Area to be updated
* @param[in/out] buf Pointer to be updated
* @param[in] stride Buffer stride in pixels
*/
static void align_y(lv_area_t * area, lv_color_t ** buf, lv_coord_t stride);
/**
* Software BLIT as a fall-back scenario.
*
* @param[in] blit BLIT configuration
*/
static void _sw_blit(lv_gpu_nxp_vglite_blit_info_t * blit);
/**
* Verify BLIT structure - widths, stride, pointer alignment
*
* @param[in] blit BLIT configuration
* @retval LV_RES_OK
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_VG_LITE_LOG_ERRORS)
*/
static lv_res_t _lv_gpu_nxp_vglite_check_blit(lv_gpu_nxp_vglite_blit_info_t * blit);
/**
* BLock Image Transfer - split BLIT.
*
* @param[in] blit BLIT configuration
* @retval LV_RES_OK Transfer complete
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_VG_LITE_LOG_ERRORS)
*/
static lv_res_t _lv_gpu_nxp_vglite_blit_split(lv_gpu_nxp_vglite_blit_info_t * blit);
/**
* Blit image split in tiles, with optional opacity.
*
* @param[in/out] dest_buf Destination buffer
* @param[in] dest_area Area with relative coordinates of destination buffer
* @param[in] dest_stride Stride of destination buffer in pixels
* @param[in] src_buf Source buffer
* @param[in] src_area Source area with relative coordinates of source buffer
* @param[in] src_stride Stride of source buffer in pixels
* @param[in] opa Opacity
*
* @retval LV_RES_OK Transfer complete
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_VG_LITE_LOG_ERRORS)
*/
static lv_res_t lv_vglite_blit_split(lv_color_t * dest_buf, lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, lv_area_t * src_area, lv_coord_t src_stride,
lv_opa_t opa);
#endif
/**********************
* STATIC VARIABLES
**********************/
static vg_lite_matrix_t vgmatrix;
/**********************
* MACROS
**********************/
@@ -132,98 +164,57 @@ static lv_res_t _lv_gpu_nxp_vglite_blit_single(lv_gpu_nxp_vglite_blit_info_t * b
* GLOBAL FUNCTIONS
**********************/
lv_res_t lv_gpu_nxp_vglite_fill(lv_color_t * dest_buf, lv_coord_t dest_width, lv_coord_t dest_height,
const lv_area_t * fill_area, lv_color_t color, lv_opa_t opa)
lv_res_t lv_gpu_nxp_vglite_fill(const lv_area_t * dest_area, lv_color_t color, lv_opa_t opa)
{
uint32_t area_size = lv_area_get_size(fill_area);
lv_coord_t area_w = lv_area_get_width(fill_area);
lv_coord_t area_h = lv_area_get_height(fill_area);
if(opa >= (lv_opa_t)LV_OPA_MAX) {
if(area_size < LV_GPU_NXP_VG_LITE_FILL_SIZE_LIMIT)
VG_LITE_RETURN_INV("Area size %d smaller than limit %d.", area_size, LV_GPU_NXP_VG_LITE_FILL_SIZE_LIMIT);
}
else {
if(area_size < LV_GPU_NXP_VG_LITE_FILL_OPA_SIZE_LIMIT)
VG_LITE_RETURN_INV("Area size %d smaller than limit %d.", area_size, LV_GPU_NXP_VG_LITE_FILL_OPA_SIZE_LIMIT);
}
vg_lite_buffer_t vgbuf;
vg_lite_rectangle_t rect;
vg_lite_error_t err = VG_LITE_SUCCESS;
lv_color32_t col32 = {.full = lv_color_to32(color)}; /*Convert color to RGBA8888*/
vg_lite_color_t vgcol; /* vglite takes ABGR */
vg_lite_buffer_t * vgbuf = lv_vglite_get_dest_buf();
if(lv_vglite_init_buf(&vgbuf, (uint32_t)dest_width, (uint32_t)dest_height, (uint32_t)dest_width * sizeof(lv_color_t),
(const lv_color_t *)dest_buf, false) != LV_RES_OK)
VG_LITE_RETURN_INV("Init buffer failed.");
vg_lite_buffer_format_t color_format = LV_COLOR_DEPTH == 16 ? VG_LITE_BGRA8888 : VG_LITE_ABGR8888;
if(lv_vglite_premult_and_swizzle(&vgcol, col32, opa, color_format) != LV_RES_OK)
VG_LITE_RETURN_INV("Premultiplication and swizzle failed.");
if(opa >= (lv_opa_t)LV_OPA_MAX) { /*Opaque fill*/
rect.x = fill_area->x1;
rect.y = fill_area->y1;
rect.width = area_w;
rect.height = area_h;
vg_lite_rectangle_t rect = {
.x = dest_area->x1,
.y = dest_area->y1,
.width = lv_area_get_width(dest_area),
.height = lv_area_get_height(dest_area)
};
/*Clean & invalidate cache*/
lv_vglite_invalidate_cache();
#if LV_COLOR_DEPTH==16
vgcol = col32.full;
#else /*LV_COLOR_DEPTH==32*/
vgcol = ((uint32_t)col32.ch.alpha << 24) | ((uint32_t)col32.ch.blue << 16) | ((uint32_t)col32.ch.green << 8) |
(uint32_t)col32.ch.red;
#endif
err = vg_lite_clear(&vgbuf, &rect, vgcol);
err = vg_lite_clear(vgbuf, &rect, vgcol);
VG_LITE_ERR_RETURN_INV(err, "Clear failed.");
err = vg_lite_finish();
VG_LITE_ERR_RETURN_INV(err, "Finish failed.");
if(lv_vglite_run() != LV_RES_OK)
VG_LITE_RETURN_INV("Run failed.");
}
else { /*fill with transparency*/
vg_lite_path_t path;
int32_t path_data[] = { /*VG rectangular path*/
VLC_OP_MOVE, fill_area->x1, fill_area->y1,
VLC_OP_LINE, fill_area->x2 + 1, fill_area->y1,
VLC_OP_LINE, fill_area->x2 + 1, fill_area->y2 + 1,
VLC_OP_LINE, fill_area->x1, fill_area->y2 + 1,
VLC_OP_LINE, fill_area->x1, fill_area->y1,
VLC_OP_MOVE, dest_area->x1, dest_area->y1,
VLC_OP_LINE, dest_area->x2 + 1, dest_area->y1,
VLC_OP_LINE, dest_area->x2 + 1, dest_area->y2 + 1,
VLC_OP_LINE, dest_area->x1, dest_area->y2 + 1,
VLC_OP_LINE, dest_area->x1, dest_area->y1,
VLC_OP_END
};
err = vg_lite_init_path(&path, VG_LITE_S32, VG_LITE_LOW, sizeof(path_data), path_data,
(vg_lite_float_t) fill_area->x1, (vg_lite_float_t) fill_area->y1,
((vg_lite_float_t) fill_area->x2) + 1.0f, ((vg_lite_float_t) fill_area->y2) + 1.0f);
(vg_lite_float_t) dest_area->x1, (vg_lite_float_t) dest_area->y1,
((vg_lite_float_t) dest_area->x2) + 1.0f, ((vg_lite_float_t) dest_area->y2) + 1.0f);
VG_LITE_ERR_RETURN_INV(err, "Init path failed.");
/* Only pre-multiply color if hardware pre-multiplication is not present */
if(!vg_lite_query_feature(gcFEATURE_BIT_VG_PE_PREMULTIPLY)) {
col32.ch.red = (uint8_t)(((uint16_t)col32.ch.red * opa) >> 8);
col32.ch.green = (uint8_t)(((uint16_t)col32.ch.green * opa) >> 8);
col32.ch.blue = (uint8_t)(((uint16_t)col32.ch.blue * opa) >> 8);
}
col32.ch.alpha = opa;
#if LV_COLOR_DEPTH==16
vgcol = col32.full;
#else /*LV_COLOR_DEPTH==32*/
vgcol = ((uint32_t)col32.ch.alpha << 24) | ((uint32_t)col32.ch.blue << 16) | ((uint32_t)col32.ch.green << 8) |
(uint32_t)col32.ch.red;
#endif
/*Clean & invalidate cache*/
lv_vglite_invalidate_cache();
vg_lite_matrix_t matrix;
vg_lite_identity(&matrix);
/*Draw rectangle*/
err = vg_lite_draw(&vgbuf, &path, VG_LITE_FILL_EVEN_ODD, &matrix, VG_LITE_BLEND_SRC_OVER, vgcol);
err = vg_lite_draw(vgbuf, &path, VG_LITE_FILL_EVEN_ODD, &matrix, VG_LITE_BLEND_SRC_OVER, vgcol);
VG_LITE_ERR_RETURN_INV(err, "Draw rectangle failed.");
err = vg_lite_finish();
VG_LITE_ERR_RETURN_INV(err, "Finish failed.");
if(lv_vglite_run() != LV_RES_OK)
VG_LITE_RETURN_INV("Run failed.");
err = vg_lite_clear_path(&path);
VG_LITE_ERR_RETURN_INV(err, "Clear path failed.");
@@ -232,41 +223,64 @@ lv_res_t lv_gpu_nxp_vglite_fill(lv_color_t * dest_buf, lv_coord_t dest_width, lv
return LV_RES_OK;
}
lv_res_t lv_gpu_nxp_vglite_blit(lv_gpu_nxp_vglite_blit_info_t * blit)
lv_res_t lv_gpu_nxp_vglite_blit(lv_color_t * dest_buf, lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, lv_area_t * src_area, lv_coord_t src_stride,
lv_opa_t opa)
{
uint32_t dest_size = lv_area_get_size(&blit->dst_area);
/* Set vgmatrix. */
lv_vglite_set_translation_matrix(dest_area);
if(blit->opa >= (lv_opa_t)LV_OPA_MAX) {
if(dest_size < LV_GPU_NXP_VG_LITE_BLIT_SIZE_LIMIT)
VG_LITE_RETURN_INV("Area size %d smaller than limit %d.", dest_size, LV_GPU_NXP_VG_LITE_BLIT_SIZE_LIMIT);
}
else {
if(dest_size < LV_GPU_NXP_VG_LITE_BLIT_OPA_SIZE_LIMIT)
VG_LITE_RETURN_INV("Area size %d smaller than limit %d.", dest_size, LV_GPU_NXP_VG_LITE_BLIT_OPA_SIZE_LIMIT);
}
/* Set src_vgbuf structure. */
lv_vglite_set_src_buf(src_buf, src_area, src_stride);
#if VG_LITE_BLIT_SPLIT_ENABLED
return _lv_gpu_nxp_vglite_blit_split(blit);
#endif /* non RT595 */
lv_color_t * orig_dest_buf = dest_buf;
/* Just pass down */
return _lv_gpu_nxp_vglite_blit_single(blit);
lv_res_t rv = lv_vglite_blit_split(dest_buf, dest_area, dest_stride, src_buf, src_area, src_stride, opa);
/* Restore the original dest_vgbuf memory address. */
lv_vglite_set_dest_buf_ptr(orig_dest_buf);
return rv;
#else
LV_UNUSED(dest_buf);
LV_UNUSED(dest_stride);
if(check_src_alignment(src_buf, src_stride) != LV_RES_OK)
VG_LITE_RETURN_INV("Check src alignment failed.");
return lv_vglite_blit_single(dest_area, src_area, opa);
#endif
}
lv_res_t lv_gpu_nxp_vglite_blit_transform(lv_gpu_nxp_vglite_blit_info_t * blit)
lv_res_t lv_gpu_nxp_vglite_blit_transform(lv_color_t * dest_buf, lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, lv_area_t * src_area, lv_coord_t src_stride,
const lv_draw_img_dsc_t * dsc)
{
uint32_t dest_size = lv_area_get_size(&blit->dst_area);
/* Set vgmatrix. */
lv_vglite_set_transformation_matrix(dest_area, dsc);
if(blit->opa >= (lv_opa_t)LV_OPA_MAX) {
if(dest_size < LV_GPU_NXP_VG_LITE_BLIT_SIZE_LIMIT)
VG_LITE_RETURN_INV("Area size %d smaller than limit %d.", dest_size, LV_GPU_NXP_VG_LITE_BLIT_SIZE_LIMIT);
}
else {
if(dest_size < LV_GPU_NXP_VG_LITE_BLIT_OPA_SIZE_LIMIT)
VG_LITE_RETURN_INV("Area size %d smaller than limit %d.", dest_size, LV_GPU_NXP_VG_LITE_BLIT_OPA_SIZE_LIMIT);
}
/* Set src_vgbuf structure. */
lv_vglite_set_src_buf(src_buf, src_area, src_stride);
return _lv_gpu_nxp_vglite_blit_single(blit);
#if VG_LITE_BLIT_SPLIT_ENABLED
lv_color_t * orig_dest_buf = dest_buf;
lv_res_t rv = lv_vglite_blit_split(dest_buf, dest_area, dest_stride, src_buf, src_area, src_stride, dsc->opa);
/* Restore the original dest_vgbuf memory address. */
lv_vglite_set_dest_buf_ptr(orig_dest_buf);
return rv;
#else
LV_UNUSED(dest_buf);
LV_UNUSED(dest_stride);
if(check_src_alignment(src_buf, src_stride) != LV_RES_OK)
VG_LITE_RETURN_INV("Check src alignment failed.");
return lv_vglite_blit_single(dest_area, src_area, dsc->opa);
#endif
}
/**********************
@@ -274,223 +288,196 @@ lv_res_t lv_gpu_nxp_vglite_blit_transform(lv_gpu_nxp_vglite_blit_info_t * blit)
**********************/
#if VG_LITE_BLIT_SPLIT_ENABLED
static lv_res_t _lv_gpu_nxp_vglite_blit_split(lv_gpu_nxp_vglite_blit_info_t * blit)
static lv_res_t lv_vglite_blit_split(lv_color_t * dest_buf, lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, lv_area_t * src_area, lv_coord_t src_stride,
lv_opa_t opa)
{
lv_res_t rv = LV_RES_INV;
if(_lv_gpu_nxp_vglite_check_blit(blit) != LV_RES_OK) {
PRINT_BLT("Blit check failed\n");
return LV_RES_INV;
}
VG_LITE_LOG_TRACE("Blit "
"Area: ([%d,%d], [%d,%d]) -> ([%d,%d], [%d,%d]) | "
"Size: ([%dx%d] -> [%dx%d]) | "
"Addr: (0x%x -> 0x%x)",
src_area->x1, src_area->y1, src_area->x2, src_area->y2,
dest_area->x1, dest_area->y1, dest_area->x2, dest_area->y2,
lv_area_get_width(src_area), lv_area_get_height(src_area),
lv_area_get_width(dest_area), lv_area_get_height(dest_area),
(uintptr_t)src_buf, (uintptr_t)dest_buf);
PRINT_BLT("BLIT from: "
"Area: %03d,%03d - %03d,%03d "
"Addr: %d\n\n",
blit->src_area.x1, blit->src_area.y1,
blit->src_area.x2, blit->src_area.y2,
(uintptr_t) blit->src);
PRINT_BLT("BLIT to: "
"Area: %03d,%03d - %03d,%03d "
"Addr: %d\n\n",
blit->dst_area.x1, blit->dst_area.y1,
blit->dst_area.x2, blit->dst_area.y2,
(uintptr_t) blit->src);
/* Stage 1: Move starting pointers as close as possible to [x1, y1], so coordinates are as small as possible. */
_align_x(&blit->src_area, (lv_color_t **)&blit->src);
_align_y(&blit->src_area, (lv_color_t **)&blit->src, blit->src_stride / sizeof(lv_color_t));
_align_x(&blit->dst_area, (lv_color_t **)&blit->dst);
_align_y(&blit->dst_area, (lv_color_t **)&blit->dst, blit->dst_stride / sizeof(lv_color_t));
/* Stage 1: Move starting pointers as close as possible to [x1, y1], so coordinates are as small as possible. */
align_x(src_area, (lv_color_t **)&src_buf);
align_y(src_area, (lv_color_t **)&src_buf, src_stride);
align_x(dest_area, (lv_color_t **)&dest_buf);
align_y(dest_area, (lv_color_t **)&dest_buf, dest_stride);
/* Stage 2: If we're in limit, do a single BLIT */
if((blit->src_area.x2 < LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR) &&
(blit->src_area.y2 < LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR)) {
PRINT_BLT("Simple blit!\n");
return _lv_gpu_nxp_vglite_blit_single(blit);
if((src_area->x2 < LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR) &&
(src_area->y2 < LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR)) {
if(check_src_alignment(src_buf, src_stride) != LV_RES_OK)
VG_LITE_RETURN_INV("Check src alignment failed.");
/* Set new dest_vgbuf and src_vgbuf memory addresses. */
lv_vglite_set_dest_buf_ptr(dest_buf);
lv_vglite_set_src_buf_ptr(src_buf);
/* Set vgmatrix. */
lv_vglite_set_translation_matrix(dest_area);
rv = lv_vglite_blit_single(dest_area, src_area, opa);
VG_LITE_LOG_TRACE("Single "
"Area: ([%d,%d], [%d,%d]) -> ([%d,%d], [%d,%d]) | "
"Size: ([%dx%d] -> [%dx%d]) | "
"Addr: (0x%x -> 0x%x) %s",
src_area->x1, src_area->y1, src_area->x2, src_area->y2,
dest_area->x1, dest_area->y1, dest_area->x2, dest_area->y2,
lv_area_get_width(src_area), lv_area_get_height(src_area),
lv_area_get_width(dest_area), lv_area_get_height(dest_area),
(uintptr_t)src_buf, (uintptr_t)dest_buf,
rv == LV_RES_OK ? "OK!" : "FAILED!");
return rv;
};
/* Stage 3: Split the BLIT into multiple tiles */
PRINT_BLT("Split blit!\n");
PRINT_BLT("Blit "
"([%03d,%03d], [%03d,%03d]) -> "
"([%03d,%03d], [%03d,%03d]) | "
"([%03dx%03d] -> [%03dx%03d]) | "
"A:(%d -> %d)\n",
blit->src_area.x1, blit->src_area.y1, blit->src_area.x2, blit->src_area.y2,
blit->dst_area.x1, blit->dst_area.y1, blit->dst_area.x2, blit->dst_area.y2,
lv_area_get_width(&blit->src_area), lv_area_get_height(&blit->src_area),
lv_area_get_width(&blit->dst_area), lv_area_get_height(&blit->dst_area),
(uintptr_t) blit->src, (uintptr_t) blit->dst);
VG_LITE_LOG_TRACE("Split "
"Area: ([%d,%d], [%d,%d]) -> ([%d,%d], [%d,%d]) | "
"Size: ([%dx%d] -> [%dx%d]) | "
"Addr: (0x%x -> 0x%x)",
src_area->x1, src_area->y1, src_area->x2, src_area->y2,
dest_area->x1, dest_area->y1, dest_area->x2, dest_area->y2,
lv_area_get_width(src_area), lv_area_get_height(src_area),
lv_area_get_width(dest_area), lv_area_get_height(dest_area),
(uintptr_t)src_buf, (uintptr_t)dest_buf);
lv_coord_t totalWidth = lv_area_get_width(&blit->src_area);
lv_coord_t totalHeight = lv_area_get_height(&blit->src_area);
lv_gpu_nxp_vglite_blit_info_t tileBlit;
lv_coord_t width = lv_area_get_width(src_area);
lv_coord_t height = lv_area_get_height(src_area);
/* Number of tiles needed */
int totalTilesX = (blit->src_area.x1 + totalWidth + LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1) /
LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR;
int totalTilesY = (blit->src_area.y1 + totalHeight + LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1) /
LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR;
int total_tiles_x = (src_area->x1 + width + LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1) /
LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR;
int total_tiles_y = (src_area->y1 + height + LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1) /
LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR;
    /* The src and dest buffers are shifted against each other. The real data [0,0] of the src buffer may
     * actually start at [3,0] in the buffer, as the buffer pointer has to be aligned, while the real data
     * [0,0] of the dest buffer may start at [1,0]; the two alignments may differ. */
int shiftSrcX = (blit->src_area.x1 > blit->dst_area.x1) ? (blit->src_area.x1 - blit->dst_area.x1) : 0;
int shiftDstX = (blit->src_area.x1 < blit->dst_area.x1) ? (blit->dst_area.x1 - blit->src_area.x1) : 0;
int shift_src_x = (src_area->x1 > dest_area->x1) ? (src_area->x1 - dest_area->x1) : 0;
int shift_dest_x = (src_area->x1 < dest_area->x1) ? (dest_area->x1 - src_area->x1) : 0;
PRINT_BLT("\n");
PRINT_BLT("Align shift: src: %d, dst: %d\n", shiftSrcX, shiftDstX);
VG_LITE_LOG_TRACE("X shift: src: %d, dst: %d", shift_src_x, shift_dest_x);
tileBlit = *blit;
lv_color_t * tile_dest_buf;
lv_area_t tile_dest_area;
const lv_color_t * tile_src_buf;
lv_area_t tile_src_area;
for(int tileY = 0; tileY < totalTilesY; tileY++) {
for(int y = 0; y < total_tiles_y; y++) {
tileBlit.src_area.y1 = 0; /* no vertical alignment, always start from 0 */
tileBlit.src_area.y2 = totalHeight - tileY * LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1;
if(tileBlit.src_area.y2 >= LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR) {
tileBlit.src_area.y2 = LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1; /* Should never happen */
tile_src_area.y1 = 0; /* no vertical alignment, always start from 0 */
tile_src_area.y2 = height - y * LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1;
if(tile_src_area.y2 >= LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR) {
tile_src_area.y2 = LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1; /* Should never happen */
}
tileBlit.src = blit->src + tileY * LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR * blit->src_stride / sizeof(
lv_color_t); /* stride in px! */
tile_src_buf = src_buf + y * LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR * src_stride;
tileBlit.dst_area.y1 = tileBlit.src_area.y1; /* y has no alignment, always in sync with src */
tileBlit.dst_area.y2 = tileBlit.src_area.y2;
tile_dest_area.y1 = tile_src_area.y1; /* y has no alignment, always in sync with src */
tile_dest_area.y2 = tile_src_area.y2;
tileBlit.dst = blit->dst + tileY * LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR * blit->dst_stride / sizeof(
lv_color_t); /* stride in px! */
tile_dest_buf = dest_buf + y * LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR * dest_stride;
for(int tileX = 0; tileX < totalTilesX; tileX++) {
for(int x = 0; x < total_tiles_x; x++) {
if(tileX == 0) {
if(x == 0) {
/* 1st tile is special - there may be a gap between buffer start pointer
* and area.x1 value, as the pointer has to be aligned.
* tileBlit.src pointer - keep init value from Y-loop.
* tile_src_buf pointer - keep init value from Y-loop.
* Also, 1st tile start is not shifted! shift is applied from 2nd tile */
tileBlit.src_area.x1 = blit->src_area.x1;
tileBlit.dst_area.x1 = blit->dst_area.x1;
tile_src_area.x1 = src_area->x1;
tile_dest_area.x1 = dest_area->x1;
}
else {
                /* subsequent tiles always start from 0, but shifted */
tileBlit.src_area.x1 = 0 + shiftSrcX;
tileBlit.dst_area.x1 = 0 + shiftDstX;
tile_src_area.x1 = 0 + shift_src_x;
tile_dest_area.x1 = 0 + shift_dest_x;
/* and advance start pointer + 1 tile size */
tileBlit.src += LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR;
tileBlit.dst += LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR;
tile_src_buf += LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR;
tile_dest_buf += LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR;
}
/* Clip tile end coordinates */
tileBlit.src_area.x2 = totalWidth + blit->src_area.x1 - tileX * LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1;
if(tileBlit.src_area.x2 >= LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR) {
tileBlit.src_area.x2 = LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1;
tile_src_area.x2 = width + src_area->x1 - x * LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1;
if(tile_src_area.x2 >= LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR) {
tile_src_area.x2 = LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1;
}
tileBlit.dst_area.x2 = totalWidth + blit->dst_area.x1 - tileX * LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1;
if(tileBlit.dst_area.x2 >= LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR) {
tileBlit.dst_area.x2 = LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1;
tile_dest_area.x2 = width + dest_area->x1 - x * LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1;
if(tile_dest_area.x2 >= LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR) {
tile_dest_area.x2 = LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1;
}
if(tileX < (totalTilesX - 1)) {
/* And adjust end coords if shifted, but not for last tile! */
tileBlit.src_area.x2 += shiftSrcX;
tileBlit.dst_area.x2 += shiftDstX;
if(x < (total_tiles_x - 1)) {
/* And adjust end coords if shifted, but not for last tile! */
tile_src_area.x2 += shift_src_x;
tile_dest_area.x2 += shift_dest_x;
}
rv = _lv_gpu_nxp_vglite_blit_single(&tileBlit);
if(check_src_alignment(tile_src_buf, src_stride) != LV_RES_OK)
VG_LITE_RETURN_INV("Check src alignment failed.");
#if BLIT_DBG_AREAS
lv_vglite_dbg_draw_rectangle((lv_color_t *) tileBlit.dst, tileBlit.dst_width, tileBlit.dst_height, &tileBlit.dst_area,
LV_COLOR_RED);
lv_vglite_dbg_draw_rectangle((lv_color_t *) tileBlit.src, tileBlit.src_width, tileBlit.src_height, &tileBlit.src_area,
LV_COLOR_GREEN);
#endif
/* Set vgmatrix. */
lv_vglite_set_translation_matrix(&tile_dest_area);
PRINT_BLT("Tile [%d, %d]: "
"([%d,%d], [%d,%d]) -> "
"([%d,%d], [%d,%d]) | "
"([%dx%d] -> [%dx%d]) | "
"A:(0x%8X -> 0x%8X) %s\n",
tileX, tileY,
tileBlit.src_area.x1, tileBlit.src_area.y1, tileBlit.src_area.x2, tileBlit.src_area.y2,
tileBlit.dst_area.x1, tileBlit.dst_area.y1, tileBlit.dst_area.x2, tileBlit.dst_area.y2,
lv_area_get_width(&tileBlit.src_area), lv_area_get_height(&tileBlit.src_area),
lv_area_get_width(&tileBlit.dst_area), lv_area_get_height(&tileBlit.dst_area),
(uintptr_t) tileBlit.src, (uintptr_t) tileBlit.dst,
rv == LV_RES_OK ? "OK!" : "!!! FAILED !!!");
/* Set new dest_vgbuf and src_vgbuf memory addresses. */
lv_vglite_set_dest_buf_ptr(tile_dest_buf);
lv_vglite_set_src_buf_ptr(tile_src_buf);
if(rv != LV_RES_OK) { /* if anything goes wrong... */
#if LV_GPU_NXP_VG_LITE_LOG_ERRORS
LV_LOG_ERROR("Split blit failed. Trying SW blit instead.");
#endif
_sw_blit(&tileBlit);
rv = LV_RES_OK; /* Don't report error, as SW BLIT was performed */
rv = lv_vglite_blit_single(&tile_dest_area, &tile_src_area, opa);
VG_LITE_LOG_TRACE("Tile [%d, %d] "
"Area: ([%d,%d], [%d,%d]) -> ([%d,%d], [%d,%d]) | "
"Size: ([%dx%d] -> [%dx%d]) | "
"Addr: (0x%x -> 0x%x) %s",
x, y,
tile_src_area.x1, tile_src_area.y1, tile_src_area.x2, tile_src_area.y2,
tile_dest_area.x1, tile_dest_area.y1, tile_dest_area.x2, tile_dest_area.y2,
lv_area_get_width(&tile_src_area), lv_area_get_height(&tile_src_area),
lv_area_get_width(&tile_dest_area), lv_area_get_height(&tile_dest_area),
(uintptr_t)tile_src_buf, (uintptr_t)tile_dest_buf,
rv == LV_RES_OK ? "OK!" : "FAILED!");
if(rv != LV_RES_OK) {
return rv;
}
}
PRINT_BLT(" \n");
}
return rv; /* should never fail */
return rv;
}
#endif /* VG_LITE_BLIT_SPLIT_ENABLED */
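For orientation, the split path above cuts oversized blits into tiles of at most LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR pixels per side; below is a minimal sketch of the tile-count arithmetic it relies on. The helper name and the 352 px figure in the comment are illustrative assumptions only, the real threshold is configuration dependent.
/* Hypothetical helper mirroring the loop bounds used above. */
static void example_count_tiles(lv_coord_t width, lv_coord_t height,
                                int * total_tiles_x, int * total_tiles_y)
{
    /* Round up so a partial last tile is still processed. */
    *total_tiles_x = (width  + LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1) / LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR;
    *total_tiles_y = (height + LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR - 1) / LV_GPU_NXP_VG_LITE_BLIT_SPLIT_THR;
    /* e.g. a 480x320 area with a 352 px threshold gives 2 x 1 tiles. */
}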
static lv_res_t _lv_gpu_nxp_vglite_blit_single(lv_gpu_nxp_vglite_blit_info_t * blit)
static lv_res_t lv_vglite_blit_single(const lv_area_t * dest_area, const lv_area_t * src_area, lv_opa_t opa)
{
vg_lite_buffer_t src_vgbuf, dst_vgbuf;
vg_lite_error_t err = VG_LITE_SUCCESS;
uint32_t rect[4];
vg_lite_float_t scale = 1.0;
vg_lite_buffer_t * dst_vgbuf = lv_vglite_get_dest_buf();
vg_lite_buffer_t * src_vgbuf = lv_vglite_get_src_buf();
if(blit == NULL) {
/*Wrong parameter*/
return LV_RES_INV;
}
if(blit->opa < (lv_opa_t) LV_OPA_MIN) {
return LV_RES_OK; /*Nothing to BLIT*/
}
/*Wrap src/dst buffer into VG-Lite buffer*/
if(lv_vglite_init_buf(&src_vgbuf, (uint32_t)blit->src_width, (uint32_t)blit->src_height, (uint32_t)blit->src_stride,
blit->src, true) != LV_RES_OK)
VG_LITE_RETURN_INV("Init buffer failed.");
if(lv_vglite_init_buf(&dst_vgbuf, (uint32_t)blit->dst_width, (uint32_t)blit->dst_height, (uint32_t)blit->dst_stride,
blit->dst, false) != LV_RES_OK)
VG_LITE_RETURN_INV("Init buffer failed.");
rect[0] = (uint32_t)blit->src_area.x1; /* start x */
rect[1] = (uint32_t)blit->src_area.y1; /* start y */
rect[2] = (uint32_t)blit->src_area.x2 - (uint32_t)blit->src_area.x1 + 1U; /* width */
rect[3] = (uint32_t)blit->src_area.y2 - (uint32_t)blit->src_area.y1 + 1U; /* height */
vg_lite_matrix_t matrix;
vg_lite_identity(&matrix);
vg_lite_translate((vg_lite_float_t)blit->dst_area.x1, (vg_lite_float_t)blit->dst_area.y1, &matrix);
if((blit->angle != 0) || (blit->zoom != LV_IMG_ZOOM_NONE)) {
vg_lite_translate(blit->pivot.x, blit->pivot.y, &matrix);
vg_lite_rotate(blit->angle / 10.0f, &matrix); /* angle is 1/10 degree */
scale = 1.0f * blit->zoom / LV_IMG_ZOOM_NONE;
vg_lite_scale(scale, scale, &matrix);
vg_lite_translate(0.0f - blit->pivot.x, 0.0f - blit->pivot.y, &matrix);
}
/*Clean & invalidate cache*/
lv_vglite_invalidate_cache();
uint32_t rect[] = {
(uint32_t)src_area->x1, /* start x */
(uint32_t)src_area->y1, /* start y */
(uint32_t)lv_area_get_width(src_area), /* width */
(uint32_t)lv_area_get_height(src_area) /* height */
};
uint32_t color;
vg_lite_blend_t blend;
if(blit->opa >= (lv_opa_t)LV_OPA_MAX) {
if(opa >= (lv_opa_t)LV_OPA_MAX) {
color = 0xFFFFFFFFU;
blend = VG_LITE_BLEND_SRC_OVER;
src_vgbuf.transparency_mode = VG_LITE_IMAGE_TRANSPARENT;
src_vgbuf->transparency_mode = VG_LITE_IMAGE_TRANSPARENT;
}
else {
uint32_t opa = (uint32_t)blit->opa;
if(vg_lite_query_feature(gcFEATURE_BIT_VG_PE_PREMULTIPLY)) {
color = (opa << 24) | 0x00FFFFFFU;
}
@@ -498,94 +485,83 @@ static lv_res_t _lv_gpu_nxp_vglite_blit_single(lv_gpu_nxp_vglite_blit_info_t * b
color = (opa << 24) | (opa << 16) | (opa << 8) | opa;
}
blend = VG_LITE_BLEND_SRC_OVER;
src_vgbuf.image_mode = VG_LITE_MULTIPLY_IMAGE_MODE;
src_vgbuf.transparency_mode = VG_LITE_IMAGE_TRANSPARENT;
src_vgbuf->image_mode = VG_LITE_MULTIPLY_IMAGE_MODE;
src_vgbuf->transparency_mode = VG_LITE_IMAGE_TRANSPARENT;
}
err = vg_lite_blit_rect(&dst_vgbuf, &src_vgbuf, rect, &matrix, blend, color, VG_LITE_FILTER_POINT);
VG_LITE_ERR_RETURN_INV(err, "Blit rectangle failed.");
bool scissoring = lv_area_get_width(dest_area) < lv_area_get_width(src_area) ||
lv_area_get_height(dest_area) < lv_area_get_height(src_area);
if(scissoring) {
vg_lite_enable_scissor();
vg_lite_set_scissor((int32_t)dest_area->x1, (int32_t)dest_area->y1,
(int32_t)lv_area_get_width(dest_area),
(int32_t)lv_area_get_height(dest_area));
}
err = vg_lite_finish();
VG_LITE_ERR_RETURN_INV(err, "Finish failed.");
err = vg_lite_blit_rect(dst_vgbuf, src_vgbuf, rect, &vgmatrix, blend, color, VG_LITE_FILTER_POINT);
if(err != VG_LITE_SUCCESS) {
if(scissoring)
vg_lite_disable_scissor();
VG_LITE_RETURN_INV("Blit rectangle failed.");
}
if(lv_vglite_run() != LV_RES_OK) {
if(scissoring)
vg_lite_disable_scissor();
VG_LITE_RETURN_INV("Run failed.");
}
if(scissoring)
vg_lite_disable_scissor();
return LV_RES_OK;
}
static lv_res_t check_src_alignment(const lv_color_t * src_buf, lv_coord_t src_stride)
{
/* No alignment requirement for destination pixel buffer when using mode VG_LITE_LINEAR */
/* Test for pointer alignment */
if((((uintptr_t)src_buf) % (uintptr_t)LV_ATTRIBUTE_MEM_ALIGN_SIZE) != (uintptr_t)0x0U)
VG_LITE_RETURN_INV("Src buffer ptr (0x%x) not aligned to 0x%x bytes.",
(size_t)src_buf, LV_ATTRIBUTE_MEM_ALIGN_SIZE);
/* Test for stride alignment */
if((src_stride % (lv_coord_t)LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX) != 0x0U)
VG_LITE_RETURN_INV("Src buffer stride (%d px) not aligned to %d px.",
src_stride, LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX);
return LV_RES_OK;
}
static inline void lv_vglite_set_translation_matrix(const lv_area_t * dest_area)
{
vg_lite_identity(&vgmatrix);
vg_lite_translate((vg_lite_float_t)dest_area->x1, (vg_lite_float_t)dest_area->y1, &vgmatrix);
}
static inline void lv_vglite_set_transformation_matrix(const lv_area_t * dest_area, const lv_draw_img_dsc_t * dsc)
{
lv_vglite_set_translation_matrix(dest_area);
bool has_scale = (dsc->zoom != LV_IMG_ZOOM_NONE);
bool has_rotation = (dsc->angle != 0);
if(has_scale || has_rotation) {
vg_lite_translate(dsc->pivot.x, dsc->pivot.y, &vgmatrix);
if(has_rotation)
vg_lite_rotate(dsc->angle / 10.0f, &vgmatrix); /* angle is 1/10 degree */
if(has_scale) {
vg_lite_float_t scale = 1.0f * dsc->zoom / LV_IMG_ZOOM_NONE;
vg_lite_scale(scale, scale, &vgmatrix);
}
vg_lite_translate(0.0f - dsc->pivot.x, 0.0f - dsc->pivot.y, &vgmatrix);
}
}
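As a hedged illustration of how the two matrix helpers above are meant to be used: a plain blit only needs the translation, while a rotated or zoomed image needs the full transformation. The wrapper function below is purely illustrative.
static void example_setup_matrix(const lv_area_t * dest_area, const lv_draw_img_dsc_t * dsc)
{
    if((dsc->angle == 0) && (dsc->zoom == LV_IMG_ZOOM_NONE))
        lv_vglite_set_translation_matrix(dest_area);          /* translate to dest_area only */
    else
        lv_vglite_set_transformation_matrix(dest_area, dsc);  /* translate + rotate/scale around the pivot */
}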
#if VG_LITE_BLIT_SPLIT_ENABLED
static void _sw_blit(lv_gpu_nxp_vglite_blit_info_t * blit)
static void align_x(lv_area_t * area, lv_color_t ** buf)
{
int x, y;
lv_coord_t w = lv_area_get_width(&blit->src_area);
lv_coord_t h = lv_area_get_height(&blit->src_area);
int32_t srcStridePx = blit->src_stride / (int32_t)sizeof(lv_color_t);
int32_t dstStridePx = blit->dst_stride / (int32_t)sizeof(lv_color_t);
lv_color_t * src = (lv_color_t *)blit->src + blit->src_area.y1 * srcStridePx + blit->src_area.x1;
lv_color_t * dst = (lv_color_t *)blit->dst + blit->dst_area.y1 * dstStridePx + blit->dst_area.x1;
if(blit->opa >= (lv_opa_t)LV_OPA_MAX) {
/* simple copy */
for(y = 0; y < h; y++) {
lv_memcpy(dst, src, (uint32_t)w * sizeof(lv_color_t));
src += srcStridePx;
dst += dstStridePx;
}
}
else if(blit->opa >= LV_OPA_MIN) {
/* alpha blending */
for(y = 0; y < h; y++) {
for(x = 0; x < w; x++) {
dst[x] = lv_color_mix(src[x], dst[x], blit->opa);
}
src += srcStridePx;
dst += dstStridePx;
}
}
}
static lv_res_t _lv_gpu_nxp_vglite_check_blit(lv_gpu_nxp_vglite_blit_info_t * blit)
{
/* Test for minimal width */
if(lv_area_get_width(&blit->src_area) < (lv_coord_t)LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX)
VG_LITE_RETURN_INV("Src area width (%d) is smaller than required (%d).", lv_area_get_width(&blit->src_area),
LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX);
/* Test for minimal width */
if(lv_area_get_width(&blit->dst_area) < (lv_coord_t)LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX)
VG_LITE_RETURN_INV("Dest area width (%d) is smaller than required (%d).", lv_area_get_width(&blit->dst_area),
LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX);
/* Test for pointer alignment */
if((((uintptr_t) blit->src) % LV_ATTRIBUTE_MEM_ALIGN_SIZE) != 0x0)
VG_LITE_RETURN_INV("Src buffer ptr (0x%X) not aligned to %d.", (size_t) blit->src, LV_ATTRIBUTE_MEM_ALIGN_SIZE);
/* No alignment requirement for destination pixel buffer when using mode VG_LITE_LINEAR */
/* Test for stride alignment */
if((blit->src_stride % (LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX * LV_COLOR_DEPTH / 8)) != 0x0)
VG_LITE_RETURN_INV("Src buffer stride (%d px) not aligned to %d px.", blit->src_stride,
LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX);
/* Test for stride alignment */
if((blit->dst_stride % (LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX * LV_COLOR_DEPTH / 8)) != 0x0)
VG_LITE_RETURN_INV("Dest buffer stride (%d px) not aligned to %d px.", blit->dst_stride,
LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX);
if((lv_area_get_width(&blit->src_area) != lv_area_get_width(&blit->dst_area)) ||
(lv_area_get_height(&blit->src_area) != lv_area_get_height(&blit->dst_area)))
VG_LITE_RETURN_INV("Src and dest buffer areas are not equal.");
return LV_RES_OK;
}
static void _align_x(lv_area_t * area, lv_color_t ** buf)
{
int alignedAreaStartPx = area->x1 - (area->x1 % (LV_ATTRIBUTE_MEM_ALIGN_SIZE * 8 / LV_COLOR_DEPTH));
int alignedAreaStartPx = area->x1 - (area->x1 % (LV_ATTRIBUTE_MEM_ALIGN_SIZE / sizeof(lv_color_t)));
VG_LITE_COND_STOP(alignedAreaStartPx < 0, "Negative X alignment.");
area->x1 -= alignedAreaStartPx;
@@ -593,17 +569,17 @@ static void _align_x(lv_area_t * area, lv_color_t ** buf)
*buf += alignedAreaStartPx;
}
static void _align_y(lv_area_t * area, lv_color_t ** buf, uint32_t stridePx)
static void align_y(lv_area_t * area, lv_color_t ** buf, lv_coord_t stride)
{
int LineToAlignMem;
int alignedAreaStartPy;
/* find how many lines of pixels will respect memory alignment requirement */
if(stridePx % (uint32_t)LV_ATTRIBUTE_MEM_ALIGN_SIZE == 0U) {
if((stride % (lv_coord_t)LV_ATTRIBUTE_MEM_ALIGN_SIZE) == 0x0U) {
alignedAreaStartPy = area->y1;
}
else {
LineToAlignMem = LV_ATTRIBUTE_MEM_ALIGN_SIZE / (sizeof(lv_color_t) * LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX);
VG_LITE_COND_STOP(LV_ATTRIBUTE_MEM_ALIGN_SIZE % (sizeof(lv_color_t) * LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX),
LineToAlignMem = LV_ATTRIBUTE_MEM_ALIGN_SIZE / (LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX * sizeof(lv_color_t));
VG_LITE_COND_STOP(LV_ATTRIBUTE_MEM_ALIGN_SIZE % (LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX * sizeof(lv_color_t)),
"Complex case: need gcd function.");
alignedAreaStartPy = area->y1 - (area->y1 % LineToAlignMem);
VG_LITE_COND_STOP(alignedAreaStartPy < 0, "Negative Y alignment.");
@@ -611,7 +587,7 @@ static void _align_y(lv_area_t * area, lv_color_t ** buf, uint32_t stridePx)
area->y1 -= alignedAreaStartPy;
area->y2 -= alignedAreaStartPy;
*buf += (uint32_t)alignedAreaStartPy * stridePx;
*buf += (uint32_t)(alignedAreaStartPy * stride);
}
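A worked instance of the Y-alignment math above, as a comment-only illustration; the 64-byte alignment and the 16-bit color depth are assumed values.
/* With LV_ATTRIBUTE_MEM_ALIGN_SIZE = 64, LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX = 16
 * and sizeof(lv_color_t) = 2 (16-bit color):
 *   LineToAlignMem = 64 / (16 * 2) = 2 lines
 *   area->y1 = 37  ->  alignedAreaStartPy = 36
 * so the area moves up by 36 lines and the buffer pointer advances by 36 * stride pixels. */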
#endif /*VG_LITE_BLIT_SPLIT_ENABLED*/

View File

@@ -6,7 +6,7 @@
/**
* MIT License
*
* Copyright 2020-2022 NXP
* Copyright 2020-2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -41,64 +41,16 @@ extern "C" {
#include "../../../lv_conf_internal.h"
#if LV_USE_GPU_NXP_VG_LITE
#include "lv_gpu_nxp_vglite.h"
#include "lv_vglite_utils.h"
/*********************
* DEFINES
*********************/
#ifndef LV_GPU_NXP_VG_LITE_FILL_SIZE_LIMIT
/** Minimum area (in pixels) to be filled by VG-Lite with 100% opacity*/
#define LV_GPU_NXP_VG_LITE_FILL_SIZE_LIMIT 5000
#endif
#ifndef LV_GPU_NXP_VG_LITE_FILL_OPA_SIZE_LIMIT
/** Minimum area (in pixels) to be filled by VG-Lite with transparency*/
#define LV_GPU_NXP_VG_LITE_FILL_OPA_SIZE_LIMIT 5000
#endif
#ifndef LV_GPU_NXP_VG_LITE_BLIT_SIZE_LIMIT
/** Minimum area (in pixels) for image copy with 100% opacity to be handled by VG-Lite*/
#define LV_GPU_NXP_VG_LITE_BLIT_SIZE_LIMIT 5000
#endif
#ifndef LV_GPU_NXP_VG_LITE_BUFF_SYNC_BLIT_SIZE_LIMIT
/** Minimum invalidated area (in pixels) to be synchronized by VG-Lite during buffer sync */
#define LV_GPU_NXP_VG_LITE_BUFF_SYNC_BLIT_SIZE_LIMIT 5000
#endif
#ifndef LV_GPU_NXP_VG_LITE_BLIT_OPA_SIZE_LIMIT
/** Minimum area (in pixels) for image copy with transparency to be handled by VG-Lite*/
#define LV_GPU_NXP_VG_LITE_BLIT_OPA_SIZE_LIMIT 5000
#endif
/**********************
* TYPEDEFS
**********************/
/**
* BLock Image Transfer descriptor structure
*/
typedef struct {
const lv_color_t * src; /**< Source buffer pointer (must be aligned on 32 bytes)*/
lv_area_t src_area; /**< Area to be copied from source*/
lv_coord_t src_width; /**< Source buffer width*/
lv_coord_t src_height; /**< Source buffer height*/
int32_t src_stride; /**< Source buffer stride in bytes (must be aligned on 16 px)*/
const lv_color_t * dst; /**< Destination buffer pointer (must be aligned on 32 bytes)*/
lv_area_t dst_area; /**< Target area in destination buffer (must be the same as src_area)*/
lv_coord_t dst_width; /**< Destination buffer width*/
lv_coord_t dst_height; /**< Destination buffer height*/
int32_t dst_stride; /**< Destination buffer stride in bytes (must be aligned on 16 px)*/
lv_opa_t opa; /**< Opacity - alpha mix (0 = source not copied, 255 = 100% opaque)*/
uint32_t angle; /**< Rotation angle (1/10 of degree)*/
uint32_t zoom; /**< 256 = no zoom (1:1 scale ratio)*/
lv_point_t pivot; /**< The coordinates of rotation pivot in source image buffer*/
} lv_gpu_nxp_vglite_blit_info_t;
/**********************
* GLOBAL PROTOTYPES
**********************/
@@ -106,35 +58,52 @@ typedef struct {
/**
* Fill area, with optional opacity.
*
* @param[in/out] dest_buf Destination buffer pointer (must be aligned on 32 bytes)
* @param[in] dest_width Destination buffer width in pixels (must be aligned on 16 px)
* @param[in] dest_height Destination buffer height in pixels
* @param[in] fill_area Area to be filled
* @param[in] color Fill color
* @param[in] dest_area Area with relative coordinates of destination buffer
* @param[in] color Color
* @param[in] opa Opacity (255 = full, 128 = 50% background/50% color, 0 = no fill)
*
* @retval LV_RES_OK Fill completed
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_VG_LITE_LOG_ERRORS)
*/
lv_res_t lv_gpu_nxp_vglite_fill(lv_color_t * dest_buf, lv_coord_t dest_width, lv_coord_t dest_height,
const lv_area_t * fill_area, lv_color_t color, lv_opa_t opa);
lv_res_t lv_gpu_nxp_vglite_fill(const lv_area_t * dest_area, lv_color_t color, lv_opa_t opa);
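A minimal, hypothetical call site for the new fill signature; the area is relative to the destination buffer registered earlier via lv_gpu_nxp_vglite_init_buf, and all values below are illustrative.
lv_area_t fill_area = { .x1 = 10, .y1 = 10, .x2 = 109, .y2 = 59 };   /* 100 x 50 px */
if(lv_gpu_nxp_vglite_fill(&fill_area, lv_color_hex(0x003a57), LV_OPA_50) != LV_RES_OK) {
    /* fall back to the software renderer */
}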
/**
* BLock Image Transfer.
* BLock Image Transfer - copy a rectangular image from src_buf to dst_buf with effects.
* By default, the image is copied directly, with optional opacity.
*
* @param[in/out] dest_buf Destination buffer
* @param[in] dest_area Area with relative coordinates of destination buffer
* @param[in] dest_stride Stride of destination buffer in pixels
* @param[in] src_buf Source buffer
* @param[in] src_area Source area with relative coordinates of source buffer
* @param[in] src_stride Stride of source buffer in pixels
* @param[in] opa Opacity
*
* @param[in] blit Description of the transfer
* @retval LV_RES_OK Transfer complete
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_VG_LITE_LOG_ERRORS)
*/
lv_res_t lv_gpu_nxp_vglite_blit(lv_gpu_nxp_vglite_blit_info_t * blit);
lv_res_t lv_gpu_nxp_vglite_blit(lv_color_t * dest_buf, lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, lv_area_t * src_area, lv_coord_t src_stride,
lv_opa_t opa);
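A hedged usage sketch for the buffer-based blit; strides are in pixels, each area is relative to its own buffer, and dest_buf/src_buf plus the strides are assumed to come from the caller's draw context.
lv_area_t src_area  = { .x1 = 0,  .y1 = 0,  .x2 = 63, .y2 = 63 };
lv_area_t dest_area = { .x1 = 32, .y1 = 32, .x2 = 95, .y2 = 95 };
lv_res_t res = lv_gpu_nxp_vglite_blit(dest_buf, &dest_area, dest_stride,
                                      src_buf, &src_area, src_stride,
                                      LV_OPA_COVER);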
/**
* BLock Image Transfer with transformation.
* BLock Image Transfer - copy a rectangular image from src_buf to dst_buf with transformation.
* By default, the image is copied directly, with optional opacity.
*
* @param[in/out] dest_buf Destination buffer
* @param[in] dest_area Area with relative coordinates of destination buffer
* @param[in] dest_stride Stride of destination buffer in pixels
* @param[in] src_buf Source buffer
* @param[in] src_area Source area with relative coordinates of source buffer
* @param[in] src_stride Stride of source buffer in pixels
* @param[in] dsc Image descriptor
*
* @param[in] blit Description of the transfer
* @retval LV_RES_OK Transfer complete
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_VG_LITE_LOG_ERRORS)
*/
lv_res_t lv_gpu_nxp_vglite_blit_transform(lv_gpu_nxp_vglite_blit_info_t * blit);
lv_res_t lv_gpu_nxp_vglite_blit_transform(lv_color_t * dest_buf, lv_area_t * dest_area, lv_coord_t dest_stride,
const lv_color_t * src_buf, lv_area_t * src_area, lv_coord_t src_stride,
const lv_draw_img_dsc_t * dsc);
/**********************
* MACROS

View File

@@ -0,0 +1,138 @@
/**
* @file lv_draw_vglite_line.c
*
*/
/**
* MIT License
*
* Copyright 2022, 2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next paragraph)
* shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
/*********************
* INCLUDES
*********************/
#include "lv_draw_vglite_line.h"
#if LV_USE_GPU_NXP_VG_LITE
#include "lv_vglite_buf.h"
#include <math.h>
/*********************
* DEFINES
*********************/
/**********************
* TYPEDEFS
**********************/
/**********************
* STATIC PROTOTYPES
**********************/
/**********************
* STATIC VARIABLES
**********************/
/**********************
* MACROS
**********************/
/**********************
* GLOBAL FUNCTIONS
**********************/
lv_res_t lv_gpu_nxp_vglite_draw_line(const lv_point_t * point1, const lv_point_t * point2,
const lv_area_t * clip_area, const lv_draw_line_dsc_t * dsc)
{
vg_lite_error_t err = VG_LITE_SUCCESS;
vg_lite_path_t path;
vg_lite_color_t vgcol; /* vglite takes ABGR */
vg_lite_buffer_t * vgbuf = lv_vglite_get_dest_buf();
vg_lite_cap_style_t cap_style = (dsc->round_start || dsc->round_end) ? VG_LITE_CAP_ROUND : VG_LITE_CAP_BUTT;
vg_lite_join_style_t join_style = (dsc->round_start || dsc->round_end) ? VG_LITE_JOIN_ROUND : VG_LITE_JOIN_MITER;
bool is_dashed = (dsc->dash_width && dsc->dash_gap);
vg_lite_float_t stroke_dash_pattern[2] = {0, 0};
uint32_t stroke_dash_count = 0;
vg_lite_float_t stroke_dash_phase = 0;
if(is_dashed) {
stroke_dash_pattern[0] = (vg_lite_float_t)dsc->dash_width;
stroke_dash_pattern[1] = (vg_lite_float_t)dsc->dash_gap;
stroke_dash_count = sizeof(stroke_dash_pattern) / sizeof(vg_lite_float_t);
stroke_dash_phase = (vg_lite_float_t)dsc->dash_width / 2;
}
/* Choose vglite blend mode based on given lvgl blend mode */
vg_lite_blend_t vglite_blend_mode = lv_vglite_get_blend_mode(dsc->blend_mode);
/*** Init path ***/
lv_coord_t width = dsc->width;
int32_t line_path[] = { /*VG line path*/
VLC_OP_MOVE, point1->x, point1->y,
VLC_OP_LINE, point2->x, point2->y,
VLC_OP_END
};
err = vg_lite_init_path(&path, VG_LITE_S32, VG_LITE_HIGH, sizeof(line_path), line_path,
(vg_lite_float_t)clip_area->x1, (vg_lite_float_t)clip_area->y1,
((vg_lite_float_t)clip_area->x2) + 1.0f, ((vg_lite_float_t)clip_area->y2) + 1.0f);
VG_LITE_ERR_RETURN_INV(err, "Init path failed.");
vg_lite_matrix_t matrix;
vg_lite_identity(&matrix);
lv_color32_t col32 = { .full = lv_color_to32(dsc->color) }; /*Convert color to RGBA8888*/
vg_lite_buffer_format_t color_format = LV_COLOR_DEPTH == 16 ? VG_LITE_BGRA8888 : VG_LITE_ABGR8888;
if(lv_vglite_premult_and_swizzle(&vgcol, col32, dsc->opa, color_format) != LV_RES_OK)
VG_LITE_RETURN_INV("Premultiplication and swizzle failed.");
/*** Draw line ***/
err = vg_lite_set_draw_path_type(&path, VG_LITE_DRAW_STROKE_PATH);
VG_LITE_ERR_RETURN_INV(err, "Set draw path type failed.");
err = vg_lite_set_stroke(&path, cap_style, join_style, width, 8, stroke_dash_pattern, stroke_dash_count,
stroke_dash_phase, vgcol);
VG_LITE_ERR_RETURN_INV(err, "Set stroke failed.");
err = vg_lite_update_stroke(&path);
VG_LITE_ERR_RETURN_INV(err, "Update stroke failed.");
err = vg_lite_draw(vgbuf, &path, VG_LITE_FILL_NON_ZERO, &matrix, vglite_blend_mode, vgcol);
VG_LITE_ERR_RETURN_INV(err, "Draw line failed.");
if(lv_vglite_run() != LV_RES_OK)
VG_LITE_RETURN_INV("Run failed.");
err = vg_lite_clear_path(&path);
VG_LITE_ERR_RETURN_INV(err, "Clear path failed.");
return LV_RES_OK;
}
/**********************
* STATIC FUNCTIONS
**********************/
#endif /*LV_USE_GPU_NXP_VG_LITE*/

View File

@@ -0,0 +1,83 @@
/**
* @file lv_draw_vglite_line.h
*
*/
/**
* MIT License
*
* Copyright 2022, 2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next paragraph)
* shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef LV_DRAW_VGLITE_LINE_H
#define LV_DRAW_VGLITE_LINE_H
#ifdef __cplusplus
extern "C"
{
#endif
/*********************
* INCLUDES
*********************/
#include "../../../lv_conf_internal.h"
#if LV_USE_GPU_NXP_VG_LITE
#include "lv_vglite_utils.h"
#include "../../lv_draw_line.h"
/*********************
* DEFINES
*********************/
/**********************
* TYPEDEFS
**********************/
/**********************
* GLOBAL PROTOTYPES
**********************/
/**
* Draw line shape with effects
*
* @param[in] point1 Starting point with relative coordinates
* @param[in] point2 Ending point with relative coordinates
* @param[in] clip_area Clipping area with relative coordinates to dest buff
* @param[in] dsc Line description structure (width, rounded ending, opacity, ...)
*
* @retval LV_RES_OK Draw completed
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_VG_LITE_LOG_ERRORS)
*/
lv_res_t lv_gpu_nxp_vglite_draw_line(const lv_point_t * point1, const lv_point_t * point2,
const lv_area_t * clip_area, const lv_draw_line_dsc_t * dsc);
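A hypothetical call drawing a dashed 4 px line with rounded caps, using the standard lv_draw_line_dsc_t fields the implementation reads; clip_area is assumed to be the current clip, relative to the destination buffer.
lv_point_t p1 = { .x = 0,   .y = 0 };
lv_point_t p2 = { .x = 120, .y = 40 };
lv_draw_line_dsc_t line_dsc;
lv_draw_line_dsc_init(&line_dsc);
line_dsc.width       = 4;
line_dsc.dash_width  = 8;
line_dsc.dash_gap    = 4;
line_dsc.round_start = 1;
line_dsc.round_end   = 1;
lv_res_t res = lv_gpu_nxp_vglite_draw_line(&p1, &p2, &clip_area, &line_dsc);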
/**********************
* MACROS
**********************/
#endif /*LV_USE_GPU_NXP_VG_LITE*/
#ifdef __cplusplus
} /*extern "C"*/
#endif
#endif /*LV_DRAW_VGLITE_LINE_H*/

View File

@@ -6,7 +6,7 @@
/**
* MIT License
*
* Copyright 2021, 2022 NXP
* Copyright 2021-2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -34,10 +34,28 @@
#include "lv_draw_vglite_rect.h"
#if LV_USE_GPU_NXP_VG_LITE
#include "lv_vglite_buf.h"
#include <math.h>
/*********************
* DEFINES
*********************/
/* Path data sizes for different elements */
#define CUBIC_PATH_DATA_SIZE 7 /* 1 opcode, 6 arguments */
#define LINE_PATH_DATA_SIZE 3 /* 1 opcode, 2 arguments */
#define MOVE_PATH_DATA_SIZE 3 /* 1 opcode, 2 arguments */
#define END_PATH_DATA_SIZE 1 /* 1 opcode, 0 arguments */
/* Maximum possible rectangle path size
* is in the rounded rectangle case:
* - 1 move for the path start
* - 4 cubics for the corners
* - 4 lines for the sides
* - 1 end for the path end */
#define RECT_PATH_DATA_MAX_SIZE 1 * MOVE_PATH_DATA_SIZE + 4 * CUBIC_PATH_DATA_SIZE + 4 * LINE_PATH_DATA_SIZE + 1 * END_PATH_DATA_SIZE
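For clarity, the worst-case element count behind this macro works out as follows (comment-only arithmetic):
/*   1 * MOVE (3) + 4 * CUBIC (7) + 4 * LINE (3) + 1 * END (1)
 *   = 3 + 28 + 12 + 1 = 44 int32_t elements,
 * which is the storage that path_data[RECT_PATH_DATA_MAX_SIZE] reserves below. */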
/**********************
* TYPEDEFS
@@ -47,6 +65,18 @@
* STATIC PROTOTYPES
**********************/
/**
 * Generates path data for rectangle drawing.
 *
 * @param[in/out] path_data The path data to initialize
 * @param[in/out] path_data_size The resulting size of the created path data (in bytes)
 * @param[in] radius The radius of the rectangle corners
 * @param[in] coords The coordinates of the rectangle to be drawn
 */
static void lv_vglite_create_rect_path_data(int32_t * path_data, uint32_t * path_data_size,
lv_coord_t radius,
const lv_area_t * coords);
/**********************
* STATIC VARIABLES
**********************/
@@ -59,94 +89,37 @@
* GLOBAL FUNCTIONS
**********************/
lv_res_t lv_gpu_nxp_vglite_draw_bg(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_dsc_t * dsc, const lv_area_t * coords)
lv_res_t lv_gpu_nxp_vglite_draw_bg(const lv_area_t * coords, const lv_area_t * clip_area,
const lv_draw_rect_dsc_t * dsc)
{
vg_lite_buffer_t vgbuf;
vg_lite_error_t err = VG_LITE_SUCCESS;
lv_coord_t dest_width = lv_area_get_width(draw_ctx->buf_area);
lv_coord_t dest_height = lv_area_get_height(draw_ctx->buf_area);
vg_lite_path_t path;
vg_lite_color_t vgcol; /* vglite takes ABGR */
vg_lite_matrix_t matrix;
lv_coord_t width = lv_area_get_width(coords);
lv_coord_t height = lv_area_get_height(coords);
vg_lite_linear_gradient_t gradient;
vg_lite_matrix_t * grad_matrix;
vg_lite_color_t vgcol;
lv_coord_t radius = dsc->radius;
vg_lite_buffer_t * vgbuf = lv_vglite_get_dest_buf();
if(dsc->radius < 0)
return LV_RES_INV;
/* Make areas relative to draw buffer */
lv_area_t rel_coords;
lv_area_copy(&rel_coords, coords);
lv_area_move(&rel_coords, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
lv_area_t rel_clip;
lv_area_copy(&rel_clip, draw_ctx->clip_area);
lv_area_move(&rel_clip, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
/*** Init destination buffer ***/
if(lv_vglite_init_buf(&vgbuf, (uint32_t)dest_width, (uint32_t)dest_height, (uint32_t)dest_width * sizeof(lv_color_t),
(const lv_color_t *)draw_ctx->buf, false) != LV_RES_OK)
VG_LITE_RETURN_INV("Init buffer failed.");
/*** Init path ***/
int32_t rad = dsc->radius;
if(dsc->radius == LV_RADIUS_CIRCLE) {
rad = (width > height) ? height / 2 : width / 2;
}
if((dsc->radius == LV_RADIUS_CIRCLE) && (width == height)) {
float tang = ((float)rad * BEZIER_OPTIM_CIRCLE);
int32_t cpoff = (int32_t)tang;
int32_t circle_path[] = { /*VG circle path*/
VLC_OP_MOVE, rel_coords.x1 + rad, rel_coords.y1,
VLC_OP_CUBIC_REL, cpoff, 0, rad, rad - cpoff, rad, rad, /* top-right */
VLC_OP_CUBIC_REL, 0, cpoff, cpoff - rad, rad, 0 - rad, rad, /* bottom-right */
VLC_OP_CUBIC_REL, 0 - cpoff, 0, 0 - rad, cpoff - rad, 0 - rad, 0 - rad, /* bottom-left */
VLC_OP_CUBIC_REL, 0, 0 - cpoff, rad - cpoff, 0 - rad, rad, 0 - rad, /* top-left */
VLC_OP_END
};
err = vg_lite_init_path(&path, VG_LITE_S32, VG_LITE_HIGH, sizeof(circle_path), circle_path,
(vg_lite_float_t) rel_clip.x1, (vg_lite_float_t) rel_clip.y1,
((vg_lite_float_t) rel_clip.x2) + 1.0f, ((vg_lite_float_t) rel_clip.y2) + 1.0f);
}
else if(dsc->radius > 0) {
float tang = ((float)rad * BEZIER_OPTIM_CIRCLE);
int32_t cpoff = (int32_t)tang;
int32_t rounded_path[] = { /*VG rounded rectangular path*/
VLC_OP_MOVE, rel_coords.x1 + rad, rel_coords.y1,
VLC_OP_LINE, rel_coords.x2 - rad + 1, rel_coords.y1, /* top */
VLC_OP_CUBIC_REL, cpoff, 0, rad, rad - cpoff, rad, rad, /* top-right */
VLC_OP_LINE, rel_coords.x2 + 1, rel_coords.y2 - rad + 1, /* right */
VLC_OP_CUBIC_REL, 0, cpoff, cpoff - rad, rad, 0 - rad, rad, /* bottom-right */
VLC_OP_LINE, rel_coords.x1 + rad, rel_coords.y2 + 1, /* bottom */
VLC_OP_CUBIC_REL, 0 - cpoff, 0, 0 - rad, cpoff - rad, 0 - rad, 0 - rad, /* bottom-left */
VLC_OP_LINE, rel_coords.x1, rel_coords.y1 + rad, /* left */
VLC_OP_CUBIC_REL, 0, 0 - cpoff, rad - cpoff, 0 - rad, rad, 0 - rad, /* top-left */
VLC_OP_END
};
err = vg_lite_init_path(&path, VG_LITE_S32, VG_LITE_HIGH, sizeof(rounded_path), rounded_path,
(vg_lite_float_t) rel_clip.x1, (vg_lite_float_t) rel_clip.y1,
((vg_lite_float_t) rel_clip.x2) + 1.0f, ((vg_lite_float_t) rel_clip.y2) + 1.0f);
}
else {
int32_t rect_path[] = { /*VG rectangular path*/
VLC_OP_MOVE, rel_coords.x1, rel_coords.y1,
VLC_OP_LINE, rel_coords.x2 + 1, rel_coords.y1,
VLC_OP_LINE, rel_coords.x2 + 1, rel_coords.y2 + 1,
VLC_OP_LINE, rel_coords.x1, rel_coords.y2 + 1,
VLC_OP_LINE, rel_coords.x1, rel_coords.y1,
VLC_OP_END
};
err = vg_lite_init_path(&path, VG_LITE_S32, VG_LITE_LOW, sizeof(rect_path), rect_path,
(vg_lite_float_t) rel_clip.x1, (vg_lite_float_t) rel_clip.y1,
((vg_lite_float_t) rel_clip.x2) + 1.0f, ((vg_lite_float_t) rel_clip.y2) + 1.0f);
}
int32_t path_data[RECT_PATH_DATA_MAX_SIZE];
uint32_t path_data_size;
lv_vglite_create_rect_path_data(path_data, &path_data_size, radius, coords);
vg_lite_quality_t path_quality = dsc->radius > 0 ? VG_LITE_HIGH : VG_LITE_LOW;
vg_lite_path_t path;
err = vg_lite_init_path(&path, VG_LITE_S32, path_quality, path_data_size, path_data,
(vg_lite_float_t)clip_area->x1, (vg_lite_float_t)clip_area->y1,
((vg_lite_float_t)clip_area->x2) + 1.0f, ((vg_lite_float_t)clip_area->y2) + 1.0f);
VG_LITE_ERR_RETURN_INV(err, "Init path failed.");
vg_lite_matrix_t matrix;
vg_lite_identity(&matrix);
vg_lite_matrix_t * grad_matrix;
vg_lite_linear_gradient_t gradient;
/*** Init Color/Gradient ***/
if(dsc->bg_grad.dir != (lv_grad_dir_t)LV_GRAD_DIR_NONE) {
uint32_t colors[2];
@@ -154,18 +127,14 @@ lv_res_t lv_gpu_nxp_vglite_draw_bg(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_
lv_color32_t col32[2];
/* Gradient setup */
uint8_t cnt = MAX(dsc->bg_grad.stops_count, 2);
uint8_t cnt = LV_MAX(dsc->bg_grad.stops_count, 2);
for(uint8_t i = 0; i < cnt; i++) {
col32[i].full = lv_color_to32(dsc->bg_grad.stops[i].color); /*Convert color to RGBA8888*/
stops[i] = dsc->bg_grad.stops[i].frac;
#if LV_COLOR_DEPTH==16
colors[i] = ((uint32_t)col32[i].ch.alpha << 24) | ((uint32_t)col32[i].ch.blue << 16) |
((uint32_t)col32[i].ch.green << 8) | (uint32_t)col32[i].ch.red;
#else /*LV_COLOR_DEPTH==32*/
/* watch out: red and blue color components are inverted versus vg_lite_color_t order */
colors[i] = ((uint32_t)col32[i].ch.alpha << 24) | ((uint32_t)col32[i].ch.red << 16) |
((uint32_t)col32[i].ch.green << 8) | (uint32_t)col32[i].ch.blue;
#endif
vg_lite_buffer_format_t color_format = LV_COLOR_DEPTH == 16 ? VG_LITE_ABGR8888 : VG_LITE_ARGB8888;
if(lv_vglite_premult_and_swizzle(&colors[i], col32[i], dsc->bg_opa, color_format) != LV_RES_OK)
VG_LITE_RETURN_INV("Premultiplication and swizzle failed.");
}
lv_memset_00(&gradient, sizeof(vg_lite_linear_gradient_t));
@@ -181,7 +150,7 @@ lv_res_t lv_gpu_nxp_vglite_draw_bg(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_
grad_matrix = vg_lite_get_grad_matrix(&gradient);
vg_lite_identity(grad_matrix);
vg_lite_translate((float)rel_coords.x1, (float)rel_coords.y1, grad_matrix);
vg_lite_translate((float)coords->x1, (float)coords->y1, grad_matrix);
if(dsc->bg_grad.dir == (lv_grad_dir_t)LV_GRAD_DIR_VER) {
vg_lite_scale(1.0f, (float)height / 256.0f, grad_matrix);
@@ -192,39 +161,22 @@ lv_res_t lv_gpu_nxp_vglite_draw_bg(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_
}
}
lv_opa_t bg_opa = dsc->bg_opa;
lv_color32_t bg_col32 = {.full = lv_color_to32(dsc->bg_color)}; /*Convert color to RGBA8888*/
if(bg_opa <= (lv_opa_t)LV_OPA_MAX) {
/* Only pre-multiply color if hardware pre-multiplication is not present */
if(!vg_lite_query_feature(gcFEATURE_BIT_VG_PE_PREMULTIPLY)) {
bg_col32.ch.red = (uint8_t)(((uint16_t)bg_col32.ch.red * bg_opa) >> 8);
bg_col32.ch.green = (uint8_t)(((uint16_t)bg_col32.ch.green * bg_opa) >> 8);
bg_col32.ch.blue = (uint8_t)(((uint16_t)bg_col32.ch.blue * bg_opa) >> 8);
}
bg_col32.ch.alpha = bg_opa;
}
#if LV_COLOR_DEPTH==16
vgcol = bg_col32.full;
#else /*LV_COLOR_DEPTH==32*/
vgcol = ((uint32_t)bg_col32.ch.alpha << 24) | ((uint32_t)bg_col32.ch.blue << 16) |
((uint32_t)bg_col32.ch.green << 8) | (uint32_t)bg_col32.ch.red;
#endif
/*Clean & invalidate cache*/
lv_vglite_invalidate_cache();
vg_lite_buffer_format_t color_format = LV_COLOR_DEPTH == 16 ? VG_LITE_BGRA8888 : VG_LITE_ABGR8888;
if(lv_vglite_premult_and_swizzle(&vgcol, bg_col32, dsc->bg_opa, color_format) != LV_RES_OK)
VG_LITE_RETURN_INV("Premultiplication and swizzle failed.");
/*** Draw rectangle ***/
if(dsc->bg_grad.dir == (lv_grad_dir_t)LV_GRAD_DIR_NONE) {
err = vg_lite_draw(&vgbuf, &path, VG_LITE_FILL_EVEN_ODD, &matrix, VG_LITE_BLEND_SRC_OVER, vgcol);
err = vg_lite_draw(vgbuf, &path, VG_LITE_FILL_EVEN_ODD, &matrix, VG_LITE_BLEND_SRC_OVER, vgcol);
}
else {
err = vg_lite_draw_gradient(&vgbuf, &path, VG_LITE_FILL_EVEN_ODD, &matrix, &gradient, VG_LITE_BLEND_SRC_OVER);
err = vg_lite_draw_gradient(vgbuf, &path, VG_LITE_FILL_EVEN_ODD, &matrix, &gradient, VG_LITE_BLEND_SRC_OVER);
}
VG_LITE_ERR_RETURN_INV(err, "Draw gradient failed.");
err = vg_lite_finish();
VG_LITE_ERR_RETURN_INV(err, "Finish failed.");
if(lv_vglite_run() != LV_RES_OK)
VG_LITE_RETURN_INV("Run failed.");
err = vg_lite_clear_path(&path);
VG_LITE_ERR_RETURN_INV(err, "Clear path failed.");
@@ -237,6 +189,261 @@ lv_res_t lv_gpu_nxp_vglite_draw_bg(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_
return LV_RES_OK;
}
lv_res_t lv_gpu_nxp_vglite_draw_border_generic(const lv_area_t * coords, const lv_area_t * clip_area,
const lv_draw_rect_dsc_t * dsc, bool border)
{
vg_lite_error_t err = VG_LITE_SUCCESS;
vg_lite_color_t vgcol; /* vglite takes ABGR */
lv_coord_t radius = dsc->radius;
vg_lite_buffer_t * vgbuf = lv_vglite_get_dest_buf();
if(radius < 0)
return LV_RES_INV;
if(border) {
/* Draw border - only has radius if object has radius*/
lv_coord_t border_half = (lv_coord_t)floor(dsc->border_width / 2.0f);
if(radius > border_half)
radius = radius - border_half;
}
else {
/* Draw outline - always has radius, leave the same radius in the circle case */
lv_coord_t outline_half = (lv_coord_t)ceil(dsc->outline_width / 2.0f);
if(radius < (lv_coord_t)LV_RADIUS_CIRCLE - outline_half)
radius = radius + outline_half;
}
vg_lite_cap_style_t cap_style = (radius) ? VG_LITE_CAP_ROUND : VG_LITE_CAP_BUTT;
vg_lite_join_style_t join_style = (radius) ? VG_LITE_JOIN_ROUND : VG_LITE_JOIN_MITER;
/* Choose vglite blend mode based on given lvgl blend mode */
vg_lite_blend_t vglite_blend_mode = lv_vglite_get_blend_mode(dsc->blend_mode);
/*** Init path ***/
int32_t path_data[RECT_PATH_DATA_MAX_SIZE];
uint32_t path_data_size;
lv_vglite_create_rect_path_data(path_data, &path_data_size, radius, coords);
vg_lite_quality_t path_quality = dsc->radius > 0 ? VG_LITE_HIGH : VG_LITE_LOW;
vg_lite_path_t path;
err = vg_lite_init_path(&path, VG_LITE_S32, path_quality, path_data_size, path_data,
(vg_lite_float_t)clip_area->x1, (vg_lite_float_t)clip_area->y1,
((vg_lite_float_t)clip_area->x2) + 1.0f, ((vg_lite_float_t)clip_area->y2) + 1.0f);
VG_LITE_ERR_RETURN_INV(err, "Init path failed.");
vg_lite_matrix_t matrix;
vg_lite_identity(&matrix);
lv_opa_t opa;
lv_color32_t col32;
lv_coord_t line_width;
if(border) {
opa = dsc->border_opa;
col32.full = lv_color_to32(dsc->border_color); /*Convert color to RGBA8888*/
line_width = dsc->border_width;
}
else {
opa = dsc->outline_opa;
col32.full = lv_color_to32(dsc->outline_color); /*Convert color to RGBA8888*/
line_width = dsc->outline_width;
}
vg_lite_buffer_format_t color_format = LV_COLOR_DEPTH == 16 ? VG_LITE_BGRA8888 : VG_LITE_ABGR8888;
if(lv_vglite_premult_and_swizzle(&vgcol, col32, opa, color_format) != LV_RES_OK)
VG_LITE_RETURN_INV("Premultiplication and swizzle failed.");
/*** Draw border ***/
err = vg_lite_set_draw_path_type(&path, VG_LITE_DRAW_STROKE_PATH);
VG_LITE_ERR_RETURN_INV(err, "Set draw path type failed.");
err = vg_lite_set_stroke(&path, cap_style, join_style, line_width, 8, NULL, 0, 0, vgcol);
VG_LITE_ERR_RETURN_INV(err, "Set stroke failed.");
err = vg_lite_update_stroke(&path);
VG_LITE_ERR_RETURN_INV(err, "Update stroke failed.");
err = vg_lite_draw(vgbuf, &path, VG_LITE_FILL_NON_ZERO, &matrix, vglite_blend_mode, vgcol);
VG_LITE_ERR_RETURN_INV(err, "Draw border failed.");
if(lv_vglite_run() != LV_RES_OK)
VG_LITE_RETURN_INV("Run failed.");
err = vg_lite_clear_path(&path);
VG_LITE_ERR_RETURN_INV(err, "Clear path failed.");
return LV_RES_OK;
}
static void lv_vglite_create_rect_path_data(int32_t * path_data, uint32_t * path_data_size,
lv_coord_t radius,
const lv_area_t * coords)
{
lv_coord_t rect_width = lv_area_get_width(coords);
lv_coord_t rect_height = lv_area_get_height(coords);
/* Get the final radius. Can't be larger than half of the shortest side */
int32_t shortest_side = LV_MIN(rect_width, rect_height);
int32_t final_radius = LV_MIN(radius, shortest_side / 2);
/* Path data element index */
uint8_t pidx = 0;
if((radius == (lv_coord_t)LV_RADIUS_CIRCLE) && (rect_width == rect_height)) {
/* Get the control point offset for rounded cases */
int32_t cpoff = (int32_t)((float)final_radius * BEZIER_OPTIM_CIRCLE);
/* Circle case */
/* Starting point */
path_data[pidx++] = VLC_OP_MOVE;
path_data[pidx++] = coords->x1 + final_radius;
path_data[pidx++] = coords->y1;
/* Top-right arc */
path_data[pidx++] = VLC_OP_CUBIC_REL;
path_data[pidx++] = cpoff;
path_data[pidx++] = 0;
path_data[pidx++] = final_radius;
path_data[pidx++] = final_radius - cpoff;
path_data[pidx++] = final_radius;
path_data[pidx++] = final_radius;
/* Bottom-right arc*/
path_data[pidx++] = VLC_OP_CUBIC_REL;
path_data[pidx++] = 0;
path_data[pidx++] = cpoff;
path_data[pidx++] = cpoff - final_radius;
path_data[pidx++] = final_radius;
path_data[pidx++] = 0 - final_radius;
path_data[pidx++] = final_radius;
/* Bottom-left arc */
path_data[pidx++] = VLC_OP_CUBIC_REL;
path_data[pidx++] = 0 - cpoff;
path_data[pidx++] = 0;
path_data[pidx++] = 0 - final_radius;
path_data[pidx++] = cpoff - final_radius;
path_data[pidx++] = 0 - final_radius;
path_data[pidx++] = 0 - final_radius;
/* Top-left arc*/
path_data[pidx++] = VLC_OP_CUBIC_REL;
path_data[pidx++] = 0;
path_data[pidx++] = 0 - cpoff;
path_data[pidx++] = final_radius - cpoff;
path_data[pidx++] = 0 - final_radius;
path_data[pidx++] = final_radius;
path_data[pidx++] = 0 - final_radius;
/* Ending point */
path_data[pidx++] = VLC_OP_END;
}
else if(radius > 0) {
/* Get the control point offset for rounded cases */
int32_t cpoff = (int32_t)((float)final_radius * BEZIER_OPTIM_CIRCLE);
/* Rounded rectangle case */
/* Starting point */
path_data[pidx++] = VLC_OP_MOVE;
path_data[pidx++] = coords->x1 + final_radius;
path_data[pidx++] = coords->y1;
/* Top side */
path_data[pidx++] = VLC_OP_LINE;
path_data[pidx++] = coords->x2 - final_radius + 1; // Extended for VGLite
path_data[pidx++] = coords->y1;
/* Top-right corner */
path_data[pidx++] = VLC_OP_CUBIC_REL;
path_data[pidx++] = cpoff;
path_data[pidx++] = 0;
path_data[pidx++] = final_radius;
path_data[pidx++] = final_radius - cpoff;
path_data[pidx++] = final_radius;
path_data[pidx++] = final_radius;
/* Right side */
path_data[pidx++] = VLC_OP_LINE;
path_data[pidx++] = coords->x2 + 1; // Extended for VGLite
path_data[pidx++] = coords->y2 - final_radius + 1; // Extended for VGLite
/* Bottom-right corner*/
path_data[pidx++] = VLC_OP_CUBIC_REL;
path_data[pidx++] = 0;
path_data[pidx++] = cpoff;
path_data[pidx++] = cpoff - final_radius;
path_data[pidx++] = final_radius;
path_data[pidx++] = 0 - final_radius;
path_data[pidx++] = final_radius;
/* Bottom side */
path_data[pidx++] = VLC_OP_LINE;
path_data[pidx++] = coords->x1 + final_radius;
path_data[pidx++] = coords->y2 + 1; // Extended for VGLite
/* Bottom-left corner */
path_data[pidx++] = VLC_OP_CUBIC_REL;
path_data[pidx++] = 0 - cpoff;
path_data[pidx++] = 0;
path_data[pidx++] = 0 - final_radius;
path_data[pidx++] = cpoff - final_radius;
path_data[pidx++] = 0 - final_radius;
path_data[pidx++] = 0 - final_radius;
/* Left side*/
path_data[pidx++] = VLC_OP_LINE;
path_data[pidx++] = coords->x1;
path_data[pidx++] = coords->y1 + final_radius;
/* Top-left corner */
path_data[pidx++] = VLC_OP_CUBIC_REL;
path_data[pidx++] = 0;
path_data[pidx++] = 0 - cpoff;
path_data[pidx++] = final_radius - cpoff;
path_data[pidx++] = 0 - final_radius;
path_data[pidx++] = final_radius;
path_data[pidx++] = 0 - final_radius;
/* Ending point */
path_data[pidx++] = VLC_OP_END;
}
else {
/* Non-rounded rectangle case */
/* Starting point */
path_data[pidx++] = VLC_OP_MOVE;
path_data[pidx++] = coords->x1;
path_data[pidx++] = coords->y1;
/* Top side */
path_data[pidx++] = VLC_OP_LINE;
path_data[pidx++] = coords->x2 + 1; // Extended for VGLite
path_data[pidx++] = coords->y1;
/* Right side */
path_data[pidx++] = VLC_OP_LINE;
path_data[pidx++] = coords->x2 + 1; // Extended for VGLite
path_data[pidx++] = coords->y2 + 1; // Extended for VGLite
/* Bottom side */
path_data[pidx++] = VLC_OP_LINE;
path_data[pidx++] = coords->x1;
path_data[pidx++] = coords->y2 + 1; // Extended for VGLite
/* Left side*/
path_data[pidx++] = VLC_OP_LINE;
path_data[pidx++] = coords->x1;
path_data[pidx++] = coords->y1;
/* Ending point */
path_data[pidx++] = VLC_OP_END;
}
/* Resulting path size */
*path_data_size = pidx * sizeof(int32_t);
}
/**********************
* STATIC FUNCTIONS
**********************/

View File

@@ -6,7 +6,7 @@
/**
* MIT License
*
* Copyright 2021, 2022 NXP
* Copyright 2021-2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -40,7 +40,7 @@ extern "C" {
#include "../../../lv_conf_internal.h"
#if LV_USE_GPU_NXP_VG_LITE
#include "lv_gpu_nxp_vglite.h"
#include "lv_vglite_utils.h"
#include "../../lv_draw_rect.h"
/*********************
@@ -56,13 +56,33 @@ extern "C" {
**********************/
/**
* Draw rectangle shape with effects (rounded corners, gradient)
* Draw rectangle background with effects (rounded corners, gradient)
*
* @param[in] coords Coordinates of the rectangle background (relative to dest buff)
* @param[in] clip_area Clipping area with relative coordinates to dest buff
* @param[in] dsc Description of the rectangle background
*
* @retval LV_RES_OK Draw completed
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_VG_LITE_LOG_ERRORS)
*
* @param draw_ctx drawing context
* @param dsc description of the rectangle
* @param coords the area where rectangle is clipped
*/
lv_res_t lv_gpu_nxp_vglite_draw_bg(lv_draw_ctx_t * draw_ctx, const lv_draw_rect_dsc_t * dsc, const lv_area_t * coords);
lv_res_t lv_gpu_nxp_vglite_draw_bg(const lv_area_t * coords, const lv_area_t * clip_area,
const lv_draw_rect_dsc_t * dsc);
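A hedged call-site sketch for the new background API; coords and clip_area must already be relative to the destination buffer, and the descriptor fields shown are the standard LVGL ones with illustrative values.
lv_draw_rect_dsc_t rect_dsc;
lv_draw_rect_dsc_init(&rect_dsc);
rect_dsc.radius   = 8;
rect_dsc.bg_color = lv_color_hex(0x0a84ff);
rect_dsc.bg_opa   = LV_OPA_COVER;
if(lv_gpu_nxp_vglite_draw_bg(&coords, &clip_area, &rect_dsc) != LV_RES_OK) {
    /* fall back to the software rectangle renderer */
}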
/**
* Draw rectangle border/outline shape with effects (rounded corners, opacity)
*
* @param[in] coords Coordinates of the rectangle border/outline (relative to dest buff)
* @param[in] clip_area Clipping area with relative coordinates to dest buff
* @param[in] dsc Description of the rectangle border/outline
* @param[in] border True for border, False for outline
*
* @retval LV_RES_OK Draw completed
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_VG_LITE_LOG_ERRORS)
*
*/
lv_res_t lv_gpu_nxp_vglite_draw_border_generic(const lv_area_t * coords, const lv_area_t * clip_area,
const lv_draw_rect_dsc_t * dsc, bool border);
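The same descriptor drives both border and outline drawing; a short sketch of the two calls, where rect_dsc, coords and clip_area are assumed to exist as in the previous example.
/* Border: reads border_color / border_width / border_opa from the descriptor. */
lv_gpu_nxp_vglite_draw_border_generic(&coords, &clip_area, &rect_dsc, true);
/* Outline: reads outline_color / outline_width / outline_opa instead. */
lv_gpu_nxp_vglite_draw_border_generic(&coords, &clip_area, &rect_dsc, false);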
/**********************
* MACROS

View File

@@ -1,153 +0,0 @@
/**
* @file lv_gpu_nxp_vglite.c
*
*/
/**
* MIT License
*
* Copyright 2020-2022 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next paragraph)
* shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
/*********************
* INCLUDES
*********************/
#include "lv_gpu_nxp_vglite.h"
#if LV_USE_GPU_NXP_VG_LITE
#include "../../../core/lv_refr.h"
#if BLIT_DBG_AREAS
#include "lv_draw_vglite_blend.h"
#endif
/*********************
* DEFINES
*********************/
#if LV_COLOR_DEPTH==16
#define VG_LITE_PX_FMT VG_LITE_RGB565
#elif LV_COLOR_DEPTH==32
#define VG_LITE_PX_FMT VG_LITE_BGRA8888
#else
#error Only 16bit and 32bit color depth are supported. Set LV_COLOR_DEPTH to 16 or 32.
#endif
/**********************
* TYPEDEFS
**********************/
/**********************
* STATIC PROTOTYPES
**********************/
/**********************
* STATIC VARIABLES
**********************/
/**********************
* MACROS
**********************/
/**********************
* GLOBAL FUNCTIONS
**********************/
lv_res_t lv_vglite_init_buf(vg_lite_buffer_t * vgbuf, uint32_t width, uint32_t height, uint32_t stride,
const lv_color_t * ptr, bool source)
{
/*Test for memory alignment*/
if((((uintptr_t)ptr) % (uintptr_t)LV_ATTRIBUTE_MEM_ALIGN_SIZE) != (uintptr_t)0x0U)
VG_LITE_RETURN_INV("%s buffer (0x%x) not aligned to %d.", source ? "Src" : "Dest",
(size_t) ptr, LV_ATTRIBUTE_MEM_ALIGN_SIZE);
/*Test for stride alignment*/
if(source && (stride % (LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX * sizeof(lv_color_t))) != 0x0U)
VG_LITE_RETURN_INV("Src buffer stride (%d bytes) not aligned to %d bytes.", stride,
LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX * sizeof(lv_color_t));
vgbuf->format = VG_LITE_PX_FMT;
vgbuf->tiled = VG_LITE_LINEAR;
vgbuf->image_mode = VG_LITE_NORMAL_IMAGE_MODE;
vgbuf->transparency_mode = VG_LITE_IMAGE_OPAQUE;
vgbuf->width = (int32_t)width;
vgbuf->height = (int32_t)height;
vgbuf->stride = (int32_t)stride;
lv_memset_00(&vgbuf->yuv, sizeof(vgbuf->yuv));
vgbuf->memory = (void *)ptr;
vgbuf->address = (uint32_t)vgbuf->memory;
vgbuf->handle = NULL;
return LV_RES_OK;
}
#if BLIT_DBG_AREAS
void lv_vglite_dbg_draw_rectangle(lv_color_t * dest_buf, lv_coord_t dest_width, lv_coord_t dest_height,
lv_area_t * fill_area, lv_color_t color)
{
lv_area_t a;
/* top line */
a.x1 = fill_area->x1;
a.x2 = fill_area->x2;
a.y1 = fill_area->y1;
a.y2 = fill_area->y1;
lv_gpu_nxp_vglite_fill(dest_buf, dest_width, dest_height, &a, color, LV_OPA_COVER);
/* bottom line */
a.x1 = fill_area->x1;
a.x2 = fill_area->x2;
a.y1 = fill_area->y2;
a.y2 = fill_area->y2;
lv_gpu_nxp_vglite_fill(dest_buf, dest_width, dest_height, &a, color, LV_OPA_COVER);
/* left line */
a.x1 = fill_area->x1;
a.x2 = fill_area->x1;
a.y1 = fill_area->y1;
a.y2 = fill_area->y2;
lv_gpu_nxp_vglite_fill(dest_buf, dest_width, dest_height, &a, color, LV_OPA_COVER);
/* right line */
a.x1 = fill_area->x2;
a.x2 = fill_area->x2;
a.y1 = fill_area->y1;
a.y2 = fill_area->y2;
lv_gpu_nxp_vglite_fill(dest_buf, dest_width, dest_height, &a, color, LV_OPA_COVER);
}
#endif /* BLIT_DBG_AREAS */
void lv_vglite_invalidate_cache(void)
{
lv_disp_t * disp = _lv_refr_get_disp_refreshing();
if(disp->driver->clean_dcache_cb)
disp->driver->clean_dcache_cb(disp->driver);
}
/**********************
* STATIC FUNCTIONS
**********************/
#endif /*LV_USE_GPU_NXP_VG_LITE*/

View File

@@ -0,0 +1,143 @@
/**
* @file lv_vglite_buf.c
*
*/
/**
* MIT License
*
* Copyright 2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next paragraph)
* shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
/*********************
* INCLUDES
*********************/
#include "lv_vglite_buf.h"
#if LV_USE_GPU_NXP_VG_LITE
/*********************
* DEFINES
*********************/
#if LV_COLOR_DEPTH == 16
#define VG_LITE_PX_FMT VG_LITE_RGB565
#elif LV_COLOR_DEPTH == 32
#define VG_LITE_PX_FMT VG_LITE_BGRA8888
#else
#error Only 16bit and 32bit color depth are supported. Set LV_COLOR_DEPTH to 16 or 32.
#endif
/**********************
* TYPEDEFS
**********************/
/**********************
* STATIC PROTOTYPES
**********************/
static inline void lv_vglite_set_dest_buf(const lv_color_t * buf, const lv_area_t * area, lv_coord_t stride);
static inline void lv_vglite_set_buf_ptr(vg_lite_buffer_t * vgbuf, const lv_color_t * buf);
static inline void lv_vglite_set_buf(vg_lite_buffer_t * vgbuf, const lv_color_t * buf,
const lv_area_t * area, lv_coord_t stride);
/**********************
* STATIC VARIABLES
**********************/
static vg_lite_buffer_t dest_vgbuf;
static vg_lite_buffer_t src_vgbuf;
/**********************
* MACROS
**********************/
/**********************
* GLOBAL FUNCTIONS
**********************/
void lv_gpu_nxp_vglite_init_buf(const lv_color_t * buf, const lv_area_t * area, lv_coord_t stride)
{
lv_vglite_set_dest_buf(buf, area, stride);
}
vg_lite_buffer_t * lv_vglite_get_dest_buf(void)
{
return &dest_vgbuf;
}
vg_lite_buffer_t * lv_vglite_get_src_buf(void)
{
return &src_vgbuf;
}
void lv_vglite_set_dest_buf_ptr(const lv_color_t * buf)
{
lv_vglite_set_buf_ptr(&dest_vgbuf, buf);
}
void lv_vglite_set_src_buf_ptr(const lv_color_t * buf)
{
lv_vglite_set_buf_ptr(&src_vgbuf, buf);
}
void lv_vglite_set_src_buf(const lv_color_t * buf, const lv_area_t * area, lv_coord_t stride)
{
if(src_vgbuf.memory != (void *)buf)
lv_vglite_set_buf(&src_vgbuf, buf, area, stride);
}
/**********************
* STATIC FUNCTIONS
**********************/
static inline void lv_vglite_set_dest_buf(const lv_color_t * buf, const lv_area_t * area, lv_coord_t stride)
{
lv_vglite_set_buf(&dest_vgbuf, buf, area, stride);
}
static inline void lv_vglite_set_buf_ptr(vg_lite_buffer_t * vgbuf, const lv_color_t * buf)
{
vgbuf->memory = (void *)buf;
vgbuf->address = (uint32_t)vgbuf->memory;
}
static inline void lv_vglite_set_buf(vg_lite_buffer_t * vgbuf, const lv_color_t * buf,
const lv_area_t * area, lv_coord_t stride)
{
vgbuf->format = VG_LITE_PX_FMT;
vgbuf->tiled = VG_LITE_LINEAR;
vgbuf->image_mode = VG_LITE_NORMAL_IMAGE_MODE;
vgbuf->transparency_mode = VG_LITE_IMAGE_OPAQUE;
vgbuf->width = (int32_t)lv_area_get_width(area);
vgbuf->height = (int32_t)lv_area_get_height(area);
vgbuf->stride = (int32_t)(stride) * sizeof(lv_color_t);
lv_memset_00(&vgbuf->yuv, sizeof(vgbuf->yuv));
vgbuf->memory = (void *)buf;
vgbuf->address = (uint32_t)vgbuf->memory;
vgbuf->handle = NULL;
}
#endif /*LV_USE_GPU_NXP_VG_LITE*/

View File

@@ -0,0 +1,113 @@
/**
* @file lv_vglite_buf.h
*
*/
/**
* MIT License
*
* Copyright 2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next paragraph)
* shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef LV_VGLITE_BUF_H
#define LV_VGLITE_BUF_H
#ifdef __cplusplus
extern "C" {
#endif
/*********************
* INCLUDES
*********************/
#include "../../../lv_conf_internal.h"
#if LV_USE_GPU_NXP_VG_LITE
#include "vg_lite.h"
#include "../../sw/lv_draw_sw.h"
/*********************
* DEFINES
*********************/
/**********************
* TYPEDEFS
**********************/
/**********************
* GLOBAL PROTOTYPES
**********************/
/**
* Init vglite destination buffer. It will be done once per frame.
*
* @param[in] buf Destination buffer address (does not require alignment for VG_LITE_LINEAR mode)
* @param[in] area Destination buffer area (for width and height)
* @param[in] stride Stride of destination buffer
*/
void lv_gpu_nxp_vglite_init_buf(const lv_color_t * buf, const lv_area_t * area, lv_coord_t stride);
/**
* Get vglite destination buffer pointer.
*
* @retval The vglite destination buffer
*/
vg_lite_buffer_t * lv_vglite_get_dest_buf(void);
/**
* Get vglite source buffer pointer.
*
* @retval The vglite source buffer
*/
vg_lite_buffer_t * lv_vglite_get_src_buf(void);
/**
* Set vglite destination buffer address only.
*
* @param[in] buf Destination buffer address (does not require alignment for VG_LITE_LINEAR mode)
*/
void lv_vglite_set_dest_buf_ptr(const lv_color_t * buf);
/**
* Set vglite source buffer address only.
*
* @param[in] buf Source buffer address
*/
void lv_vglite_set_src_buf_ptr(const lv_color_t * buf);
/**
* Set vglite source buffer. It will be done only if the buffer address is different.
*
* @param[in] buf Source buffer address
* @param[in] area Source buffer area (for width and height)
* @param[in] stride Stride of source buffer
*/
void lv_vglite_set_src_buf(const lv_color_t * buf, const lv_area_t * area, lv_coord_t stride);
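A hedged sketch of the per-frame flow these helpers imply: the destination is registered once when rendering starts, and the source is re-wrapped only when a blit reads from a different buffer. All variable names below are illustrative.
/* Once per frame, e.g. when the draw context is initialised: */
lv_gpu_nxp_vglite_init_buf(disp_buf, &disp_buf_area, disp_buf_stride);

/* Before blitting from an image buffer (no-op if the address is unchanged): */
lv_vglite_set_src_buf(img_buf, &img_area, img_stride);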
/**********************
* MACROS
**********************/
#endif /*LV_USE_GPU_NXP_VG_LITE*/
#ifdef __cplusplus
} /*extern "C"*/
#endif
#endif /*LV_VGLITE_BUF_H*/

View File

@@ -0,0 +1,149 @@
/**
* @file lv_vglite_utils.c
*
*/
/**
* MIT License
*
* Copyright 2022, 2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next paragraph)
* shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
/*********************
* INCLUDES
*********************/
#include "lv_vglite_utils.h"
#if LV_USE_GPU_NXP_VG_LITE
#include "../../../core/lv_refr.h"
/*********************
* DEFINES
*********************/
/**********************
* TYPEDEFS
**********************/
/**********************
* STATIC PROTOTYPES
**********************/
/**
* Clean and invalidate cache.
*/
static inline void invalidate_cache(void);
/**********************
* STATIC VARIABLES
**********************/
/**********************
* MACROS
**********************/
/**********************
* GLOBAL FUNCTIONS
**********************/
lv_res_t lv_vglite_run(void)
{
invalidate_cache();
VG_LITE_ERR_RETURN_INV(vg_lite_flush(), "Flush failed.");
return LV_RES_OK;
}
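/*
 * Illustrative call sequence (assumption, not part of this commit): every
 * accelerated operation is expected to end with lv_vglite_run() so the CPU
 * cache is cleaned and the queued vg_lite commands are flushed to the GPU.
 * The helper name and the blit parameters below are hypothetical.
 */
static inline lv_res_t example_blit_and_run(vg_lite_buffer_t * dest, vg_lite_buffer_t * src,
                                            vg_lite_matrix_t * matrix, vg_lite_blend_t blend)
{
    vg_lite_error_t err = vg_lite_blit(dest, src, matrix, blend, 0, VG_LITE_FILTER_POINT);
    VG_LITE_ERR_RETURN_INV(err, "Blit failed.");

    return lv_vglite_run();
}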
lv_res_t lv_vglite_premult_and_swizzle(vg_lite_color_t * vg_col32, lv_color32_t lv_col32, lv_opa_t opa,
vg_lite_buffer_format_t vg_col_format)
{
lv_color32_t lv_col32_premul = lv_col32;
if(opa <= (lv_opa_t)LV_OPA_MAX) {
/* Only pre-multiply color if hardware pre-multiplication is not present */
if(!vg_lite_query_feature(gcFEATURE_BIT_VG_PE_PREMULTIPLY)) {
lv_col32_premul.ch.red = (uint8_t)(((uint16_t)lv_col32.ch.red * opa) >> 8);
lv_col32_premul.ch.green = (uint8_t)(((uint16_t)lv_col32.ch.green * opa) >> 8);
lv_col32_premul.ch.blue = (uint8_t)(((uint16_t)lv_col32.ch.blue * opa) >> 8);
}
lv_col32_premul.ch.alpha = opa;
}
switch(vg_col_format) {
case VG_LITE_BGRA8888:
*vg_col32 = lv_col32_premul.full;
break;
case VG_LITE_RGBA8888:
*vg_col32 = ((uint32_t)lv_col32_premul.ch.red << 24) | ((uint32_t)lv_col32_premul.ch.green << 16) |
((uint32_t)lv_col32_premul.ch.blue << 8) | (uint32_t)lv_col32_premul.ch.alpha;
break;
case VG_LITE_ABGR8888:
*vg_col32 = ((uint32_t)lv_col32_premul.ch.alpha << 24) | ((uint32_t)lv_col32_premul.ch.blue << 16) |
((uint32_t)lv_col32_premul.ch.green << 8) | (uint32_t)lv_col32_premul.ch.red;
break;
case VG_LITE_ARGB8888:
*vg_col32 = ((uint32_t)lv_col32_premul.ch.alpha << 24) | ((uint32_t)lv_col32_premul.ch.red << 16) |
((uint32_t)lv_col32_premul.ch.green << 8) | (uint32_t)lv_col32_premul.ch.blue;
break;
default:
return LV_RES_INV;
}
return LV_RES_OK;
}
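/*
 * Illustrative helper (assumption, not part of this commit): convert an LVGL
 * color plus opacity into the 32-bit value expected by a BGRA8888 target.
 * The helper name and the hard-coded format are chosen for the example only.
 */
static inline lv_res_t example_make_vg_color(vg_lite_color_t * out, lv_color_t col, lv_opa_t opa)
{
    lv_color32_t col32;
    col32.full = lv_color_to32(col); /* expand to 32-bit color first */

    /* Premultiply by opa (unless the HW premultiplies) and reorder channels. */
    return lv_vglite_premult_and_swizzle(out, col32, opa, VG_LITE_BGRA8888);
}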
vg_lite_blend_t lv_vglite_get_blend_mode(lv_blend_mode_t lv_blend_mode)
{
vg_lite_blend_t vg_blend_mode;
switch(lv_blend_mode) {
case LV_BLEND_MODE_ADDITIVE:
vg_blend_mode = VG_LITE_BLEND_ADDITIVE;
break;
case LV_BLEND_MODE_SUBTRACTIVE:
vg_blend_mode = VG_LITE_BLEND_SUBTRACT;
break;
case LV_BLEND_MODE_MULTIPLY:
vg_blend_mode = VG_LITE_BLEND_MULTIPLY;
break;
case LV_BLEND_MODE_REPLACE:
vg_blend_mode = VG_LITE_BLEND_NONE;
break;
default:
vg_blend_mode = VG_LITE_BLEND_SRC_OVER;
break;
}
return vg_blend_mode;
}
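/*
 * Illustrative mapping (assumption, not part of this commit): a draw
 * descriptor's LVGL blend mode would be translated right before the vg_lite
 * call; the default branch (including LV_BLEND_MODE_NORMAL) maps to
 * VG_LITE_BLEND_SRC_OVER.
 */
static inline vg_lite_blend_t example_blend_for_img_dsc(const lv_draw_img_dsc_t * dsc)
{
    return lv_vglite_get_blend_mode(dsc->blend_mode);
}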
/**********************
* STATIC FUNCTIONS
**********************/
static inline void invalidate_cache(void)
{
lv_disp_t * disp = _lv_refr_get_disp_refreshing();
if(disp->driver->clean_dcache_cb)
disp->driver->clean_dcache_cb(disp->driver);
}
#endif /*LV_USE_GPU_NXP_VG_LITE*/

View File

@@ -1,12 +1,12 @@
/**
* @file lv_gpu_nxp_vglite.h
* @file lv_vglite_utils.h
*
*/
/**
* MIT License
*
* Copyright 2020-2022 NXP
* Copyright 2022, 2023 NXP
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -27,8 +27,8 @@
*
*/
#ifndef LV_GPU_NXP_VGLITE_H
#define LV_GPU_NXP_VGLITE_H
#ifndef LV_VGLITE_UTILS_H
#define LV_VGLITE_UTILS_H
#ifdef __cplusplus
extern "C" {
@@ -43,40 +43,21 @@ extern "C" {
#include "vg_lite.h"
#include "../../sw/lv_draw_sw.h"
#include "../../../misc/lv_log.h"
#include "fsl_debug_console.h"
/*********************
* DEFINES
*********************/
/** Use this symbol as limit to disable feature (value has to be larger than supported resolution) */
#define LV_GPU_NXP_VG_LITE_FEATURE_DISABLED (1920*1080+1)
/** Stride in px required by VG-Lite HW. Don't change this. */
#define LV_GPU_NXP_VG_LITE_STRIDE_ALIGN_PX 16U
#ifndef LV_GPU_NXP_VG_LITE_LOG_ERRORS
/** Enable logging of VG-Lite errors (\see LV_LOG_ERROR)*/
#define LV_GPU_NXP_VG_LITE_LOG_ERRORS 1
#endif
#ifndef LV_GPU_NXP_VG_LITE_LOG_TRACES
/** Enable logging of VG-Lite errors (\see LV_LOG_ERROR)*/
/** Enable logging of VG-Lite traces (\see LV_LOG_ERROR)*/
#define LV_GPU_NXP_VG_LITE_LOG_TRACES 0
#endif
/* Draw rectangles around BLIT tiles */
#define BLIT_DBG_AREAS 0
/* Print detailed info to SDK console (NOT to LVGL log system) */
#define BLIT_DBG_VERBOSE 0
/* Verbose debug print */
#if BLIT_DBG_VERBOSE
#define PRINT_BLT PRINTF
#else
#define PRINT_BLT(...)
#endif
/* The optimal Bezier control point offset for radial unit
* see: https://spencermortensen.com/articles/bezier-circle/
@@ -95,36 +76,35 @@ extern "C" {
**********************/
/**
* Fills vg_lite_buffer_t structure according given parameters.
* Premultiplies and swizzles given LVGL 32bit color to obtain vglite color.
*
* @param[in/out] vgbuf Buffer structure to be filled
* @param[in] width Width of buffer in pixels
* @param[in] height Height of buffer in pixels
* @param[in] stride Stride of the buffer in bytes
* @param[in] ptr Pointer to the buffer (must be aligned according VG-Lite requirements)
* @param[in] source Boolean to check if this is a source buffer
*/
lv_res_t lv_vglite_init_buf(vg_lite_buffer_t * vgbuf, uint32_t width, uint32_t height, uint32_t stride,
const lv_color_t * ptr, bool source);
#if BLIT_DBG_AREAS
/**
* Draw a simple rectangle, 1 px line width.
* @param[in/out] vg_col32 The obtained vglite color
* @param[in] lv_col32 The initial LVGL 32bit color
* @param[in] opa The opacity to premultiply with
* @param[in] vg_col_format The format of the resulting vglite color
*
* @param dest_buf Destination buffer
* @param dest_width Destination buffer width (must be aligned on 16px)
* @param dest_height Destination buffer height
* @param fill_area Rectangle coordinates
* @param color Rectangle color
* @retval LV_RES_OK Operation completed
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_VG_LITE_LOG_ERRORS)
*/
void lv_vglite_dbg_draw_rectangle(lv_color_t * dest_buf, lv_coord_t dest_width, lv_coord_t dest_height,
lv_area_t * fill_area, lv_color_t color);
#endif
lv_res_t lv_vglite_premult_and_swizzle(vg_lite_color_t * vg_col32, lv_color32_t lv_col32, lv_opa_t opa,
vg_lite_buffer_format_t vg_col_format);
/**
* Clean & invalidate cache.
* Get vglite blend mode.
*
* @param[in] lv_blend_mode The LVGL blend mode
*
* @retval The vglite blend mode
*/
void lv_vglite_invalidate_cache(void);
vg_lite_blend_t lv_vglite_get_blend_mode(lv_blend_mode_t lv_blend_mode);
/**
* Clean the cache and flush the queued commands to VG-Lite.
*
* @retval LV_RES_OK Run completed
* @retval LV_RES_INV Error occurred (\see LV_GPU_NXP_VG_LITE_LOG_ERRORS)
*/
lv_res_t lv_vglite_run(void);
/**********************
* MACROS
@@ -142,7 +122,8 @@ void lv_vglite_invalidate_cache(void);
#define VG_LITE_ERR_RETURN_INV(err, fmt, ...) \
do { \
if(err != VG_LITE_SUCCESS) { \
LV_LOG_ERROR(fmt, ##__VA_ARGS__); \
LV_LOG_ERROR(fmt" (err = %d)", \
err, ##__VA_ARGS__); \
return LV_RES_INV; \
} \
} while (0)
@@ -158,7 +139,7 @@ void lv_vglite_invalidate_cache(void);
#if LV_GPU_NXP_VG_LITE_LOG_TRACES
#define VG_LITE_LOG_TRACE(fmt, ...) \
do { \
LV_LOG_ERROR(fmt, ##__VA_ARGS__); \
LV_LOG(fmt, ##__VA_ARGS__); \
} while (0)
#define VG_LITE_RETURN_INV(fmt, ...) \
@@ -182,4 +163,4 @@ void lv_vglite_invalidate_cache(void);
} /*extern "C"*/
#endif
#endif /*LV_GPU_NXP_VGLITE_H*/
#endif /*LV_VGLITE_UTILS_H*/