feat(mem): switch the default allocator to tlsf (#2129)

* feat(mem): add initial version of tlsf

from  : https://github.com/mattconte/tlsf
commit: deff9ab509341f264addbd3c8ada533678591905

* feat(mem): switch the default allocator to tlsf

* fix(mem): remove the preserved small buffer

since the built-in allocator (TLSF) is now fast enough

* fix(mem): change LV_MEM_ADD_JUNK to 0

to speed up normal operation
Author: Xiang Xiao
Date:   2021-03-10 22:13:35 +08:00
Committed by: GitHub
Parent: 91a44a465a
Commit: 7bf547a928
6 changed files with 1434 additions and 358 deletions
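In short, the allocation path after this commit follows the pattern below. This is a minimal illustrative sketch, not code from the commit: the pool size and the pool_* helper names are made up, while lv_mem.c does the same thing with work_mem_int / LV_MEM_ADR and LV_MEM_SIZE when LV_MEM_CUSTOM == 0.

#include <stdint.h>
#include "lv_tlsf.h"

#define POOL_SIZE (64U * 1024U)                          /* illustrative; lv_mem.c uses LV_MEM_SIZE */
static uint32_t pool_buf[POOL_SIZE / sizeof(uint32_t)];  /* word-aligned, like work_mem_int */
static tlsf_t tlsf;

static void pool_init(void)                              /* what lv_mem_init() now does */
{
    tlsf = tlsf_create_with_pool(pool_buf, POOL_SIZE);
}

static void pool_use(void)                               /* what lv_mem_alloc/realloc/free now wrap */
{
    void * p = tlsf_malloc(tlsf, 128);
    if(p) {
        void * q = tlsf_realloc(tlsf, p, 256);           /* returns NULL on failure, old block stays valid */
        tlsf_free(tlsf, q ? q : p);
    }
}

static void pool_deinit(void)                            /* what lv_mem_deinit() now does */
{
    tlsf_destroy(tlsf);
}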

src/lv_misc/lv_mem.c

@@ -8,7 +8,7 @@
* INCLUDES
*********************/
#include "lv_mem.h"
#include "lv_math.h"
#include "lv_tlsf.h"
#include "lv_gc.h"
#include "lv_assert.h"
#include <string.h>
@@ -23,81 +23,38 @@
*********************/
/*Add memory junk on alloc (0xaa) and free(0xbb) (just for testing purposes)*/
#ifndef LV_MEM_ADD_JUNK
# define LV_MEM_ADD_JUNK 1
#endif
#ifndef LV_MEM_FULL_DEFRAG_CNT
# define LV_MEM_FULL_DEFRAG_CNT 64
# define LV_MEM_ADD_JUNK 0
#endif
#ifdef LV_ARCH_64
# define MEM_UNIT uint64_t
# define ALIGN_MASK 0x7
#else
# define MEM_UNIT uint32_t
# define ALIGN_MASK 0x7
#endif
#define ZERO_MEM_SENTINEL 0xa1b2c3d4
/**********************
* TYPEDEFS
**********************/
#if LV_ENABLE_GC == 0 /*gc custom allocations must not include header*/
/*The size of this union must be 4/8 bytes (uint32_t/uint64_t)*/
typedef union {
struct {
MEM_UNIT used : 1; /* 1: if the entry is used*/
MEM_UNIT d_size : 31; /* Size of the data*/
} s;
MEM_UNIT header; /* The header (used + d_size)*/
} lv_mem_header_t;
typedef struct {
lv_mem_header_t header;
uint8_t first_data; /*First data byte in the allocated data (Just for easily create a pointer)*/
} lv_mem_ent_t;
#endif /* LV_ENABLE_GC */
#ifdef LV_ARCH_64
#define ALIGN_MASK 0x7
#else
#define ALIGN_MASK 0x3
#endif
#define MEM_BUF_SMALL_SIZE 16
#define ZERO_MEM_SENTINEL 0xa1b2c3d4
/**********************
* STATIC PROTOTYPES
**********************/
#if LV_MEM_CUSTOM == 0
static void * alloc_core(size_t size);
static lv_mem_ent_t * ent_get_next(lv_mem_ent_t * act_e);
static inline void * ent_alloc(lv_mem_ent_t * e, size_t size);
static void ent_trunc(lv_mem_ent_t * e, size_t size);
static size_t get_size(void * data_p);
static void lv_mem_walker(void * ptr, size_t size, int used, void * user);
#endif
/**********************
* STATIC VARIABLES
**********************/
#if LV_MEM_CUSTOM == 0
static uint8_t * work_mem;
static tlsf_t tlsf;
#endif
static uint32_t zero_mem = ZERO_MEM_SENTINEL; /*Give the address of this variable if 0 byte should be allocated*/
#if LV_MEM_CUSTOM == 0
static uint8_t * last_mem; /*Address of the last valid byte*/
static uint32_t mem_max_size; /*Tracks the maximum total size of memory ever used from the internal heap*/
#endif
static uint8_t mem_buf1_32[MEM_BUF_SMALL_SIZE];
static uint8_t mem_buf2_32[MEM_BUF_SMALL_SIZE];
static lv_mem_buf_t mem_buf_small[] = {{.p = mem_buf1_32, .size = MEM_BUF_SMALL_SIZE, .used = 0},
{.p = mem_buf2_32, .size = MEM_BUF_SMALL_SIZE, .used = 0}
};
/**********************
* MACROS
@@ -128,15 +85,10 @@ void lv_mem_init(void)
#if LV_MEM_ADR == 0
/*Allocate a large array to store the dynamically allocated data*/
static LV_ATTRIBUTE_LARGE_RAM_ARRAY MEM_UNIT work_mem_int[LV_MEM_SIZE / sizeof(MEM_UNIT)];
work_mem = (uint8_t *)work_mem_int;
tlsf = tlsf_create_with_pool((void *)work_mem_int, LV_MEM_SIZE);
#else
work_mem = (uint8_t *)LV_MEM_ADR;
tlsf = tlsf_create_with_pool((void *)LV_MEM_ADR, LV_MEM_SIZE);
#endif
last_mem = &work_mem[LV_MEM_SIZE - 1];
lv_mem_ent_t * full = (lv_mem_ent_t *)work_mem;
full->header.s.used = 0;
/*The total mem size reduced by the first header and the close patterns */
full->header.s.d_size = LV_MEM_SIZE - sizeof(lv_mem_header_t);
#endif
#if LV_MEM_ADD_JUNK
@@ -151,11 +103,8 @@ void lv_mem_init(void)
void lv_mem_deinit(void)
{
#if LV_MEM_CUSTOM == 0
lv_memset_00(work_mem, (LV_MEM_SIZE / sizeof(MEM_UNIT)) * sizeof(MEM_UNIT));
lv_mem_ent_t * full = (lv_mem_ent_t *)work_mem;
full->header.s.used = 0;
/*The total mem size reduced by the first header and the close patterns */
full->header.s.d_size = LV_MEM_SIZE - sizeof(lv_mem_header_t);
tlsf_destroy(tlsf);
lv_mem_init();
#endif
}
@@ -172,24 +121,11 @@ void * lv_mem_alloc(size_t size)
return &zero_mem;
}
/*Round the size up to ALIGN_MASK*/
size = (size + ALIGN_MASK) & (~ALIGN_MASK);
void * alloc = NULL;
#if LV_MEM_CUSTOM == 0
alloc = alloc_core(size);
if(alloc == NULL) {
LV_LOG_WARN("out of memory, trying to defrag");
lv_mem_defrag();
alloc = alloc_core(size);
if(alloc) {
LV_LOG_INFO("defrag made enough memory, memory allocated successfully");
}
}
void * alloc = tlsf_malloc(tlsf, size);
#else
alloc = LV_MEM_CUSTOM_ALLOC(size);
#endif /* LV_MEM_CUSTOM */
void * alloc = LV_MEM_CUSTOM_ALLOC(size);
#endif
#if LV_MEM_ADD_JUNK
if(alloc != NULL) lv_memset(alloc, 0xaa, size);
@@ -203,16 +139,6 @@ void * lv_mem_alloc(size_t size)
(int)mon.total_size - mon.free_size, mon.used_pct, mon.frag_pct,
(int)mon.free_biggest_size);
}
else {
#if LV_MEM_CUSTOM == 0
/* just a safety check, should always be true */
if((uintptr_t) alloc > (uintptr_t) work_mem) {
if((((uintptr_t) alloc - (uintptr_t) work_mem) + size) > mem_max_size) {
mem_max_size = ((uintptr_t) alloc - (uintptr_t) work_mem) + size;
}
}
#endif
}
MEM_TRACE("allocated at 0x%p", alloc);
return alloc;
@@ -229,21 +155,11 @@ void lv_mem_free(void * data)
if(data == NULL) return;
#if LV_MEM_CUSTOM == 0
lv_mem_ent_t * e = (lv_mem_ent_t *)((uint8_t *)data - sizeof(lv_mem_header_t));
e->header.s.used = 0;
# if LV_MEM_ADD_JUNK
lv_memset((void *)data, 0xbb, e->header.s.d_size);
lv_memset(data, 0xbb, tlsf_block_size(data));
# endif
static uint32_t defr = 0;
defr++;
if(defr > LV_MEM_FULL_DEFRAG_CNT) {
MEM_TRACE("performing auto defrag")
defr = 0;
lv_mem_defrag();
}
tlsf_free(tlsf, data);
#else
/*e points to the header*/
LV_MEM_CUSTOM_FREE(data);
#endif
}
@@ -258,120 +174,26 @@ void lv_mem_free(void * data)
void * lv_mem_realloc(void * data_p, size_t new_size)
{
MEM_TRACE("reallocating 0x%p with %d size", data_p, new_size);
void * new_p = NULL;
#if LV_MEM_CUSTOM
if(new_size == 0) {
MEM_TRACE("using zero_mem");
LV_MEM_CUSTOM_FREE(data_p);
return &zero_mem;
}
if(data_p == &zero_mem) new_p = LV_MEM_CUSTOM_ALLOC(new_size);
else new_p = LV_MEM_CUSTOM_REALLOC(data_p, new_size);
if(new_p == NULL) {
LV_LOG_ERROR("couldn't allocate memory");
return NULL;
} else {
MEM_TRACE("allocated at 0x%p", new_p);
return new_p;
}
#else
/*Round the size up to ALIGN_MASK*/
new_size = (new_size + ALIGN_MASK) & (~ALIGN_MASK);
/*data_p could be previously freed pointer (in this case it is invalid)*/
if(data_p != NULL) {
lv_mem_ent_t * e = (lv_mem_ent_t *)((uint8_t *)data_p - sizeof(lv_mem_header_t));
if(e->header.s.used == 0) {
data_p = NULL;
}
}
uint32_t old_size = get_size(data_p);
if(old_size == new_size) {
MEM_TRACE("same size, using the original memory");
return data_p;
}
if(new_size == 0) {
lv_mem_free(data_p);
return &zero_mem;
}
/* Truncate the memory if the new size is smaller. */
if(new_size < old_size) {
lv_mem_ent_t * e = (lv_mem_ent_t *)((uint8_t *)data_p - sizeof(lv_mem_header_t));
ent_trunc(e, new_size);
MEM_TRACE("memory entry is truncated (same address 0x%p is used)", &e->first_data);
return &e->first_data;
}
new_p = lv_mem_alloc(new_size);
if(new_p == NULL) {
LV_LOG_ERROR("couldn't allocate memory");
return NULL;
}
if(data_p == &zero_mem) return lv_mem_alloc(new_size);
if(data_p != NULL) {
/*Copy the old data to the new. Use the smaller size*/
if(old_size != 0 && new_size != 0) {
lv_memcpy(new_p, data_p, LV_MIN(new_size, old_size));
}
lv_mem_free(data_p);
}
MEM_TRACE("allocated at 0x%p", new_p);
return new_p;
#endif
}
/**
* Join the adjacent free memory blocks
*/
void lv_mem_defrag(void)
{
MEM_TRACE("begin");
#if LV_MEM_CUSTOM == 0
lv_mem_ent_t * e_free;
lv_mem_ent_t * e_next;
e_free = ent_get_next(NULL);
while(1) {
/*Search the next free entry*/
while(e_free != NULL) {
if(e_free->header.s.used != 0) e_free = ent_get_next(e_free);
else break;
}
if(e_free == NULL) {
MEM_TRACE("finished");
return;
}
/*Joint the following free entries to the free*/
e_next = ent_get_next(e_free);
while(e_next != NULL) {
if(e_next->header.s.used == 0) {
e_free->header.s.d_size += e_next->header.s.d_size + sizeof(e_next->header);
}
else {
break;
}
e_next = ent_get_next(e_next);
}
if(e_next == NULL) {
MEM_TRACE("finished");
return;
}
/*Continue from the lastly checked entry*/
e_free = e_next;
}
void * new_p = tlsf_realloc(tlsf, data_p, new_size);
#else
void * new_p = LV_MEM_CUSTOM_REALLOC(data_p, new_size);
#endif
if(new_p == NULL) {
LV_LOG_ERROR("couldn't allocate memory");
return NULL;
}
MEM_TRACE("allocated at 0x%p", new_p);
return new_p;
}
lv_res_t lv_mem_test(void)
@@ -382,20 +204,15 @@ lv_res_t lv_mem_test(void)
}
#if LV_MEM_CUSTOM == 0
lv_mem_ent_t * e;
e = ent_get_next(NULL);
while(e) {
if(e->header.s.d_size > LV_MEM_SIZE) {
if(tlsf_check(tlsf)) {
LV_LOG_WARN("failed");
return LV_RES_INV;
}
uint8_t * e8 = (uint8_t *)e;
if(e8 + e->header.s.d_size > work_mem + LV_MEM_SIZE) {
LV_LOG_WARN("failed");
if (tlsf_check_pool(tlsf_get_pool(tlsf))) {
LV_LOG_WARN("pool failed");
return LV_RES_INV;
}
e = ent_get_next(e);
}
#endif
MEM_TRACE("passed");
return LV_RES_OK;
@@ -412,26 +229,10 @@ void lv_mem_monitor(lv_mem_monitor_t * mon_p)
lv_memset(mon_p, 0, sizeof(lv_mem_monitor_t));
#if LV_MEM_CUSTOM == 0
MEM_TRACE("begin");
lv_mem_ent_t * e;
e = ent_get_next(NULL);
tlsf_walk_pool(tlsf_get_pool(tlsf), lv_mem_walker, mon_p);
while(e != NULL) {
if(e->header.s.used == 0) {
mon_p->free_cnt++;
mon_p->free_size += e->header.s.d_size;
if(e->header.s.d_size > mon_p->free_biggest_size) {
mon_p->free_biggest_size = e->header.s.d_size;
}
}
else {
mon_p->used_cnt++;
}
e = ent_get_next(e);
}
mon_p->total_size = LV_MEM_SIZE;
mon_p->max_used = mem_max_size;
mon_p->used_pct = 100 - (100U * mon_p->free_size) / mon_p->total_size;
if(mon_p->free_size > 0) {
mon_p->frag_pct = mon_p->free_biggest_size * 100U / mon_p->free_size;
@@ -455,21 +256,10 @@ void * lv_mem_buf_get(uint32_t size)
if(size == 0) return NULL;
MEM_TRACE("begin, getting %d bytes", size);
/*Try small static buffers first*/
uint8_t i;
if(size <= MEM_BUF_SMALL_SIZE) {
for(i = 0; i < sizeof(mem_buf_small) / sizeof(mem_buf_small[0]); i++) {
if(mem_buf_small[i].used == 0) {
mem_buf_small[i].used = 1;
MEM_TRACE("return using small static buffer");
return mem_buf_small[i].p;
}
}
}
/*Try to find a free buffer with suitable size */
int8_t i_guess = -1;
for(i = 0; i < LV_MEM_BUF_MAX_NUM; i++) {
for(uint8_t i = 0; i < LV_MEM_BUF_MAX_NUM; i++) {
if(LV_GC_ROOT(lv_mem_buf[i]).used == 0 && LV_GC_ROOT(lv_mem_buf[i]).size >= size) {
if(LV_GC_ROOT(lv_mem_buf[i]).size == size) {
LV_GC_ROOT(lv_mem_buf[i]).used = 1;
@@ -492,7 +282,7 @@ void * lv_mem_buf_get(uint32_t size)
}
/*Reallocate a free buffer*/
for(i = 0; i < LV_MEM_BUF_MAX_NUM; i++) {
for(uint8_t i = 0; i < LV_MEM_BUF_MAX_NUM; i++) {
if(LV_GC_ROOT(lv_mem_buf[i]).used == 0) {
/*if this fails you probably need to increase your LV_MEM_SIZE/heap size*/
void * buf = lv_mem_realloc(LV_GC_ROOT(lv_mem_buf[i]).p, size);
@@ -519,18 +309,8 @@ void * lv_mem_buf_get(uint32_t size)
void lv_mem_buf_release(void * p)
{
MEM_TRACE("begin (address: 0x%p)", p);
uint8_t i;
/*Try small static buffers first*/
for(i = 0; i < sizeof(mem_buf_small) / sizeof(mem_buf_small[0]); i++) {
if(mem_buf_small[i].p == p) {
mem_buf_small[i].used = 0;
MEM_TRACE("released (buffer id: %d)", i);
return;
}
}
for(i = 0; i < LV_MEM_BUF_MAX_NUM; i++) {
for(uint8_t i = 0; i < LV_MEM_BUF_MAX_NUM; i++) {
if(LV_GC_ROOT(lv_mem_buf[i]).p == p) {
LV_GC_ROOT(lv_mem_buf[i]).used = 0;
return;
@@ -545,12 +325,7 @@ void lv_mem_buf_release(void * p)
*/
void lv_mem_buf_free_all(void)
{
uint8_t i;
for(i = 0; i < sizeof(mem_buf_small) / sizeof(mem_buf_small[0]); i++) {
mem_buf_small[i].used = 0;
}
for(i = 0; i < LV_MEM_BUF_MAX_NUM; i++) {
for(uint8_t i = 0; i < LV_MEM_BUF_MAX_NUM; i++) {
if(LV_GC_ROOT(lv_mem_buf[i]).p) {
lv_mem_free(LV_GC_ROOT(lv_mem_buf[i]).p);
LV_GC_ROOT(lv_mem_buf[i]).p = NULL;
@@ -750,94 +525,19 @@ LV_ATTRIBUTE_FAST_MEM void lv_memset_ff(void * dst, size_t len)
**********************/
#if LV_MEM_CUSTOM == 0
static void * alloc_core(size_t size)
static void lv_mem_walker(void * ptr, size_t size, int used, void * user)
{
lv_mem_ent_t * e = NULL;
LV_UNUSED(ptr);
/* Search for a appropriate entry*/
while(1) {
/* Get the next entry*/
e = ent_get_next(e);
if( e == NULL) break;
/*If there is next entry then try to allocate there*/
if(!e->header.s.used && e->header.s.d_size >= size)
return ent_alloc(e, size);
lv_mem_monitor_t * mon_p = user;
if(used) {
mon_p->used_cnt++;
}
return NULL;
}
/**
* Give the next entry after 'act_e'
* @param act_e pointer to an entry
* @return pointer to an entry after 'act_e'
*/
static lv_mem_ent_t * ent_get_next(lv_mem_ent_t * act_e)
{
/*NULL means: get the first entry; else get the next after `act_e`*/
if(act_e == NULL) return (lv_mem_ent_t *)work_mem;
else {
uint8_t * data = &act_e->first_data;
lv_mem_ent_t * next_e = (lv_mem_ent_t *)&data[act_e->header.s.d_size];
if(&next_e->first_data > last_mem) return NULL;
else return next_e;
mon_p->free_cnt++;
mon_p->free_size += size;
if(size > mon_p->free_biggest_size)
mon_p->free_biggest_size = size;
}
}
/**
* Try to do the real allocation with a given size
* @param e try to allocate to this entry
* @param size size of the new memory in bytes
* @return pointer to the allocated memory or NULL if not enough memory in the entry
*/
static inline void * ent_alloc(lv_mem_ent_t * e, size_t size)
{
/*Truncate the entry to the desired size */
ent_trunc(e, size);
e->header.s.used = 1;
/*Save the allocated data*/
return &e->first_data;
}
/**
* Truncate the data of entry to the given size
* @param e Pointer to an entry
* @param size new size in bytes
*/
static void ent_trunc(lv_mem_ent_t * e, size_t size)
{
/*Round the size up to ALIGN_MASK*/
size = (size + ALIGN_MASK) & (~ALIGN_MASK);
/*Don't let empty space only for a header without data*/
if(e->header.s.d_size == size + sizeof(lv_mem_header_t)) {
size = e->header.s.d_size;
}
/* Create the new entry after the current if there is space for it */
if(e->header.s.d_size != size) {
uint8_t * e_data = &e->first_data;
lv_mem_ent_t * after_new_e = (lv_mem_ent_t *)&e_data[size];
after_new_e->header.s.used = 0;
after_new_e->header.s.d_size = e->header.s.d_size - size - sizeof(lv_mem_header_t);
/* Set the new size for the original entry */
e->header.s.d_size = size;
}
}
static size_t get_size(void * data_p)
{
if(data_p == NULL) return 0;
if(data_p == &zero_mem) return 0;
lv_mem_ent_t * e = (lv_mem_ent_t *)((uint8_t *)data_p - sizeof(lv_mem_header_t));
return e->header.s.d_size;
}
#endif
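With the manual entry walk removed, lv_mem_monitor() now fills its statistics through the lv_mem_walker() callback and tlsf_walk_pool(). A small application-side usage sketch follows; the helper name and printf formatting are illustrative, and the include path depends on your build setup.

#include <stdio.h>
#include "lv_mem.h"

static void print_heap_stats(void)
{
    lv_mem_monitor_t mon;
    lv_mem_monitor(&mon);
    printf("total %u, free %u (biggest block %u), used %u%%, frag %u%%\n",
           (unsigned)mon.total_size, (unsigned)mon.free_size,
           (unsigned)mon.free_biggest_size,
           (unsigned)mon.used_pct, (unsigned)mon.frag_pct);
}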

src/lv_misc/lv_mem.h

@@ -95,11 +95,6 @@ void lv_mem_free(void * data);
*/
void * lv_mem_realloc(void * data_p, size_t new_size);
/**
* Join the adjacent free memory blocks
*/
void lv_mem_defrag(void);
/**
*
* @return

src/lv_misc/lv_misc.mk

@@ -12,6 +12,7 @@ CSRCS += lv_mem.c
CSRCS += lv_printf.c
CSRCS += lv_style.c
CSRCS += lv_timer.c
CSRCS += lv_tlsf.c
CSRCS += lv_txt.c
CSRCS += lv_txt_ap.c
CSRCS += lv_utils.c
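For context, the backend selection is unchanged: LV_MEM_CUSTOM == 0 uses the built-in allocator (now TLSF) on an internal pool, LV_MEM_CUSTOM == 1 forwards to user-supplied functions. A rough lv_conf.h sketch, assuming the usual option names; the values are illustrative and the exact layout varies between LVGL versions.

/* 0: use the built-in allocator (TLSF after this commit) on an internal pool */
#define LV_MEM_CUSTOM 0

#if LV_MEM_CUSTOM == 0
    #define LV_MEM_SIZE (32U * 1024U)   /* size of the internal pool in bytes */
    #define LV_MEM_ADR  0               /* 0: pool is a static array; otherwise a memory address */
#else
    #define LV_MEM_CUSTOM_INCLUDE <stdlib.h>
    #define LV_MEM_CUSTOM_ALLOC   malloc
    #define LV_MEM_CUSTOM_REALLOC realloc
    #define LV_MEM_CUSTOM_FREE    free
#endif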

src/lv_misc/lv_tlsf.c (new file, 1290 lines; diff suppressed because it is too large)

src/lv_misc/lv_tlsf.h (new file, 95 lines)

@@ -0,0 +1,95 @@
#include "../lv_conf_internal.h"
#if LV_MEM_CUSTOM == 0
#ifndef INCLUDED_tlsf
#define INCLUDED_tlsf
/*
** Two Level Segregated Fit memory allocator, version 3.1.
** Written by Matthew Conte
** http://tlsf.baisoku.org
**
** Based on the original documentation by Miguel Masmano:
** http://www.gii.upv.es/tlsf/main/docs
**
** This implementation was written to the specification
** of the document, therefore no GPL restrictions apply.
**
** Copyright (c) 2006-2016, Matthew Conte
** All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions are met:
** * Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** * Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in the
** documentation and/or other materials provided with the distribution.
** * Neither the name of the copyright holder nor the
** names of its contributors may be used to endorse or promote products
** derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stddef.h>
#if defined(__cplusplus)
extern "C" {
#endif
/* tlsf_t: a TLSF structure. Can contain 1 to N pools. */
/* pool_t: a block of memory that TLSF can manage. */
typedef void* tlsf_t;
typedef void* pool_t;
/* Create/destroy a memory pool. */
tlsf_t tlsf_create(void* mem);
tlsf_t tlsf_create_with_pool(void* mem, size_t bytes);
void tlsf_destroy(tlsf_t tlsf);
pool_t tlsf_get_pool(tlsf_t tlsf);
/* Add/remove memory pools. */
pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes);
void tlsf_remove_pool(tlsf_t tlsf, pool_t pool);
/* malloc/memalign/realloc/free replacements. */
void* tlsf_malloc(tlsf_t tlsf, size_t bytes);
void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t bytes);
void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size);
void tlsf_free(tlsf_t tlsf, void* ptr);
/* Returns internal block size, not original request size */
size_t tlsf_block_size(void* ptr);
/* Overheads/limits of internal structures. */
size_t tlsf_size(void);
size_t tlsf_align_size(void);
size_t tlsf_block_size_min(void);
size_t tlsf_block_size_max(void);
size_t tlsf_pool_overhead(void);
size_t tlsf_alloc_overhead(void);
/* Debugging. */
typedef void (*tlsf_walker)(void* ptr, size_t size, int used, void* user);
void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user);
/* Returns nonzero if any internal consistency check fails. */
int tlsf_check(tlsf_t tlsf);
int tlsf_check_pool(pool_t pool);
#if defined(__cplusplus)
};
#endif
#endif
#endif /* LV_MEM_CUSTOM == 0 */
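The header above is the entire public surface used by lv_mem.c. Below is a short sketch of driving it directly, for example to hand a second memory region to the same allocator and run the consistency checks that lv_mem_test() now relies on; the region size and helper name are illustrative.

static uint64_t extra_region[2048];            /* 16 KiB, kept naturally aligned */

static void manage_extra_pool(tlsf_t tlsf)
{
    /* One tlsf_t can manage several pools */
    pool_t extra = tlsf_add_pool(tlsf, extra_region, sizeof(extra_region));
    if(extra == NULL) return;

    void * p = tlsf_malloc(tlsf, 64);          /* may now be served from either pool */
    tlsf_free(tlsf, p);

    /* A nonzero return value means an internal consistency check failed */
    if(tlsf_check(tlsf) || tlsf_check_pool(extra)) {
        /* handle heap corruption here */
    }

    tlsf_remove_pool(tlsf, extra);
}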


@@ -454,7 +454,6 @@ typedef int _keep_pedantic_happy;
// }
//
// lv_test_assert_int_eq(LV_RES_OK, lv_mem_test(), "Memory integrity check");
// lv_mem_defrag();
// lv_mem_monitor(&mon_end);
// lv_test_assert_int_lt(sizeof(void*) * 8, mon_start.free_size - mon_end.free_size, "Style memory leak");
//
@@ -478,7 +477,6 @@ typedef int _keep_pedantic_happy;
//
// _lv_style_list_reset(&style_list);
// lv_test_assert_int_eq(LV_RES_OK, lv_mem_test(), "Memory integrity check");
// lv_mem_defrag();
// lv_mem_monitor(&mon_end);
// lv_test_assert_int_lt(sizeof(void*) * 8, mon_start.free_size - mon_end.free_size, "Style memory leak");
//
@@ -505,7 +503,6 @@ typedef int _keep_pedantic_happy;
// }
//
// lv_test_assert_int_eq(LV_RES_OK, lv_mem_test(), "Memory integrity check");
// lv_mem_defrag();
// lv_mem_monitor(&mon_end);
// lv_test_assert_int_lt(sizeof(void*) * 8, mon_start.free_size - mon_end.free_size, "Style memory leak");
//
@@ -536,7 +533,6 @@ typedef int _keep_pedantic_happy;
// }
//
// lv_test_assert_int_eq(LV_RES_OK, lv_mem_test(), "Memory integrity check");
// lv_mem_defrag();
// lv_mem_monitor(&mon_end);
// lv_test_assert_int_lt(sizeof(void*) * 8, mon_start.free_size - mon_end.free_size, "Style memory leak");
//
@@ -616,7 +612,6 @@ typedef int _keep_pedantic_happy;
// lv_style_reset(&style3);
//
// lv_test_assert_int_eq(LV_RES_OK, lv_mem_test(), "Memory integrity check");
// lv_mem_defrag();
// lv_mem_monitor(&mon_end);
// lv_test_assert_int_lt(sizeof(void*) * 8, mon_start.free_size - mon_end.free_size, "Style memory leak");
//}