#ifdef __cplusplus
extern "C" {
#endif
/*========================================================================================
MODULE NAME : sp_mem.c
========================================================================================*/
/*========================================================================================
INCLUDE FILES
========================================================================================*/
#include "sp_mem.h"
#ifdef WIN32
#include "stdio.h"
#endif
#ifdef MEM_MANAGE_BY_MMI
/*
* Macros, rather than function calls, are used for maximum performance:
*/
/* Reading and writing of the size fields.
* The rightmost bit in the size field also
* holds the in use bit. */
#define set_hd1(p, v) ((p)->prev_size = (OP_UINT32)(v))
#define set_hd2(p, v) ((p)->size = (OP_UINT32)(v))
/* The IN USE bit */
#define chunk_isfree(p) ((((chunkptr)(p))->size & 0x01) == 0)
#define chunk_inuse(p) ((((chunkptr)(p))->size & 0x01) == 1)
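/* Judging by the set_hd2()/chunk_isfree() usage in this file, the size field
* appears to hold the chunk length shifted left by one bit, with bit 0 used
* as the IN USE flag. Illustration only (the exact layout lives in sp_mem.h):
*     free chunk of 24 bytes      : size == (24 << 1)        == 0x30
*     allocated chunk of 24 bytes : size == (24 << 1) | 0x01 == 0x31
* chunksize() is then expected to recover the length by shifting right again. */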
/*
* List operations.
*/
/* These list macros expand to more than one statement, so they are wrapped
* in do { ... } while (0) to stay safe inside unbraced if/else bodies. */
#define remove_from_list(p) \
do { \
    (p)->fwd->bck = (p)->bck; \
    (p)->bck->fwd = (p)->fwd; \
} while (0)
#define add_to_list(l, p) \
do { \
    (p)->fwd = (l)->fwd; \
    (p)->bck = (l); \
    (l)->fwd->bck = (p); \
    (l)->fwd = (p); \
} while (0)
/*
* Compute the chunk size we will need for an allocation request:
*/
#define request2size(req) \
(((OP_UINT32)((req) + CHUNKHEADERSIZE + MALLOC_ALIGN_MASK) < \
(OP_UINT32)(MINCHUNKSIZE + MALLOC_ALIGN_MASK)) ? MINCHUNKSIZE : \
(OP_UINT32)(((req) + CHUNKHEADERSIZE + MALLOC_ALIGN_MASK) & ~(MALLOC_ALIGN_MASK)))
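/* Worked example with assumed values (the real constants come from sp_mem.h;
* the comment in MM_mem_malloc below suggests a 4-byte header, 4-byte
* alignment and an 8-byte minimum): request2size(13) = (13 + 4 + 3) & ~3 = 20,
* while request2size(1) is clamped up to MINCHUNKSIZE (8). */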
/*========================================================================================
LOCAL FUNCTION PROTOTYPES
========================================================================================*/
/*========================================================================================
CONSTANTS
========================================================================================*/
/*========================================================================================
LOCAL VARIABLES
========================================================================================*/
//static long mmiMemoryPool_instance[MMI_HEAP_SIZE/4];
char* mmiMemoryPool;// = (char*)mmiMemoryPool_instance;
mem_internal_t mem_internal;
//static SU_SEMA_HANDLE mm_mutex; //avoid more task share common resource.
/*
* We use segregated free lists, with separate lists for
* chunks of different size classes. Currently the class
* boundaries are powers of two.
* List number n holds all the free chunks whose size is
* strictly less than maxsizes[n] (and, for n > 0, not less
* than maxsizes[n-1]).
*/
const OP_UINT32 maxsizes[NUM_FREE_LISTS] =
{
1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 0x8FFFFFFF
};
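/* Example (illustration only): with the table above, a free chunk of
* 3000 bytes is kept on list 2 (3000 < 4096) and a 70000-byte chunk on
* list 7 (70000 < 131072); the final entry acts as a catch-all upper
* bound, so list_idx() below terminates for any realistic chunk size. */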
/*========================================================================================
LOCAL FUNCTIONS
========================================================================================*/
/**
Return the index of the free list that a chunk of size "n"
belongs to.
@param [in] n size of the chunk in bytes.
@return Index of the free list the chunk belongs to.
*/
int list_idx(OP_UINT32 n)
{
int l;
for (l = 0; maxsizes[l] <= n; l++); /* empty body: the last maxsizes entry acts as an upper sentinel */
return(l);
}
/**
Check whether the memory block pointed to by "mem" lies inside the
internal MMI memory pool (i.e. was allocated in internal mode).
@param [in] mem pointer to a memory block.
@return 1 if the address is inside the internal pool, 0 otherwise.
*/
int MM_internal_mem(void* mem)
{
OP_UINT32 left, right, mid;
right = (OP_UINT32)mmiMemoryPool + MMI_HEAP_SIZE;
left = (OP_UINT32)mmiMemoryPool;
mid = (OP_UINT32)mem;
if ((mid > right) || (mid < left))
{
return(0);
}
return(1);
}
/**
Allocate and return a pointer to a memory area of at least
the indicated size. The allocated block of memory will be aligned
on a 4-byte boundary.
@param [in] size number of bytes requested.
@return Pointer to the allocated memory, or 0 if the allocation fails.
*/
void *MM_mem_malloc (OP_UINT32 size)
{
mem_internal_t* mod_mem;
chunkptr p = 0, ptmp = 0;
OP_UINT32 nb;
OP_UINT32 sz = 0xffffffff;
OP_UINT32 remsize;
int i;
mod_mem = &mem_internal;
if (mod_mem->baseptr == OP_NULL)
return(0);
/* Add space for our overhead (4 bytes), round up to the next multiple
* of 4, and never allocate a chunk of less than 8 bytes. */
nb = request2size (size);
/* Check all relevant free lists, until a non-empty one is found. */
for (i = list_idx (nb); i < NUM_FREE_LISTS; i++)
{
chunkptr freelist = mod_mem->freelists[i];
/* Search the entire list, select chunk with closest fit */
for (ptmp = freelist->fwd; ((ptmp != freelist) && (ptmp != OP_NULL)); ptmp = ptmp->fwd)
{
OP_UINT32 tmpsz = chunksize (ptmp);
/* Exact fit: no need to search any further. */
if (tmpsz == nb)
{
p = ptmp;
sz = tmpsz;
goto found;
}
else if (tmpsz > nb) /* Chunk is large enough */
{
if (tmpsz < sz)
{
/* This is the best so far. */
p = ptmp;
sz = tmpsz;
}
}
}
if (p != 0)
{
goto found;
}
}
/* Searched all lists, but found no large enough chunk */
return(0);
found:
/* We have found a large enough chunk, namely "p" of size "sz". */
remove_from_list(p);
remsize = sz - nb;
if (remsize >= MMI_MEM_MIN_SIZE)
{
/* The remainder is large enough to become a separate chunk */
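/* Illustration with assumed numbers: serving a 64-byte request (nb) from a
* 96-byte chunk leaves remsize == 32; provided 32 >= MMI_MEM_MIN_SIZE, that
* tail becomes the new free chunk "q" below and is filed on the free list
* matching its size. */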
chunkptr q, next;
chunkptr l;
sz = nb;
/* "q" will be the new chunk */
q = (chunkptr)((unsigned char *)p + sz);
set_hd2(q, remsize << 1);
set_hd1(q, nb);
next = nextchunk(q);
next->prev_size = remsize;
l = mod_mem->freelists[list_idx (remsize)];
add_to_list(l, q);
}
set_hd2(p, (sz << 1) | 0x01);
return(chunk2mem (p));
}
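/* Usage sketch (assumes the pool has been initialised elsewhere so that
* mem_internal.baseptr is set; the names below are illustrative only):
*     OP_UINT32 *buf = (OP_UINT32 *)MM_mem_malloc(64);
*     if (buf != 0)
*     {
*         ... use up to 64 bytes through buf ...
*         MM_mem_free(buf);
*     }
*/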
/**
Free a memory area previously allocated with MM_mem_malloc.
Calling this routine with 'mem' equal to 0 has no effect.
@param [in] mem pointer to the memory area to release.
*/
void MM_mem_free (void* mem)
{
mem_internal_t* mod_mem;
chunkptr p,valid_ptr; /* chunk corresponding to mem */
OP_UINT8 bFound=0;
OP_UINT32 sz; /* its size */
chunkptr next; /* next adjacent chunk */
chunkptr prev; /* previous adjacent chunk */
chunkptr l;
mod_mem = &mem_internal;
if ((mem == 0) || (mod_mem->baseptr == OP_NULL))
{
return;
}
p = mem2chunk(mem);
if (chunk_isfree(p))
{
return;
}
valid_ptr = mem_internal.firstchunk;
while (valid_ptr != mem_internal.lastchunk) /* Walk the chunk list to verify that "p" really is a chunk header; free it only if it is found, otherwise return */
{
//op_debug(3,"[UMB] valid_ptr: %x",valid_ptr);
if(p==valid_ptr)
{
bFound=1;
break;
}
valid_ptr = nextchunk(valid_ptr);
}
if(bFound==0) return;
sz = chunksize(p);
prev = prevchunk(p);
next = nextchunk(p);
if ((chunk_isfree(prev))&&(chunksize(prev)!=0)&&(MM_internal_mem(prev))) /* Join with the previous chunk if its size is not zero and its address is in the memory pool */
{
sz += chunksize(prev);
p = prev;
remove_from_list(prev);
}
if ((chunk_isfree (next))&&(chunksize(next)!=0)&&(MM_internal_mem(next)))/* Join with the next chunk if its size