feat: save freed thread entries to avoid reallocating them later
parent a43d94dede
commit 177e8807c5
@@ -58,6 +58,8 @@ static TAILQ_HEAD(context_head, context_entry) head = TAILQ_HEAD_INITIALIZER(head);
 // Current running thread
 static struct context_entry* running = NULL;

+static TAILQ_HEAD(freed_context_head, context_entry) context_to_freed = TAILQ_HEAD_INITIALIZER(context_to_freed);
+
 int thread_yield(void)
 {
     //TRACE("thread_yield");
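Review note: the context_to_freed queue added above is a second intrusive tail queue from <sys/queue.h>, used as a free list of retired thread entries. For readers new to the TAILQ macros, the following is a minimal, self-contained sketch of the same acquire/release pattern; the names (node, pool, node_acquire, node_release) are illustrative and not part of this library.

/* Free-list sketch built on the <sys/queue.h> TAILQ macros.
 * Illustrative names only; not the library's own types. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct node {
    int payload;
    TAILQ_ENTRY(node) link;              /* intrusive links, no extra allocation */
};

/* Statically initialized head, like context_to_freed above. */
static TAILQ_HEAD(pool_head, node) pool = TAILQ_HEAD_INITIALIZER(pool);

/* Reuse a parked node when one is available, otherwise allocate a new one. */
static struct node* node_acquire(void)
{
    if (!TAILQ_EMPTY(&pool)) {
        struct node* n = TAILQ_FIRST(&pool);
        TAILQ_REMOVE(&pool, n, link);
        return n;
    }
    return malloc(sizeof(struct node));
}

/* Park a node on the free list instead of freeing it. */
static void node_release(struct node* n)
{
    TAILQ_INSERT_TAIL(&pool, n, link);
}

int main(void)
{
    struct node* a = node_acquire();
    a->payload = 42;
    node_release(a);
    struct node* b = node_acquire();     /* same memory as a, no second malloc */
    printf("payload carried over: %d\n", b->payload);
    free(b);
    return 0;
}

Once allocated, an entry never leaves the heap; releasing it just threads it back onto the queue, which is what lets thread_create below skip malloc on the hot path.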
@@ -112,23 +114,32 @@ void thread_function_wrapper(void* (*func)(void*), void* funcarg)
 */
 int thread_create(thread_t* newthread, void* (*func)(void*), void* funcarg)
 {
-    TRACE("Create a new thread that execute function %p", func);
-    struct context_entry* new_entry = malloc(sizeof(*new_entry));
-    memset(new_entry->stack, 0, STACK_SIZE);
+    DBG("Create a new thread that execute function %p", func);
+    struct context_entry* new_entry;
+    TRACE("Checking for previous allocated entry");
+    if (!TAILQ_EMPTY(&context_to_freed)) {
+        new_entry = TAILQ_FIRST(&context_to_freed);
+        TAILQ_REMOVE(&context_to_freed, new_entry, link);
+    } else {
+        TRACE("Allocating new entry");
+        new_entry = malloc(sizeof(*new_entry));
+        memset(new_entry->stack, 0, STACK_SIZE);
+
+        new_entry->context.uc_stack.ss_sp = new_entry->stack;
+        new_entry->context.uc_stack.ss_size = STACK_SIZE;
+        new_entry->context.uc_stack.ss_flags = 0;
+
+        // Tell Valgrind that the memory area of the future stack is a stack
+        new_entry->valgrind_id = VALGRIND_STACK_REGISTER(
+            new_entry->context.uc_stack.ss_sp,
+            new_entry->context.uc_stack.ss_sp + new_entry->context.uc_stack.ss_size);
+
+        new_entry->id = new_entry;
+    }

     getcontext(&new_entry->context);

-    new_entry->context.uc_stack.ss_sp = new_entry->stack;
-    new_entry->context.uc_stack.ss_size = STACK_SIZE;
-    new_entry->context.uc_stack.ss_flags = 0;
-
-    // Tell Valgrind that the memory area of the future stack is a stack
-    new_entry->valgrind_id = VALGRIND_STACK_REGISTER(
-        new_entry->context.uc_stack.ss_sp,
-        new_entry->context.uc_stack.ss_sp + new_entry->context.uc_stack.ss_size);
-
     // Use the entry's memory address as an id.
-    new_entry->id = new_entry;
     TRACE("ALLOCATED %p", new_entry);
     new_entry->status = ALLOCATED;
     new_entry->retvalue = NULL;
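Review note: after this change the uc_stack setup and the VALGRIND_STACK_REGISTER call run only on the malloc path; a recycled entry keeps its existing stack, uc_stack fields, Valgrind registration and id, and only getcontext plus the per-thread fields are refreshed (the recycled stack is also not re-zeroed, since the memset now lives on the malloc branch). For readers unfamiliar with <ucontext.h>, here is a minimal, standalone sketch of that one-time setup; the names worker and SKETCH_STACK_SIZE are illustrative, and the Valgrind registration is left out for brevity.

/* ucontext sketch: getcontext(), point uc_stack at an owned buffer,
 * makecontext(), then swap to it. Illustrative names only. */
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define SKETCH_STACK_SIZE (64 * 1024)

static ucontext_t main_ctx;

static void worker(void)
{
    puts("running on the private stack");
    /* uc_link == &main_ctx, so returning resumes main_ctx. */
}

int main(void)
{
    ucontext_t ctx;
    char* stack = malloc(SKETCH_STACK_SIZE);

    getcontext(&ctx);                      /* initialize with the current state */
    ctx.uc_stack.ss_sp = stack;            /* give the context its own stack */
    ctx.uc_stack.ss_size = SKETCH_STACK_SIZE;
    ctx.uc_stack.ss_flags = 0;
    ctx.uc_link = &main_ctx;               /* where to go when worker returns */
    makecontext(&ctx, worker, 0);

    swapcontext(&main_ctx, &ctx);          /* run worker, then come back */
    free(stack);
    return 0;
}

The point of the commit is that this whole setup is paid once per entry: an entry coming back from context_to_freed already has a valid, registered stack.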
@@ -173,29 +184,29 @@ int thread_join(thread_t thread, void** retval)
     /** Deadlock **/
     // if the running thread is solo (not waited by anyone)
     if (!IS_WAITED(running)) {
         // if the thread that we want to join is already in a "group" of waiting threads
         if (IS_WAITING(entry)) {
             // give the last waited thread to the running thread
             running->last_waited = entry->last_waited;
         }
         else { // the thread we want to join is solo
             running->last_waited = malloc(sizeof(struct last_thread_t));
             running->last_waited->ref = 0;
             entry->last_waited = running->last_waited;
             running->last_waited->last_thread = entry;
         }
         running->last_waited->ref++;

     } else { // the running thread is already part of a group of waiting threads
         if (IS_WAITING(entry)) { // the thread we want to join is part of a group of waiting threads
             // release the last_waited of this entry
             running->last_waited->last_thread = GET_LAST_WAITED_THREAD(entry);
         }
         else { // the thread we want to join is solo and has no last_waited allocated
             running->last_waited->last_thread = entry;
             entry->last_waited = running->last_waited;
             entry->last_waited->ref++;
         }

     }

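Review note: the unchanged context above is the join bookkeeping the recycling has to respect: every chain of joiners shares one reference-counted last_waited record whose last_thread field names the thread at the tail of the chain. The sketch below is a simplified, illustrative version of the first branch only (the running thread is not itself being waited on); the names tcb, wait_group, record_join and would_deadlock are hypothetical, and the actual deadlock test is not shown in this hunk.

/* Hedged sketch: one shared, reference-counted record per join chain.
 * Illustrative names; simplified relative to the code above. */
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct tcb;                               /* per-thread control block */

struct wait_group {
    struct tcb* last_thread;              /* thread at the tail of the chain */
    int ref;                              /* number of tcbs sharing this record */
};

struct tcb {
    struct wait_group* last_waited;       /* NULL when not in any chain */
};

/* Hypothetical check: joining target closes a cycle if the chain that
 * target belongs to already ends at the caller. */
static bool would_deadlock(const struct tcb* self, const struct tcb* target)
{
    return target->last_waited != NULL &&
           target->last_waited->last_thread == self;
}

/* Simplified version of the "running thread is solo" branch above:
 * adopt the target's record if it has one, otherwise create a shared one. */
static void record_join(struct tcb* self, struct tcb* target)
{
    if (target->last_waited != NULL) {
        self->last_waited = target->last_waited;
    } else {
        self->last_waited = malloc(sizeof(*self->last_waited));
        self->last_waited->ref = 0;
        self->last_waited->last_thread = target;
        target->last_waited = self->last_waited;
    }
    self->last_waited->ref++;
}

It is worth checking that an entry pushed onto context_to_freed no longer holds a live last_waited reference, though that is outside the displayed hunks.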
@@ -225,9 +236,11 @@ int thread_join(thread_t thread, void** retval)
     // Clean up
     DBG("(entry, was_alloacted) : %p,%d", entry, WAS_ALLOCATED(entry));
     if (WAS_ALLOCATED(entry)) {
-        VALGRIND_STACK_DEREGISTER(entry->valgrind_id);
+        DBG("ADDING (%p) TO FREED TAIL", entry);
+        TAILQ_INSERT_TAIL(&context_to_freed, entry, link);
+    } else {
+        free(entry);
     }
-    free(entry);

     return 0;
 }
@@ -268,6 +281,14 @@ void clear_context(void)
         }
         free(last);
     }
+    while (!TAILQ_EMPTY(&context_to_freed)) {
+        last = TAILQ_FIRST(&context_to_freed);
+        TAILQ_REMOVE(&context_to_freed, last, link);
+        if (WAS_ALLOCATED(last)) {
+            VALGRIND_STACK_DEREGISTER(last->valgrind_id);
+        }
+        free(last);
+    }
     VALGRIND_STACK_DEREGISTER(stack_valgrind_id);
     exit(0);
 }
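Review note: because joined entries are now parked instead of destroyed, their Valgrind stack registrations stay live until clear_context drains context_to_freed and deregisters each one, as added above. The sketch below shows the register/deregister pairing in isolation; it assumes the Valgrind development headers are installed, and the macros are no-ops when the program is not run under Valgrind.

/* Minimal sketch of the Valgrind stack bookkeeping the teardown above
 * completes: every VALGRIND_STACK_REGISTER is eventually paired with a
 * VALGRIND_STACK_DEREGISTER, even for stacks parked on the free list. */
#include <stdlib.h>
#include <valgrind/valgrind.h>

#define SKETCH_STACK_SIZE (64 * 1024)

int main(void)
{
    char* stack = malloc(SKETCH_STACK_SIZE);

    /* Tell Valgrind this heap block will be used as a thread stack. */
    unsigned valgrind_id =
        VALGRIND_STACK_REGISTER(stack, stack + SKETCH_STACK_SIZE);

    /* ... run contexts on this stack, park it, reuse it ... */

    /* Deregister exactly once, right before the memory is released. */
    VALGRIND_STACK_DEREGISTER(valgrind_id);
    free(stack);
    return 0;
}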