From 177e8807c5db0d355cd883bd7b72829972a2096b Mon Sep 17 00:00:00 2001 From: Alessandre Laguierce Date: Thu, 27 Mar 2025 18:03:14 +0100 Subject: [PATCH] feat: save freed threads to avoid reallocating later --- src/thread/thread.c | 93 +++++++++++++++++++++++++++------------------ 1 file changed, 57 insertions(+), 36 deletions(-) diff --git a/src/thread/thread.c b/src/thread/thread.c index 1bb1680..d4ead2f 100644 --- a/src/thread/thread.c +++ b/src/thread/thread.c @@ -58,6 +58,8 @@ static TAILQ_HEAD(context_head, context_entry) head = TAILQ_HEAD_INITIALIZER(hea // Current running thread static struct context_entry* running = NULL; +static TAILQ_HEAD(freed_context_head, context_entry) context_to_freed = TAILQ_HEAD_INITIALIZER(context_to_freed); + int thread_yield(void) { //TRACE("thread_yield"); @@ -112,23 +114,32 @@ void thread_function_wrapper(void* (*func)(void*), void* funcarg) */ int thread_create(thread_t* newthread, void* (*func)(void*), void* funcarg) { - TRACE("Create a new thread that execute function %p", func); - struct context_entry* new_entry = malloc(sizeof(*new_entry)); - memset(new_entry->stack, 0, STACK_SIZE); + DBG("Create a new thread that execute function %p", func); + struct context_entry* new_entry; + TRACE("Checking for previous allocated entry"); + if (!TAILQ_EMPTY(&context_to_freed)) { + new_entry = TAILQ_FIRST(&context_to_freed); + TAILQ_REMOVE(&context_to_freed, new_entry, link); + } else { + TRACE("Allocating new entry"); + new_entry = malloc(sizeof(*new_entry)); + memset(new_entry->stack, 0, STACK_SIZE); + + new_entry->context.uc_stack.ss_sp = new_entry->stack; + new_entry->context.uc_stack.ss_size = STACK_SIZE; + new_entry->context.uc_stack.ss_flags = 0; + + // Tell Valgrind that the memory area of the future stack is a stack + new_entry->valgrind_id = VALGRIND_STACK_REGISTER( + new_entry->context.uc_stack.ss_sp, + new_entry->context.uc_stack.ss_sp + new_entry->context.uc_stack.ss_size); + + new_entry->id = new_entry; + } 
getcontext(&new_entry->context); - new_entry->context.uc_stack.ss_sp = new_entry->stack; - new_entry->context.uc_stack.ss_size = STACK_SIZE; - new_entry->context.uc_stack.ss_flags = 0; - - // Tell Valgrind that the memory area of the future stack is a stack - new_entry->valgrind_id = VALGRIND_STACK_REGISTER( - new_entry->context.uc_stack.ss_sp, - new_entry->context.uc_stack.ss_sp + new_entry->context.uc_stack.ss_size); - // Use the entry's memory address as an id. - new_entry->id = new_entry; TRACE("ALLOCATED %p", new_entry); new_entry->status = ALLOCATED; new_entry->retvalue = NULL; @@ -173,29 +184,29 @@ int thread_join(thread_t thread, void** retval) /** Deadlock **/ // if the running thread is solo (not waited by anyone) if(!IS_WAITED(running)) { - // if the thread that we want to join is already in a "group" of waiting threads - if (IS_WAITING(entry)) { - // give the last thread waited to the running thread - running->last_waited = entry->last_waited; - } - else { // the thread we want to join is solo - running->last_waited = malloc(sizeof(struct last_thread_t)); - running->last_waited->ref = 0 ; - entry->last_waited = running->last_waited; - running->last_waited->last_thread = entry; - } - running->last_waited->ref++; + // if the thread that we want to join is already in a "group" of waiting threads + if (IS_WAITING(entry)) { + // give the last thread waited to the running thread + running->last_waited = entry->last_waited; + } + else { // the thread we want to join is solo + running->last_waited = malloc(sizeof(struct last_thread_t)); + running->last_waited->ref = 0 ; + entry->last_waited = running->last_waited; + running->last_waited->last_thread = entry; + } + running->last_waited->ref++; } else { // the running thread is already part of a groupe of waiting threads - if (IS_WAITING(entry)) { // the thread we want to join is part of a groupe of waiting threads - // release the last_waited of this entry - running->last_waited->last_thread = 
GET_LAST_WAITED_THREAD(entry); - } - else { // the thread we want to join is solo and has no last_waited allocated - running->last_waited->last_thread = entry; - entry->last_waited = running->last_waited; - entry->last_waited->ref ++; - } + if (IS_WAITING(entry)) { // the thread we want to join is part of a groupe of waiting threads + // release the last_waited of this entry + running->last_waited->last_thread = GET_LAST_WAITED_THREAD(entry); + } + else { // the thread we want to join is solo and has no last_waited allocated + running->last_waited->last_thread = entry; + entry->last_waited = running->last_waited; + entry->last_waited->ref ++; + } } @@ -225,9 +236,11 @@ int thread_join(thread_t thread, void** retval) // Clean up DBG("(entry, was_alloacted) : %p,%d", entry, WAS_ALLOCATED(entry)); if (WAS_ALLOCATED(entry)) { - VALGRIND_STACK_DEREGISTER(entry->valgrind_id); + DBG("ADDING (%p) TO FREED TAIL", entry); + TAILQ_INSERT_TAIL(&context_to_freed, entry, link); + } else { + free(entry); } - free(entry); return 0; } @@ -268,6 +281,14 @@ void clear_context(void) } free(last); } + while (!TAILQ_EMPTY(&context_to_freed)) { + last = TAILQ_FIRST(&context_to_freed); + TAILQ_REMOVE(&context_to_freed, last, link); + if (WAS_ALLOCATED(last)) { + VALGRIND_STACK_DEREGISTER(last->valgrind_id); + } + free(last); + } VALGRIND_STACK_DEREGISTER(stack_valgrind_id); exit(0); }