#ifndef USE_PTHREAD

#include "thread.h"
#include "debug.h"
#include "pthread.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ucontext.h>
#include <sys/queue.h>
#include <valgrind/valgrind.h>

#define FINISHED 0x1
#define IS_FINISHED(entry) (entry->status & FINISHED)
#define ALLOCATED 0x2
#define WAS_ALLOCATED(entry) (entry->status & ALLOCATED)
#define WAITING 0x4
#define IS_WAITING(entry) (entry->status & WAITING)
#define GET_WAITING_THREAD(entry) ((struct context_entry*)entry->waiting)
#define IS_WAITING_THREAD_FINISHED(entry) (GET_WAITING_THREAD(entry)->status & FINISHED)
#define WAITED 0x8
#define IS_WAITED(entry) (entry->status & WAITED)

#ifndef STACK_SIZE
#define STACK_SIZE 4096
#endif

// Variables used to clean everything up at the end of the process
static char stack_for_freeing[STACK_SIZE] = {0};
static int stack_valgrind_id = 0;
static ucontext_t context_for_freeing;

struct context_entry {
    TAILQ_ENTRY(context_entry) link; // Used to navigate inside the list
    ucontext_t context;
    thread_t id;
    void* waiting;
    void* retvalue;
    int valgrind_id;
    char status;
    char stack[STACK_SIZE];
};

// Use the BSD TAILQ implementation from <sys/queue.h>
static TAILQ_HEAD(context_head, context_entry) head = TAILQ_HEAD_INITIALIZER(head);

// Currently running thread
static struct context_entry* running = NULL;

int thread_yield(void)
{
    TRACE("thread_yield");
    if (TAILQ_EMPTY(&head)) {
        return 0;
    }

    /* Current strategy:
     * if no other thread is ready, keep the running one;
     * otherwise, take the first element of the list (it should not be NULL)
     * and remove it from the head;
     * re-insert the running thread at the tail so it is picked again in a
     * later round, unless it is finished or waiting on a thread that has not
     * finished yet;
     * then switch to the picked thread. */
    struct context_entry* first = TAILQ_FIRST(&head);
    if (!first) {
        return -1;
    }
    TAILQ_REMOVE(&head, first, link);
    if (!IS_FINISHED(running) &&
        !(IS_WAITING(running) && !IS_WAITING_THREAD_FINISHED(running))) {
        TAILQ_INSERT_TAIL(&head, running, link);
    }

    TRACE("PICKING %p (previous was %p)", first->id, running->id);

    // Switch to the new thread.
    struct context_entry* old_runner = running;
    running = first;
    swapcontext(&old_runner->context, &running->context);
    return 0;
}

thread_t thread_self(void)
{
    // This condition should never be true once main has been called
    if (running == NULL) {
        return 0;
    }
    return running->id;
}

/**
 * Wrap the function run by the thread so that a plain `return` statement is
 * handled without an explicit thread_exit call.
 */
void thread_function_wrapper(void* (*func)(void*), void* funcarg)
{
    TRACE("Wrapper for %p\n", func);
    thread_exit(func(funcarg));
}

/**
 * Create an entry and put it at the end of the FIFO.
 */
int thread_create(thread_t* newthread, void* (*func)(void*), void* funcarg)
{
    TRACE("Create a new thread that executes function %p", func);
    struct context_entry* new_entry = malloc(sizeof(*new_entry));
    memset(new_entry->stack, 0, STACK_SIZE);
    getcontext(&new_entry->context);
    new_entry->context.uc_stack.ss_sp = new_entry->stack;
    new_entry->context.uc_stack.ss_size = STACK_SIZE;
    new_entry->context.uc_stack.ss_flags = 0;
    // Tell Valgrind that the memory area of the future stack is a stack
    new_entry->valgrind_id = VALGRIND_STACK_REGISTER(
        new_entry->context.uc_stack.ss_sp,
        new_entry->context.uc_stack.ss_sp + new_entry->context.uc_stack.ss_size);
    // Use the entry's memory address as an id.
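    // (This presumably relies on thread_t being a pointer-sized handle, e.g. a
    // void* in thread.h, so the entry's address can be handed back to the
    // caller and converted back to a struct context_entry* in thread_join();
    // the handle stays valid until the thread is joined and freed.)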
    new_entry->id = new_entry;
    TRACE("ALLOCATED %p", new_entry);
    new_entry->status = ALLOCATED;
    new_entry->retvalue = NULL;
    *newthread = new_entry->id;
    makecontext(&new_entry->context, (void (*)(void))thread_function_wrapper, 2,
                func, funcarg);
    TAILQ_INSERT_TAIL(&head, new_entry, link);
    return 0;
}

void print_entry(struct context_entry* entry)
{
    TRACE("CONTEXT (%p, %p, %d);", entry, entry->id, WAS_ALLOCATED(entry));
}

int thread_join(thread_t thread, void** retval)
{
    TRACE("Join thread %p", thread);
    struct context_entry* entry = thread;

    // Check that the target is not already waited on by another thread and
    // that it is not itself waiting on us.
    if (IS_WAITED(entry) ||
        (IS_WAITING(entry) && GET_WAITING_THREAD(entry) == running)) {
        return -1;
    }

    if (!IS_FINISHED(entry)) {
        // Use the status to enter the waiting state
        running->status |= WAITING;
        // Remember which thread we are currently waiting for
        running->waiting = entry;
        // Mark the target as waited on so no other thread can join it, and
        // stash the waiter in retvalue so thread_exit can requeue it.
        entry->status |= WAITED;
        entry->retvalue = running;
        do {
            thread_yield();
        } while (!IS_FINISHED(entry));
        // Exit the waiting state
        running->status &= ~WAITING;
    }

    // Save the returned value if needed
    TRACE("RETURNING %p IN %p", entry->retvalue, retval);
    if (retval)
        *retval = entry->retvalue;

    // Clean up
    DBG("(entry, was_allocated) : %p,%d", entry, WAS_ALLOCATED(entry));
    if (WAS_ALLOCATED(entry)) {
        VALGRIND_STACK_DEREGISTER(entry->valgrind_id);
    }
    free(entry);
    return 0;
}

void thread_exit(void* retval)
{
    TRACE("Exit thread %p", running);
    print_entry(running);
    if (running == NULL) {
        exit(0);
    }
    running->status |= FINISHED;
    if (IS_WAITED(running)) {
        // If the thread was waited on by another thread, we need to wake the
        // waiter up.
        struct context_entry* waiter = running->retvalue;
        TAILQ_INSERT_TAIL(&head, waiter, link);
    }
    running->retvalue = retval;
    if (!TAILQ_EMPTY(&head)) {
        thread_yield();
    }
    exit(0);
}

void clear_context(void)
{
    TRACE("INSIDE CLEAR");
    struct context_entry* last = NULL;
    // Loop over the remaining threads to free them from the heap.
    while (!TAILQ_EMPTY(&head)) {
        last = TAILQ_FIRST(&head);
        TAILQ_REMOVE(&head, last, link);
        if (WAS_ALLOCATED(last)) {
            VALGRIND_STACK_DEREGISTER(last->valgrind_id);
        }
        if (IS_WAITED(last)) {
            struct context_entry* waiter = last->retvalue;
            TAILQ_INSERT_TAIL(&head, waiter, link);
        }
        free(last);
    }
    VALGRIND_STACK_DEREGISTER(stack_valgrind_id);
    exit(0);
}

void __attribute__((constructor)) setup_main_thread()
{
    TRACE("premain");
    // Create an entry for the main thread.
    struct context_entry* main = malloc(sizeof(*main));
    // memset(main, 0, sizeof(*main));
    getcontext(&main->context);
    main->id = main;
    main->status = 0;
    main->retvalue = NULL;
    running = main;

    // Create a context with a static stack to clean everything up at the end.
    getcontext(&context_for_freeing);
    stack_valgrind_id = VALGRIND_STACK_REGISTER(stack_for_freeing,
                                                stack_for_freeing + STACK_SIZE);
    context_for_freeing.uc_stack.ss_sp = stack_for_freeing;
    context_for_freeing.uc_stack.ss_size = STACK_SIZE;
    makecontext(&context_for_freeing, (void (*)(void))clear_context, 0);
}

void __attribute__((destructor)) clear_last_thread()
{
    TRACE("POST");
    // Insert running at the head so clear_context() also frees it.
    TAILQ_INSERT_HEAD(&head, running, link);
    // If running is the initial main thread, there is no need to switch to a
    // static stack.
    if (!WAS_ALLOCATED(running)) {
        clear_context();
        exit(0);
    }
    // running's stack was allocated by us, so let's switch to the static stack
    // first.
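    // (clear_context() frees every entry left in the queue, including the one
    // whose stack we are currently executing on, so it has to run on the
    // statically allocated stack_for_freeing registered in setup_main_thread().)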
    swapcontext(&running->context, &context_for_freeing);
    exit(0);
}

int thread_mutex_init(thread_mutex_t* mutex)
{
    return pthread_mutex_init((pthread_mutex_t*)mutex, NULL);
}

int thread_mutex_destroy(thread_mutex_t* mutex)
{
    return pthread_mutex_destroy((pthread_mutex_t*)mutex);
}

int thread_mutex_lock(thread_mutex_t* mutex)
{
    return pthread_mutex_lock((pthread_mutex_t*)mutex);
}

int thread_mutex_unlock(thread_mutex_t* mutex)
{
    return pthread_mutex_unlock((pthread_mutex_t*)mutex);
}

#endif
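
/*
 * Minimal usage sketch of the API above (not compiled into the library). It
 * assumes thread.h declares thread_create/thread_join/thread_exit/thread_yield
 * exactly as implemented here and that thread_t is a plain handle type; the
 * THREAD_USAGE_EXAMPLE guard is hypothetical and only keeps this block out of
 * real builds.
 */
#ifdef THREAD_USAGE_EXAMPLE
#include <stdio.h>

#include "thread.h"

static void* worker(void* arg)
{
    // Yield once so another thread can run, then hand the argument back as
    // this thread's return value.
    thread_yield();
    return arg;
}

int main(void)
{
    thread_t tid;
    void* res = NULL;

    if (thread_create(&tid, worker, (void*)0x2a) != 0) {
        return 1;
    }
    thread_join(tid, &res); // blocks (cooperatively) until worker has finished
    printf("worker returned %p\n", res);
    return 0;
}
#endif /* THREAD_USAGE_EXAMPLE */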