docs: add comments

This commit is contained in:
Alessandre Laguierce 2025-03-22 20:15:38 +01:00
parent 0b068a07de
commit c3839b140b

View File

@ -24,14 +24,14 @@
#define STACK_SIZE 4096 #define STACK_SIZE 4096
#endif #endif
// Variables to free at the end of the processus // Variables used to clean up everything at the end of the processus
static char stack_for_freeing[STACK_SIZE] = {0}; static char stack_for_freeing[STACK_SIZE] = {0};
static int stack_valgrind_id = 0; static int stack_valgrind_id = 0;
static ucontext_t context_for_freeing; static ucontext_t context_for_freeing;
struct context_entry { struct context_entry {
TAILQ_ENTRY(context_entry) TAILQ_ENTRY(context_entry)
link; link; // Use to navigate inside the list
ucontext_t context; ucontext_t context;
thread_t id; thread_t id;
void* retvalue; void* retvalue;
@ -39,8 +39,11 @@ struct context_entry {
char status; char status;
}; };
// Use TailQ from queue BSD
static TAILQ_HEAD(context_head, context_entry) head = TAILQ_HEAD_INITIALIZER(head); static TAILQ_HEAD(context_head, context_entry) head = TAILQ_HEAD_INITIALIZER(head);
// Current running thread
static struct context_entry* running = NULL; static struct context_entry* running = NULL;
// Thread counter
static unsigned long long counter = 0; static unsigned long long counter = 0;
int thread_yield(void) int thread_yield(void)
@ -49,6 +52,13 @@ int thread_yield(void)
if (counter <= 1) if (counter <= 1)
return 0; return 0;
/* Current strategy:
 * if we have already examined as many candidates as there are threads,
 * keep the currently running one; otherwise take the first element of
 * the list (it should not be NULL), remove it from the head and append
 * it at the tail so it is considered again in the next round; check
 * that the candidate is neither finished nor waiting on an unfinished
 * thread, and that it is not the running thread itself.
 */
struct context_entry* first = NULL; struct context_entry* first = NULL;
int count = 0; int count = 0;
do { do {
@ -69,6 +79,7 @@ int thread_yield(void)
|| (first->id == running->id)); || (first->id == running->id));
TRACE("PICKING %p (previous was %p)", first->id, running->id); TRACE("PICKING %p (previous was %p)", first->id, running->id);
// Switch to the new thread.
struct context_entry* old_runner = running; struct context_entry* old_runner = running;
running = first; running = first;
swapcontext(&old_runner->context, &running->context); swapcontext(&old_runner->context, &running->context);
@ -77,18 +88,26 @@ int thread_yield(void)
thread_t thread_self(void) thread_t thread_self(void)
{ {
// This condition should not be true at any moment after main call
if (running == NULL) { if (running == NULL) {
return 0; return 0;
} }
return running->id; return running->id;
} }
/*
 * Wrap the user-supplied thread function so that a plain `return`
 * from the thread body behaves like an explicit thread_exit() call:
 * the return value is forwarded to thread_exit().
 */
void thread_function_wrapper(void* (*func)(void*), void* funcarg)
{
    TRACE("Wrapper for %p\n", func);
    void* result = func(funcarg);
    thread_exit(result);
}
/**
* Create an entry and put it at the end of the FIFO
*/
int thread_create(thread_t* newthread, void* (*func)(void*), void* funcarg) int thread_create(thread_t* newthread, void* (*func)(void*), void* funcarg)
{ {
TRACE("Create a new thread that execute function %p", func); TRACE("Create a new thread that execute function %p", func);
@ -101,10 +120,12 @@ int thread_create(thread_t* newthread, void* (*func)(void*), void* funcarg)
new_entry->context.uc_stack.ss_size = STACK_SIZE; new_entry->context.uc_stack.ss_size = STACK_SIZE;
new_entry->context.uc_stack.ss_flags = 0; new_entry->context.uc_stack.ss_flags = 0;
// Tell Valgrind that the memory area of the future stack is a stack
new_entry->valgrind_id = VALGRIND_STACK_REGISTER( new_entry->valgrind_id = VALGRIND_STACK_REGISTER(
new_entry->context.uc_stack.ss_sp, new_entry->context.uc_stack.ss_sp,
new_entry->context.uc_stack.ss_sp + new_entry->context.uc_stack.ss_size); new_entry->context.uc_stack.ss_sp + new_entry->context.uc_stack.ss_size);
// Use the entry's memory address as an id.
new_entry->id = new_entry; new_entry->id = new_entry;
TRACE("ALLOCATED %p", new_entry); TRACE("ALLOCATED %p", new_entry);
new_entry->status = ALLOCATED; new_entry->status = ALLOCATED;
@ -128,22 +149,29 @@ int thread_join(thread_t thread, void** retval)
{ {
TRACE("Join thread %p", thread); TRACE("Join thread %p", thread);
struct context_entry* entry = thread; struct context_entry* entry = thread;
// Check if the target is not already waited by an other
if (IS_WAITED(entry)) { if (IS_WAITED(entry)) {
return -1; return -1;
} }
// Use status to be in waiting state
running->status |= WAITING; running->status |= WAITING;
// Use retvalue to share which thread we are currently waiting for
running->retvalue = entry; running->retvalue = entry;
// Mark the waited thread as waited to not be waited by any other thread.
entry->status |= WAITED; entry->status |= WAITED;
while (!IS_FINISHED(entry)) { while (!IS_FINISHED(entry)) {
thread_yield(); thread_yield();
} }
// Exit from waiting state
running->status &= ~WAITING; running->status &= ~WAITING;
// Save returned value if needed
if (retval) if (retval)
*retval = entry->retvalue; *retval = entry->retvalue;
// Clean up
TAILQ_REMOVE(&head, entry, link); TAILQ_REMOVE(&head, entry, link);
TRACE("DEBUG %p,%d", entry, WAS_ALLOCATED(entry)); TRACE("DEBUG %p,%d", entry, WAS_ALLOCATED(entry));
@ -179,6 +207,7 @@ void clear_context(void)
{ {
TRACE("INSIDE CLEAR"); TRACE("INSIDE CLEAR");
struct context_entry* last = NULL; struct context_entry* last = NULL;
// Loop over remaining threads to clean them from the heap.
while (!TAILQ_EMPTY(&head)) { while (!TAILQ_EMPTY(&head)) {
last = TAILQ_FIRST(&head); last = TAILQ_FIRST(&head);
TAILQ_REMOVE(&head, last, link); TAILQ_REMOVE(&head, last, link);
@ -195,6 +224,7 @@ void clear_context(void)
void __attribute__((constructor)) setup_main_thread() void __attribute__((constructor)) setup_main_thread()
{ {
TRACE("premain"); TRACE("premain");
// Create an entry for the main thread.
struct context_entry* main = malloc(sizeof(*main)); struct context_entry* main = malloc(sizeof(*main));
memset(main, 0, sizeof(*main)); memset(main, 0, sizeof(*main));
getcontext(&main->context); getcontext(&main->context);
@ -205,22 +235,24 @@ void __attribute__((constructor)) setup_main_thread()
running = main; running = main;
counter++; counter++;
// Create a context with static stack to clean everything at the end.
getcontext(&context_for_freeing); getcontext(&context_for_freeing);
stack_valgrind_id = VALGRIND_STACK_REGISTER(stack_for_freeing, stack_for_freeing + STACK_SIZE); stack_valgrind_id = VALGRIND_STACK_REGISTER(stack_for_freeing, stack_for_freeing + STACK_SIZE);
context_for_freeing.uc_stack.ss_sp = stack_for_freeing; context_for_freeing.uc_stack.ss_sp = stack_for_freeing;
context_for_freeing.uc_stack.ss_size = STACK_SIZE; context_for_freeing.uc_stack.ss_size = STACK_SIZE;
makecontext(&context_for_freeing, (void (*)(void)) clear_context, 1, running); makecontext(&context_for_freeing, (void (*)(void)) clear_context, 0);
} }
void __attribute__((destructor)) clear_last_thread() void __attribute__((destructor)) clear_last_thread()
{ {
TRACE("POST"); TRACE("POST");
// Running is the initial main thread. Just free the entry. // Running is the initial main thread. No need to switch to a static stack.
if (!WAS_ALLOCATED(running)) { if (!WAS_ALLOCATED(running)) {
clear_context(); clear_context();
exit(0); exit(0);
} }
// Running's stack was allocated by us, lets switch to a static stack first.
swapcontext(&running->context, &context_for_freeing); swapcontext(&running->context, &context_for_freeing);
exit(0); exit(0);
} }