// uthread/src/thread/thread.c
#ifndef USE_PTHREAD
#include "thread.h"
#include "debug.h"
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <ucontext.h>
#include <valgrind/valgrind.h>
#include "ufd.h"
#include <errno.h>
#define HAS_STATUS(entry, value) (((entry)->status) & (value))
#define SET_STATUS(entry, value) (((entry)->status) |= (value))
#define UNSET_STATUS(entry, value) (((entry)->status) &= ~(value))
// Status bits
#define FINISHED (1 << 0)
#define WAITING (1 << 2)
#define WAITED (1 << 3)
#define MUTEX_WAITING (1 << 4)
#define MUTEX_LOCKING (1 << 5)
#define IS_FINISHED(entry) (HAS_STATUS(entry, FINISHED))
#define IS_WAITING(entry) (HAS_STATUS(entry, WAITING))
#define IS_WAITED(entry) (HAS_STATUS(entry, WAITED))
#define IS_MUTEX_WAITING(entry) (HAS_STATUS(entry, MUTEX_WAITING))
#define IS_MAIN(entry) ((entry) == main_thread)
// Number of yields a mutex holder may skip before actually yielding
#define MUTEX_MAXPRIO 2
#ifdef FIBO_STRAT
#define YIELD (1 << 6)
#define IS_YIELD(entry) (HAS_STATUS(entry, YIELD))
#endif
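/*
 * Illustrative example of how the status bits combine: a thread that is
 * being joined while it holds a mutex carries both bits, and clearing one
 * leaves the other intact:
 *
 *   SET_STATUS(t, WAITED);
 *   SET_STATUS(t, MUTEX_LOCKING);
 *   // HAS_STATUS(t, WAITED | MUTEX_LOCKING) is non-zero here
 *   UNSET_STATUS(t, WAITED);
 *   // IS_WAITED(t) is now 0, but HAS_STATUS(t, MUTEX_LOCKING) still holds
 */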
#ifndef STACK_SIZE
#define STACK_SIZE 1024
#endif
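// The 1 KiB default is small for deep call chains; the #ifndef guard above
// lets it be overridden at build time, e.g. `cc -DSTACK_SIZE=65536 ...`.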
// Variables used to clean everything up at the end of the process
static char stack_for_freeing[STACK_SIZE] = {0};
static int stack_valgrind_id = 0;
static ucontext_t context_for_freeing;
static struct context_entry_t* main_thread = NULL;
struct mutex_fifo_entry_t;
struct context_entry_t {
    TAILQ_ENTRY(context_entry_t) link; // Used to chain entries in the scheduler and recycling lists
    ucontext_t context;
    void* retvalue; // Return value, or, while the thread is being waited on, the thread waiting for it
    struct ufd_t waited_threads;
    struct mutex_fifo_entry_t mutex_fifo_entry;
    int valgrind_id;
    int status;
    int mutex_prio;
    char stack[STACK_SIZE];
};
// Currently running thread
static struct context_entry_t* running = NULL;
TAILQ_HEAD(scheduler_fifo_t, context_entry_t);
static struct scheduler_fifo_t scheduler_fifo = TAILQ_HEAD_INITIALIZER(scheduler_fifo);
// Pool of finished/preallocated entries, recycled by thread_create and freed at the very end
TAILQ_HEAD(available_threads_fifo, context_entry_t);
static struct available_threads_fifo available_threads = TAILQ_HEAD_INITIALIZER(available_threads);
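/*
 * Minimal usage sketch of the public API defined below (hypothetical
 * program; assumes thread.h declares thread_t and these functions as used
 * in this file):
 *
 *   static void* worker(void* arg) { return arg; }
 *
 *   int main(void) {
 *       thread_t t;
 *       void* ret;
 *       thread_create(&t, worker, (void*)42);
 *       thread_join(t, &ret); // ret == (void*)42
 *       return 0;
 *   }
 */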
int thread_yield(void)
{
    // TRACE("thread_yield");
    if (TAILQ_EMPTY(&scheduler_fifo))
        return 0;
    if (HAS_STATUS(running, MUTEX_LOCKING)) {
        // A thread holding a mutex may skip up to MUTEX_MAXPRIO yields,
        // so it reaches the unlock sooner and shortens the critical section.
        if (running->mutex_prio > 0) {
            running->mutex_prio--;
            DBG("skip yield: running thread holds a mutex");
            return 0;
        } else {
            running->mutex_prio = MUTEX_MAXPRIO;
        }
    }
#ifdef FIBO_STRAT
    // Under FIBO_STRAT a thread's first yield is only recorded;
    // subsequent yields actually switch.
    if (!IS_YIELD(running) && !IS_FINISHED(running) && !IS_WAITING(running)) {
        SET_STATUS(running, YIELD);
        return 0;
    }
#endif
    struct context_entry_t* first = TAILQ_FIRST(&scheduler_fifo);
    TAILQ_REMOVE(&scheduler_fifo, first, link);
    // Re-queue the current thread only if it can still run.
    if (!IS_FINISHED(running) && !IS_WAITING(running) && !IS_MUTEX_WAITING(running))
        TAILQ_INSERT_TAIL(&scheduler_fifo, running, link);
    TRACE("PICKING %p (previous was %p)", first, running);
    // Switch to the new thread.
    struct context_entry_t* old_runner = running;
    running = first;
    swapcontext(&old_runner->context, &running->context);
    return 0;
}
thread_t thread_self(void)
{
    return running;
}
/**
 * Wrap the thread's start function so that a plain `return` statement
 * behaves like a call to thread_exit.
 */
void thread_function_wrapper(void* (*func)(void*), void* funcarg)
{
    TRACE("Wrapper for %p\n", func);
    thread_exit(func(funcarg));
}
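/*
 * Thanks to this wrapper, the two thread functions below behave
 * identically (illustrative sketch):
 *
 *   void* f(void* arg) { return arg; }        // implicit exit via the wrapper
 *   void* g(void* arg) { thread_exit(arg); }  // explicit exit
 */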
/**
 * Create an entry and insert it into the scheduler queue.
 */
int thread_create(thread_t* newthread, void* (*func)(void*), void* funcarg)
{
    DBG("Create a new thread that executes function %p", func);
    struct context_entry_t* new_entry;
    TRACE("Checking for a previously allocated entry");
    if (!TAILQ_EMPTY(&available_threads)) {
        // Recycle a finished entry instead of allocating a new one.
        TRACE("TAKE THREAD AVAILABLE");
        new_entry = TAILQ_FIRST(&available_threads);
        TAILQ_REMOVE(&available_threads, new_entry, link);
    } else {
        TRACE("Allocating new entry");
        new_entry = malloc(sizeof(*new_entry));
        if (new_entry == NULL)
            return EAGAIN;
        memset(new_entry->stack, 0, STACK_SIZE);
        new_entry->context.uc_stack.ss_sp = new_entry->stack;
        new_entry->context.uc_stack.ss_size = STACK_SIZE;
        new_entry->context.uc_stack.ss_flags = 0;
        // Tell Valgrind that this memory area will be used as a stack
        new_entry->valgrind_id = VALGRIND_STACK_REGISTER(
            new_entry->stack,
            new_entry->stack + STACK_SIZE);
    }
    getcontext(&new_entry->context);
    // The entry's memory address doubles as the thread id.
    TRACE("ALLOCATED %p", new_entry);
    new_entry->status = 0;
    new_entry->retvalue = NULL;
    ufd__init(&new_entry->waited_threads, new_entry);
    new_entry->mutex_prio = MUTEX_MAXPRIO;
    *newthread = new_entry;
    // Note: passing pointers through makecontext's int arguments is
    // implementation-defined; it works on the usual SysV targets.
    makecontext(&new_entry->context, (void (*)(void))thread_function_wrapper, 2, func, funcarg);
#ifdef FIBO_STRAT
    TAILQ_INSERT_HEAD(&scheduler_fifo, new_entry, link);
#else
    TAILQ_INSERT_TAIL(&scheduler_fifo, new_entry, link);
#endif
    return 0;
}
void print_entry(struct context_entry_t* entry)
{
    TRACE("CONTEXT (%p, %p, %d);", entry, entry, IS_MAIN(entry));
}
int thread_join(thread_t thread, void** retval)
{
    TRACE("Join thread %p", thread);
    struct context_entry_t* entry = thread;
    // Fail if the target is already being waited on by another thread.
    if (IS_WAITED(entry))
        return -1;
    // Walk the wait chain (union-find): if it ends at us, joining would deadlock.
    struct context_entry_t* entry_last_waited = ufd__find(&entry->waited_threads)->thread;
    if (entry_last_waited == running) {
        TRACE("Deadlock detected");
        return EDEADLK;
    }
    if (!IS_FINISHED(entry)) {
        DBG("%p is waiting for %p", running, entry);
        // Use the status flags to enter the waiting state.
        SET_STATUS(running, WAITING);
        SET_STATUS(entry, WAITED);
        // Reuse retvalue to record which thread is currently waiting for this one.
        entry->retvalue = running;
        ufd__join(&running->waited_threads, &entry->waited_threads);
#ifndef FIBO_STRAT
        // Optimization: prioritize the scheduling of the most-waited thread.
        struct context_entry_t* running_last_waited = ufd__find(&running->waited_threads)->thread;
        // In theory this thread may already be finished and about to be joined.
        if (!IS_FINISHED(running_last_waited) && !IS_MUTEX_WAITING(running_last_waited)) {
            TAILQ_REMOVE(&scheduler_fifo, running_last_waited, link);
            TAILQ_INSERT_HEAD(&scheduler_fifo, running_last_waited, link);
        }
#endif
        do {
            thread_yield();
        } while (!IS_FINISHED(entry));
        ufd__delete(&entry->waited_threads);
    }
    // Save the returned value if the caller asked for it.
    TRACE("RETURNING %p IN %p", entry->retvalue, retval);
    if (retval)
        *retval = entry->retvalue;
    UNSET_STATUS(entry, WAITED);
    DBG("(entry, is_main): %p,%d", entry, IS_MAIN(entry));
    // Recycle the entry unless it is the main thread (freed separately).
    if (!IS_MAIN(entry))
        TAILQ_INSERT_TAIL(&available_threads, entry, link);
    return 0;
}
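/*
 * Deadlock example (hypothetical): if thread A joins thread B, and B then
 * calls thread_join on A, ufd__find on A's merged wait set resolves back
 * to B itself, so the second join returns EDEADLK instead of hanging:
 *
 *   // inside B:
 *   if (thread_join(a, NULL) == EDEADLK)
 *       ...; // A is (transitively) waiting on us; bail out
 */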
void thread_exit(void* retval)
{
    TRACE("Exit thread %p", running);
    print_entry(running);
    if (IS_WAITED(running)) {
        // If another thread is waiting on this one, wake it up.
        struct context_entry_t* waiting = running->retvalue;
        UNSET_STATUS(waiting, WAITING);
        TRACE("WAS WAITED BY %p", waiting);
        TAILQ_INSERT_HEAD(&scheduler_fifo, waiting, link);
    }
    running->retvalue = retval;
    SET_STATUS(running, FINISHED);
    // Keep scheduling the remaining threads; a finished thread is never re-queued.
    while (!TAILQ_EMPTY(&scheduler_fifo))
        thread_yield();
    exit(0);
}
void clear_context(void)
{
    TRACE("INSIDE CLEAR");
    struct context_entry_t* last = NULL;
    // Loop over the remaining threads to release their heap memory.
    while (!TAILQ_EMPTY(&scheduler_fifo)) {
        last = TAILQ_FIRST(&scheduler_fifo);
        TAILQ_REMOVE(&scheduler_fifo, last, link);
        if (!IS_MAIN(last))
            VALGRIND_STACK_DEREGISTER(last->valgrind_id);
        if (IS_WAITED(last)) {
            // Make sure the thread waiting on this one gets freed too.
            struct context_entry_t* waiting = last->retvalue;
            TAILQ_INSERT_TAIL(&scheduler_fifo, waiting, link);
        }
        free(last);
    }
    while (!TAILQ_EMPTY(&available_threads)) {
        last = TAILQ_FIRST(&available_threads);
        TAILQ_REMOVE(&available_threads, last, link);
        if (IS_MAIN(last))
            continue;
        VALGRIND_STACK_DEREGISTER(last->valgrind_id);
        free(last);
    }
    VALGRIND_STACK_DEREGISTER(stack_valgrind_id);
    exit(0);
}
void __attribute__((constructor)) setup_main_thread(void)
{
    TRACE("premain");
    // Preallocate a pool of entries so thread_create can recycle them.
    struct context_entry_t* new_entry;
    for (int i = 0; i < 2000; ++i) {
        new_entry = malloc(sizeof(*new_entry));
        memset(new_entry->stack, 0, STACK_SIZE);
        new_entry->context.uc_stack.ss_sp = new_entry->stack;
        new_entry->context.uc_stack.ss_size = STACK_SIZE;
        new_entry->context.uc_stack.ss_flags = 0;
        // Tell Valgrind that this memory area will be used as a stack
        new_entry->valgrind_id = VALGRIND_STACK_REGISTER(
            new_entry->stack,
            new_entry->stack + STACK_SIZE);
        ufd__init(&new_entry->waited_threads, new_entry);
        new_entry->retvalue = NULL;
        new_entry->status = 0;
        TAILQ_INSERT_TAIL(&available_threads, new_entry, link);
    }
    // Create an entry for the main thread; it runs on the process stack.
    main_thread = malloc(sizeof(*main_thread));
    getcontext(&main_thread->context);
    main_thread->status = 0;
    main_thread->valgrind_id = 0;
    main_thread->retvalue = NULL;
    main_thread->mutex_prio = MUTEX_MAXPRIO; // main may lock mutexes too
    ufd__init(&main_thread->waited_threads, main_thread);
    running = main_thread;
    // Create a context with a static stack to clean everything up at the end.
    getcontext(&context_for_freeing);
    stack_valgrind_id = VALGRIND_STACK_REGISTER(stack_for_freeing, stack_for_freeing + STACK_SIZE);
    context_for_freeing.uc_stack.ss_sp = stack_for_freeing;
    context_for_freeing.uc_stack.ss_size = STACK_SIZE;
    makecontext(&context_for_freeing, (void (*)(void))clear_context, 0);
}
void __attribute__((destructor)) clear_last_thread(void)
{
    TRACE("POST");
    TAILQ_INSERT_HEAD(&scheduler_fifo, running, link);
    if (IS_MAIN(running)) {
        // Running is the initial main thread: no need to switch to a static stack.
        clear_context();
        exit(0);
    }
    free(main_thread);
    // Running's stack was allocated by us, so switch to a static stack first.
    swapcontext(&running->context, &context_for_freeing);
    exit(0);
}
int thread_mutex_init(thread_mutex_t* mutex)
{
    STAILQ_INIT(&mutex->fifo);
    mutex->dummy = 0;
    return 0;
}
int thread_mutex_destroy(thread_mutex_t* mutex)
{
    (void)mutex; // nothing to release
    return 0;
}
int thread_mutex_lock(thread_mutex_t* mutex)
{
    DBG("Lock mutex %p\n", mutex);
    // Spin (cooperatively) until the flag can be flipped from 0 to 1.
    while (!__sync_bool_compare_and_swap(&mutex->dummy, 0, 1)) {
        DBG("Wait for mutex %p\n", mutex);
        // Queue ourselves on the mutex and leave the scheduler until unlocked.
        running->mutex_fifo_entry.thread = running;
        STAILQ_INSERT_TAIL(&mutex->fifo, &running->mutex_fifo_entry, link);
        SET_STATUS(running, MUTEX_WAITING);
        thread_yield();
    }
    SET_STATUS(running, MUTEX_LOCKING);
    running->mutex_prio = MUTEX_MAXPRIO;
    return 0;
}
int thread_mutex_unlock(thread_mutex_t* mutex)
{
    DBG("Unlock mutex %p\n", mutex);
    if (!STAILQ_EMPTY(&mutex->fifo)) {
        // Wake the first queued waiter and hand it back to the scheduler.
        struct mutex_fifo_entry_t* first = STAILQ_FIRST(&mutex->fifo);
        STAILQ_REMOVE_HEAD(&mutex->fifo, link);
        UNSET_STATUS(first->thread, MUTEX_WAITING);
        TAILQ_INSERT_TAIL(&scheduler_fifo, first->thread, link);
    }
    mutex->dummy = 0;
    UNSET_STATUS(running, MUTEX_LOCKING);
    return 0;
}
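/*
 * Mutex usage sketch (hypothetical caller; thread_mutex_t is assumed to be
 * declared in thread.h with the `dummy` flag and `fifo` queue used above):
 *
 *   thread_mutex_t m;
 *   thread_mutex_init(&m);
 *   thread_mutex_lock(&m);
 *   // ... critical section; thread_yield() is safe here, since a holder
 *   // skips up to MUTEX_MAXPRIO yields before actually switching ...
 *   thread_mutex_unlock(&m);
 *   thread_mutex_destroy(&m);
 */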
#endif