/*
 * uthread/src/thread/thread.c
 * 2025-03-22 19:03:47 +01:00
 * 249 lines, 6.1 KiB, C
 */
#ifndef USE_PTHREAD
#include "thread.h"
#include "debug.h"
#include "pthread.h"
#include <bits/pthreadtypes.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <ucontext.h>
#include <valgrind/valgrind.h>
/* Status bits stored in struct context_entry::status. */
#define FINISHED 0x1 /* thread has called thread_exit (or returned) */
#define IS_FINISHED(entry) (entry->status & FINISHED)
#define ALLOCATED 0x2 /* entry's stack was malloc'd by thread_create;
                       * the main thread's entry lacks this bit */
#define WAS_ALLOCATED(entry) (entry->status & ALLOCATED)
#define WAITING 0x4 /* thread is blocked in thread_join; while waiting,
                     * its retvalue field holds the joined entry pointer */
#define IS_WAITING(entry) (entry->status & WAITING)
/* Only meaningful while `entry` has WAITING set: retvalue then points at
 * the context_entry being joined, not at an exit value. */
#define IS_WAITING_THREAD_FINISHED(entry) (((struct context_entry*)entry->retvalue)->status & FINISHED)
#define WAITED 0x8 /* some thread already joined this one (one joiner max) */
#define IS_WAITED(entry) (entry->status & WAITED)
#ifndef STACK_SIZE
#define STACK_SIZE 4096
#endif
// Variables to free at the end of the process: clear_context() runs on this
// dedicated static stack so it can free every thread stack safely.
static char stack_for_freeing[STACK_SIZE] = {0};
static int stack_valgrind_id = 0;
static ucontext_t context_for_freeing;
/* One run-queue entry per thread. */
struct context_entry {
TAILQ_ENTRY(context_entry)
link; /* run-queue linkage */
ucontext_t context; /* saved CPU context for swapcontext */
thread_t id; /* public handle — the entry's own address */
void* retvalue; /* exit value; while WAITING, the joined entry pointer */
int valgrind_id; /* handle from VALGRIND_STACK_REGISTER */
char status; /* bitmask of FINISHED/ALLOCATED/WAITING/WAITED */
};
/* FIFO run queue of all live threads; `running` is the current one. */
static TAILQ_HEAD(context_head, context_entry) head = TAILQ_HEAD_INITIALIZER(head);
static struct context_entry* running = NULL;
/* Number of entries currently in the run queue. */
static unsigned long long counter = 0;
/*
 * Cooperatively hand the CPU to the next runnable thread.
 * Rotates the FIFO run queue until it finds an entry that is not finished,
 * not blocked on an unfinished join target, and not the caller itself, then
 * swaps contexts to it. Returns 0 (including when there is nothing to
 * switch to), -1 if the queue is unexpectedly empty.
 */
int thread_yield(void)
{
    TRACE("thread_yield");
    /* At most one thread exists: nothing to switch to. */
    if (counter <= 1)
        return 0;
    struct context_entry* next = NULL;
    /* Scan each thread at most once so we do not spin forever when no
     * thread is runnable (e.g. everyone is blocked in thread_join).
     * `scanned` matches counter's type to avoid a signed/unsigned mix. */
    unsigned long long scanned = 0;
    do {
        if (scanned++ == counter) {
            return 0;
        }
        next = TAILQ_FIRST(&head);
        if (!next) {
            return -1;
        }
        /* Rotate: move the head entry to the tail, round-robin style. */
        TAILQ_REMOVE(&head, next, link);
        TAILQ_INSERT_TAIL(&head, next, link);
    } while (
        IS_FINISHED(next)
        || (IS_WAITING(next) && !IS_WAITING_THREAD_FINISHED(next))
        || (next->id == running->id));
    TRACE("PICKING %p (previous was %p)", next->id, running->id);
    struct context_entry* old_runner = running;
    running = next;
    swapcontext(&old_runner->context, &running->context);
    return 0;
}
/*
 * Return the handle of the calling thread, or 0 when no thread entry has
 * been installed yet (before the constructor runs).
 */
thread_t thread_self(void)
{
    return running ? running->id : 0;
}
/*
 * Trampoline installed by makecontext: run the user's function, then route
 * its return value through thread_exit so the thread is marked FINISHED.
 */
void thread_function_wrapper(void* (*func)(void*), void* funcarg)
{
    TRACE("Wrapper for %p\n", func);
    void* result = func(funcarg);
    thread_exit(result);
}
/*
 * Create a new thread that executes func(funcarg) on its own malloc'd stack.
 * On success stores the new handle in *newthread and returns 0; returns -1
 * if memory for the entry or its stack cannot be allocated (the original
 * dereferenced unchecked malloc results).
 */
int thread_create(thread_t* newthread, void* (*func)(void*), void* funcarg)
{
    TRACE("Create a new thread that execute function %p", func);
    struct context_entry* new_entry = malloc(sizeof(*new_entry));
    if (!new_entry)
        return -1;
    memset(new_entry, 0, sizeof(*new_entry));
    getcontext(&new_entry->context);
    new_entry->context.uc_stack.ss_sp = malloc(STACK_SIZE);
    if (!new_entry->context.uc_stack.ss_sp) {
        free(new_entry);
        return -1;
    }
    new_entry->context.uc_stack.ss_size = STACK_SIZE;
    new_entry->context.uc_stack.ss_flags = 0;
    new_entry->valgrind_id = VALGRIND_STACK_REGISTER(
        new_entry->context.uc_stack.ss_sp,
        new_entry->context.uc_stack.ss_sp + new_entry->context.uc_stack.ss_size);
    /* The entry's own address doubles as the public thread id. */
    new_entry->id = new_entry;
    TRACE("ALLOCATED %p", new_entry);
    new_entry->status = ALLOCATED;
    new_entry->retvalue = NULL;
    *newthread = new_entry->id;
    /* NOTE(review): makecontext formally passes int arguments; smuggling two
     * pointers works on the usual ABIs but is not portable — confirm targets. */
    makecontext(&new_entry->context, (void (*)(void))thread_function_wrapper, 2, func, funcarg);
    counter++;
    TAILQ_INSERT_TAIL(&head, new_entry, link);
    return 0;
}
/* Dump one run-queue entry (address, id, allocated flag) to the trace log. */
void print_entry(struct context_entry* entry)
{
    int allocated = WAS_ALLOCATED(entry);
    TRACE("CONTEXT (%p, %p, %d);", entry, entry->id, allocated);
}
/*
 * Wait until `thread` finishes, optionally storing its exit value in
 * *retval, then reclaim its entry (and stack, if it was thread_create'd).
 * Returns -1 when the thread already has a joiner, or when the caller tries
 * to join itself (which would spin forever — fail like pthread's EDEADLK);
 * 0 on success.
 */
int thread_join(thread_t thread, void** retval)
{
    TRACE("Join thread %p", thread);
    struct context_entry* entry = thread;
    /* Self-join can never complete: the caller would wait on itself. */
    if (entry == running) {
        return -1;
    }
    /* At most one joiner per thread. */
    if (IS_WAITED(entry)) {
        return -1;
    }
    /* While WAITING, retvalue temporarily stores the joined entry so the
     * scheduler can evaluate IS_WAITING_THREAD_FINISHED on us. */
    running->status |= WAITING;
    running->retvalue = entry;
    entry->status |= WAITED;
    while (!IS_FINISHED(entry)) {
        thread_yield();
    }
    running->status &= ~WAITING;
    if (retval)
        *retval = entry->retvalue;
    TAILQ_REMOVE(&head, entry, link);
    TRACE("DEBUG %p,%d", entry, WAS_ALLOCATED(entry));
    if (WAS_ALLOCATED(entry)) {
        VALGRIND_STACK_DEREGISTER(entry->valgrind_id);
        free(entry->context.uc_stack.ss_sp);
    }
    free(entry);
    --counter;
    TRACE("DEBUG %p,%d", running, WAS_ALLOCATED(running));
    return 0;
}
/*
 * Terminate the calling thread, publishing `retval` for a later thread_join.
 * Marks the entry FINISHED and yields; a FINISHED entry is never picked by
 * thread_yield again, so control only returns here when this was effectively
 * the last runnable thread — in which case the whole process exits.
 * Does not return to the caller.
 */
void thread_exit(void* retval)
{
TRACE("Exit thread %p", running);
print_entry(running);
// No thread entry installed (constructor not run): just end the process.
if (running == 0) {
exit(0);
}
running->status |= FINISHED;
running->retvalue = retval;
// Other threads still exist: hand them the CPU.
if (counter > 1) {
thread_yield();
}
// Last thread standing (or yield found nothing runnable): terminate.
exit(0);
}
/*
 * Final cleanup pass, executed on its own static stack (stack_for_freeing)
 * so every thread stack — including the one we just swapped away from —
 * can be freed safely. Drains the run queue, releases each entry and any
 * malloc'd stack, deregisters the cleanup stack, then exits the process.
 */
void clear_context(void)
{
    TRACE("INSIDE CLEAR");
    struct context_entry* last = NULL;
    while (!TAILQ_EMPTY(&head)) {
        last = TAILQ_FIRST(&head);
        TAILQ_REMOVE(&head, last, link);
        if (WAS_ALLOCATED(last)) {
            /* Deregister before freeing (matching thread_join's order) so
             * Valgrind never tracks an already-freed stack. */
            VALGRIND_STACK_DEREGISTER(last->valgrind_id);
            free(last->context.uc_stack.ss_sp);
        }
        free(last);
    }
    VALGRIND_STACK_DEREGISTER(stack_valgrind_id);
    exit(0);
}
/*
 * Runs before main(): wraps the process's initial thread in a context_entry
 * (status 0 — no private stack, so it is never treated as ALLOCATED) and
 * prepares the dedicated cleanup context that clear_last_thread jumps to at
 * shutdown. Aborts on allocation failure since a constructor cannot report
 * an error (the original dereferenced an unchecked malloc).
 */
void __attribute__((constructor)) setup_main_thread()
{
    TRACE("premain");
    /* `main_entry` rather than shadowing the name `main`. */
    struct context_entry* main_entry = malloc(sizeof(*main_entry));
    if (!main_entry)
        abort();
    memset(main_entry, 0, sizeof(*main_entry));
    getcontext(&main_entry->context);
    main_entry->id = main_entry;
    main_entry->status = 0;
    main_entry->retvalue = NULL;
    TAILQ_INSERT_TAIL(&head, main_entry, link);
    running = main_entry;
    counter++;
    /* Build the cleanup context on its own static stack so clear_context
     * never runs on a stack it is in the middle of freeing. */
    getcontext(&context_for_freeing);
    stack_valgrind_id = VALGRIND_STACK_REGISTER(stack_for_freeing, stack_for_freeing + STACK_SIZE);
    context_for_freeing.uc_stack.ss_sp = stack_for_freeing;
    context_for_freeing.uc_stack.ss_size = STACK_SIZE;
    /* clear_context takes no parameters; the original passed `running` as a
     * makecontext argument, which was never read. */
    makecontext(&context_for_freeing, (void (*)(void))clear_context, 0);
}
/*
 * Runs at process shutdown: releases the entry of the thread that is still
 * running. If that thread owns a malloc'd stack, cleanup must happen on the
 * dedicated static stack, so we jump into clear_context instead of freeing
 * in place.
 */
void __attribute__((destructor)) clear_last_thread()
{
TRACE("POST");
// Running is the initial main thread. Just free the entry.
// NOTE(review): on this path, any FINISHED-but-never-joined entries left
// in the queue are leaked; only the swapcontext path below drains them.
if (!WAS_ALLOCATED(running)) {
free(running);
exit(0);
}
// Current thread's stack is heap-allocated: switch to the static cleanup
// stack; clear_context frees every queued entry (including this one) and
// then exits the process, so this swapcontext does not return.
swapcontext(&running->context, &context_for_freeing);
exit(0);
}
/* Initialize a mutex with default attributes by delegating to pthread.
 * NOTE(review): assumes thread_mutex_t is layout-compatible with
 * pthread_mutex_t — confirm in thread.h. */
int thread_mutex_init(thread_mutex_t* mutex)
{
    pthread_mutex_t* impl = (pthread_mutex_t*)mutex;
    return pthread_mutex_init(impl, NULL);
}
/* Destroy a mutex by delegating to pthread; returns pthread's error code. */
int thread_mutex_destroy(thread_mutex_t* mutex)
{
    pthread_mutex_t* impl = (pthread_mutex_t*)mutex;
    return pthread_mutex_destroy(impl);
}
/* Acquire a mutex by delegating to pthread; returns pthread's error code. */
int thread_mutex_lock(thread_mutex_t* mutex)
{
    pthread_mutex_t* impl = (pthread_mutex_t*)mutex;
    return pthread_mutex_lock(impl);
}
/* Release a mutex by delegating to pthread; returns pthread's error code. */
int thread_mutex_unlock(thread_mutex_t* mutex)
{
    pthread_mutex_t* impl = (pthread_mutex_t*)mutex;
    return pthread_mutex_unlock(impl);
}
#endif