#ifndef USE_PTHREAD

#include "thread.h"
#include "debug.h"
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <ucontext.h>
#include <valgrind/valgrind.h>
#include <errno.h>

#define FINISHED 0x1
#define IS_FINISHED(entry) (entry->status & FINISHED)
#define ALLOCATED 0x2
#define WAS_ALLOCATED(entry) (entry->status & ALLOCATED)
#define WAITING 0x4
#define IS_WAITING(entry) (entry->status & WAITING)
#define GET_LAST_WAITED_THREAD(entry) (entry->last_waited ? entry->last_waited->last_thread : NULL)
#define WAITED 0x8
#define YIELD (char) 0x20
#define MUTEX_WAITING (char) 0x10
#define IS_WAITED(entry) (entry->status & WAITED)
#define IS_MUTEX_WAITING(entry) (entry->status & MUTEX_WAITING)

#ifndef STACK_SIZE
#define STACK_SIZE 4096
#endif

// Size of the hash table that maps a mutex (by address) to its FIFO of waiting threads
#ifndef HASHMAP_SIZE
#define HASHMAP_SIZE 16384
#endif

// Variables used to clean up everything at the end of the process
static char stack_for_freeing[STACK_SIZE] = {0};
static int stack_valgrind_id = 0;
static ucontext_t context_for_freeing;

struct last_thread_t;
struct mutex_fifo_entry_t;

struct context_entry {
    TAILQ_ENTRY(context_entry) link; // Used to navigate inside the list
    ucontext_t context;
    void *retvalue; // Return value, or, while the thread is being waited on, the thread that waits for it
    struct last_thread_t *last_waited;
    struct mutex_fifo_entry_t *mutex_fifo_entry;
    int valgrind_id;
    char status;
};

struct last_thread_t {
    STAILQ_ENTRY(last_thread_t) link;
    struct context_entry *last_thread;
    int ref; // Number of references to this struct (for freeing)
};

// Use the TAILQ from BSD <sys/queue.h>
static TAILQ_HEAD(context_head, context_entry) head = TAILQ_HEAD_INITIALIZER(head);
// Currently running thread
static struct context_entry* running = NULL;

static TAILQ_HEAD(freed_context_head, context_entry) context_to_freed = TAILQ_HEAD_INITIALIZER(context_to_freed);
static STAILQ_HEAD(last_thread_head, last_thread_t) last_thread_freed = STAILQ_HEAD_INITIALIZER(last_thread_freed);

struct mutex_fifo_entry_t {
    STAILQ_ENTRY(mutex_fifo_entry_t) link;
    struct context_entry* thread;
};
STAILQ_HEAD(mutex_fifo, mutex_fifo_entry_t) mutex_fifo;
static struct mutex_fifo* mutex_fifo_hashmap[HASHMAP_SIZE] = {};
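// NOTE: a mutex is mapped to its FIFO of waiting threads by hashing its
// address modulo HASHMAP_SIZE. Two mutexes whose addresses collide therefore
// share the same FIFO; a thread woken for the "wrong" mutex simply fails the
// compare-and-swap in thread_mutex_lock again and re-queues itself.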

int thread_yield(void)
{
    //TRACE("thread_yield");
    if (TAILQ_EMPTY(&head)) {
        return 0;
    }
    // The first yield of a fresh thread only sets the YIELD flag and returns
    // without switching; later calls actually reschedule.
    if (!(running->status & YIELD) && !IS_FINISHED(running)) {
        running->status |= YIELD;
        return 0;
    }

    /* Current strategy:
     * take the first entry of the list (it should not be NULL at this point)
     * and remove it from the head; re-insert the running thread at the tail so
     * it is picked up in a later round, but only if it is neither finished nor
     * waiting (on a thread or on a mutex); then switch to the chosen thread.
     */
    struct context_entry* first = TAILQ_FIRST(&head);
    TAILQ_REMOVE(&head, first, link);
    if (!IS_FINISHED(running) && !IS_WAITING(running) && !IS_MUTEX_WAITING(running)) {
        TAILQ_INSERT_TAIL(&head, running, link);
    }
    TRACE("PICKING %p (previous was %p)", first, running);
    // Switch to the new thread.
    struct context_entry* old_runner = running;
    running = first;
    swapcontext(&old_runner->context, &running->context);
    return 0;
}

thread_t thread_self(void)
{
    // This should never be the case once main has started.
    if (running == NULL) {
        return 0;
    }
    return running;
}

/**
 * Wraps the function executed by the thread so that a plain `return`
 * statement behaves like a call to thread_exit.
 */
void thread_function_wrapper(void* (*func)(void*), void* funcarg)
{
    TRACE("Wrapper for %p\n", func);
    thread_exit(func(funcarg));
}

/**
 * Create an entry for the new thread and put it at the end of the ready FIFO.
 */
int thread_create(thread_t* newthread, void* (*func)(void*), void* funcarg)
{
    DBG("Create a new thread that executes function %p", func);
    struct context_entry* new_entry;
    TRACE("Checking for a previously allocated entry");
    if (!TAILQ_EMPTY(&context_to_freed)) {
        // Recycle an entry (and its stack) left over from a joined thread.
        new_entry = TAILQ_FIRST(&context_to_freed);
        TAILQ_REMOVE(&context_to_freed, new_entry, link);
    } else {
        TRACE("Allocating new entry");
        new_entry = malloc(sizeof(*new_entry));

        new_entry->context.uc_stack.ss_sp = malloc(STACK_SIZE);
        memset(new_entry->context.uc_stack.ss_sp, 0, STACK_SIZE);
        new_entry->context.uc_stack.ss_size = STACK_SIZE;
        new_entry->context.uc_stack.ss_flags = 0;

        // Tell Valgrind that the memory area of the future stack is a stack
        new_entry->valgrind_id = VALGRIND_STACK_REGISTER(
            new_entry->context.uc_stack.ss_sp,
            new_entry->context.uc_stack.ss_sp + new_entry->context.uc_stack.ss_size);
    }

    getcontext(&new_entry->context);

    // Use the entry's memory address as an id.
    TRACE("ALLOCATED %p", new_entry);
    new_entry->status = ALLOCATED;
    new_entry->retvalue = NULL;
    new_entry->last_waited = NULL;
    new_entry->mutex_fifo_entry = NULL;

    *newthread = new_entry;

    makecontext(&new_entry->context, (void (*)(void))thread_function_wrapper, 2, func, funcarg);

    TAILQ_INSERT_TAIL(&head, new_entry, link);
    return 0;
}
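
/*
 * Example usage (a minimal sketch, not part of the library itself; it assumes
 * that thread.h declares thread_t and the functions defined in this file):
 *
 *     #include "thread.h"
 *     #include <stdio.h>
 *
 *     static void* worker(void* arg)
 *     {
 *         return (void*)((long)arg + 1);
 *     }
 *
 *     int main(void)
 *     {
 *         thread_t t;
 *         void* res;
 *         thread_create(&t, worker, (void*)41);
 *         thread_join(t, &res);
 *         printf("%ld\n", (long)res); // prints 42
 *         return 0;
 *     }
 */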

void print_entry(struct context_entry* entry)
{
    TRACE("CONTEXT (%p, %p, %d);", entry, entry, WAS_ALLOCATED(entry));
}
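
/*
 * thread_join below detects join cycles with a small "wait group" mechanism
 * (this description is inferred from the code): every chain of threads blocked
 * in thread_join shares one last_thread_t whose last_thread field points at
 * the deepest thread currently being waited on, and whose ref field counts how
 * many threads reference the group. If the deepest waited thread turns out to
 * be the caller itself, joining would form a cycle and EDEADLK is returned.
 */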

int thread_join(thread_t thread, void** retval)
{
    TRACE("Join thread %p", thread);
    struct context_entry* entry = thread;
    // Check that the target is not already being waited on by another thread
    if (IS_WAITED(entry)) {
        return -1;
    }

    if (GET_LAST_WAITED_THREAD(entry) == running) {
        TRACE("Deadlock detected");
        return EDEADLK;
    }

    if (!IS_FINISHED(entry)) {
        // Use the status to enter the waiting state
        running->status |= WAITING;
        // Mark the target as waited on so that no other thread can join it.
        entry->status |= WAITED;
        // Use retvalue to record which thread is currently waiting for this thread
        entry->retvalue = running;

        /** Deadlock bookkeeping **/
        // The running thread is solo (not waited on by anyone)
        if (!IS_WAITED(running)) {
            // The thread we want to join is already in a "group" of waiting threads
            if (IS_WAITING(entry)) {
                // Share that group's last waited thread with the running thread
                running->last_waited = entry->last_waited;
            }
            else { // the thread we want to join is solo
                if (STAILQ_EMPTY(&last_thread_freed)) {
                    running->last_waited = malloc(sizeof(struct last_thread_t));
                } else {
                    running->last_waited = STAILQ_FIRST(&last_thread_freed);
                    STAILQ_REMOVE_HEAD(&last_thread_freed, link);
                }
                running->last_waited->ref = 0;
                entry->last_waited = running->last_waited;
                running->last_waited->last_thread = entry;
            }
            running->last_waited->ref++;

        } else { // the running thread is already part of a group of waiting threads
            if (IS_WAITING(entry)) { // the thread we want to join is part of a group of waiting threads
                // Propagate the deepest waited thread of that group to ours
                running->last_waited->last_thread = GET_LAST_WAITED_THREAD(entry);
            }
            else { // the thread we want to join is solo and has no last_waited allocated
                running->last_waited->last_thread = entry;
                entry->last_waited = running->last_waited;
                entry->last_waited->ref++;
            }

        }

        DBG("%p is waiting for %p", running, entry);
        // Move the deepest waited thread to the front of the ready list so it
        // runs first, unless it is itself blocked on a mutex.
        if (!IS_MUTEX_WAITING(GET_LAST_WAITED_THREAD(running))) {
            TAILQ_REMOVE(&head, GET_LAST_WAITED_THREAD(running), link);
            TAILQ_INSERT_HEAD(&head, GET_LAST_WAITED_THREAD(running), link);
        }
        do {
            thread_yield();
        } while (!IS_FINISHED(entry));
        // Exit the waiting state
        running->status &= ~WAITING;

        if (running->last_waited) {
            // Release the last waited thread if no one uses it anymore
            DBG("Last waited ref : %d", running->last_waited->ref);
            if (--running->last_waited->ref == 0) {
                STAILQ_INSERT_TAIL(&last_thread_freed, running->last_waited, link);
                // free(running->last_waited);
            }
            running->last_waited = NULL;
        }
    }

    // Save the returned value if needed
    TRACE("RETURNING %p IN %p", entry->retvalue, retval);
    if (retval)
        *retval = entry->retvalue;

    // Clean up
    DBG("(entry, was_allocated) : %p,%d", entry, WAS_ALLOCATED(entry));
    if (WAS_ALLOCATED(entry)) {
        DBG("ADDING (%p) TO FREED TAIL", entry);
        TAILQ_INSERT_TAIL(&context_to_freed, entry, link);
    } else {
        free(entry->context.uc_stack.ss_sp);
        free(entry);
    }

    return 0;
}

void thread_exit(void* retval)
{
    TRACE("Exit thread %p", running);
    print_entry(running);

    running->status |= FINISHED;
    if (IS_WAITED(running)) {
        // If the thread was waited on by another thread, we need to wake it up.
        struct context_entry* waited = running->retvalue;
        TAILQ_INSERT_TAIL(&head, waited, link);
    }
    running->retvalue = retval;

    if (!TAILQ_EMPTY(&head)) {
        thread_yield();
    }
    exit(0);
}

void clear_context(void)
{
    TRACE("INSIDE CLEAR");
    struct context_entry* last = NULL;
    // Loop over the remaining threads to clean them from the heap.
    while (!TAILQ_EMPTY(&head)) {
        last = TAILQ_FIRST(&head);
        TAILQ_REMOVE(&head, last, link);
        free(last->mutex_fifo_entry);
        if (WAS_ALLOCATED(last)) {
            free(last->context.uc_stack.ss_sp);
            VALGRIND_STACK_DEREGISTER(last->valgrind_id);
        }
        if (IS_WAITED(last)) {
            struct context_entry* waited = last->retvalue;
            TAILQ_INSERT_TAIL(&head, waited, link);
        }
        free(last);
    }
    while (!TAILQ_EMPTY(&context_to_freed)) {
        last = TAILQ_FIRST(&context_to_freed);
        free(last->mutex_fifo_entry);
        TAILQ_REMOVE(&context_to_freed, last, link);
        if (WAS_ALLOCATED(last)) {
            free(last->context.uc_stack.ss_sp);
            VALGRIND_STACK_DEREGISTER(last->valgrind_id);
        }
        free(last);
    }

    struct last_thread_t* last_thread;
    while (!STAILQ_EMPTY(&last_thread_freed)) {
        last_thread = STAILQ_FIRST(&last_thread_freed);
        STAILQ_REMOVE_HEAD(&last_thread_freed, link);
        free(last_thread);
    }

    // Free all the FIFOs that might have been allocated
    for (int i = 0; i < HASHMAP_SIZE; ++i)
        free(mutex_fifo_hashmap[i]);

    VALGRIND_STACK_DEREGISTER(stack_valgrind_id);
    exit(0);
}

void __attribute__((constructor)) setup_main_thread()
{
    TRACE("premain");
    // Create an entry for the main thread.
    struct context_entry* main = malloc(sizeof(*main));
    // memset(main, 0, sizeof(*main));
    getcontext(&main->context);
    main->status = 0;
    main->retvalue = NULL;
    main->last_waited = NULL;
    main->mutex_fifo_entry = NULL;
    running = main;

    // Create a context with a static stack to clean everything up at the end.
    getcontext(&context_for_freeing);
    stack_valgrind_id = VALGRIND_STACK_REGISTER(stack_for_freeing, stack_for_freeing + STACK_SIZE);
    context_for_freeing.uc_stack.ss_sp = stack_for_freeing;
    context_for_freeing.uc_stack.ss_size = STACK_SIZE;
    makecontext(&context_for_freeing, (void (*)(void)) clear_context, 0);
}

void __attribute__((destructor)) clear_last_thread()
{
    TRACE("POST");
    TAILQ_INSERT_HEAD(&head, running, link);
    // If running is the initial main thread, its stack is not ours to free, so
    // there is no need to switch to the static stack before cleaning up.
    if (!WAS_ALLOCATED(running)) {
        clear_context();
        exit(0);
    }

    // Running's stack was allocated by us, so let's switch to the static stack first.
    swapcontext(&running->context, &context_for_freeing);
    exit(0);
}

int thread_mutex_init(thread_mutex_t* mutex)
{
    long id = ((long)mutex) % HASHMAP_SIZE;
    if (mutex_fifo_hashmap[id] == NULL)
    {
        mutex_fifo_hashmap[id] = malloc(sizeof(mutex_fifo));
        STAILQ_INIT(mutex_fifo_hashmap[id]);
    }

    return mutex->dummy = 0;
}

int thread_mutex_destroy(thread_mutex_t* mutex)
{
    long id = ((long)mutex) % HASHMAP_SIZE;
    struct mutex_fifo_entry_t* last = NULL;

    while (!STAILQ_EMPTY(mutex_fifo_hashmap[id])) {
        last = STAILQ_FIRST(mutex_fifo_hashmap[id]);
        STAILQ_REMOVE_HEAD(mutex_fifo_hashmap[id], link);
        free(last);
    }

    return 0;
}

int thread_mutex_lock(thread_mutex_t* mutex)
{
    // Add the thread to the mutex FIFO while the lock is already taken
    long id = ((long)mutex) % HASHMAP_SIZE;
    DBG("Lock mutex %ld\n", id);
    while (!__sync_bool_compare_and_swap(&mutex->dummy, 0, 1))
    {
        DBG("Wait for mutex %ld\n", id);
        if (running->mutex_fifo_entry == NULL)
            running->mutex_fifo_entry = malloc(sizeof(struct mutex_fifo_entry_t));

        STAILQ_INSERT_TAIL(mutex_fifo_hashmap[id], running->mutex_fifo_entry, link);
        // Use the status to enter the waiting state
        running->status |= MUTEX_WAITING;
        running->mutex_fifo_entry->thread = running;
        thread_yield();
    }

    mutex->dummy = 1;
    return 0;
}

int thread_mutex_unlock(thread_mutex_t* mutex)
{
    long id = ((long)mutex) % HASHMAP_SIZE;
    DBG("Unlock mutex %ld\n", id);
    if (!STAILQ_EMPTY(mutex_fifo_hashmap[id]))
    {
        // Wake up the first thread waiting in this mutex's FIFO.
        struct mutex_fifo_entry_t* first = STAILQ_FIRST(mutex_fifo_hashmap[id]);
        STAILQ_REMOVE_HEAD(mutex_fifo_hashmap[id], link);

        first->thread->status &= ~MUTEX_WAITING;
        TAILQ_INSERT_TAIL(&head, first->thread, link);
    }

    return mutex->dummy = 0;
}
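
/*
 * Example usage of the mutex API (a minimal sketch, not part of the library;
 * it assumes thread.h declares thread_mutex_t with its `dummy` field and the
 * functions defined above):
 *
 *     static thread_mutex_t lock;
 *     static long counter = 0;
 *
 *     static void* incr(void* arg)
 *     {
 *         (void)arg;
 *         thread_mutex_lock(&lock);
 *         counter++;
 *         thread_mutex_unlock(&lock);
 *         return NULL;
 *     }
 *
 *     // In main: thread_mutex_init(&lock); create and join the threads;
 *     // then thread_mutex_destroy(&lock);
 */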

#endif