diff --git a/Makefile b/Makefile
index 2fbc4be..09c7f1e 100644
--- a/Makefile
+++ b/Makefile
@@ -16,8 +16,8 @@
 bins+=31-switch-many
 bins+=32-switch-many-join
 bins+=33-switch-many-cascade
 bins+=51-fibonacci
-#bins+=61-mutex
-#bins+=62-mutex
+bins+=61-mutex
+bins+=62-mutex
 bins+=71-preemption
 bins+=81-deadlock
diff --git a/src/thread/thread.c b/src/thread/thread.c
index 59d2813..56871a0 100644
--- a/src/thread/thread.c
+++ b/src/thread/thread.c
@@ -2,8 +2,6 @@
 #include "thread.h"
 #include "debug.h"
-#include "pthread.h"
-#include
 #include
 #include
 #include

@@ -19,19 +17,26 @@
 #define IS_WAITING(entry) (entry->status & WAITING)
 #define GET_LAST_WAITED_THREAD(entry) (entry->last_waited ? entry->last_waited->last_thread : NULL)
 #define WAITED 0x8
+#define MUTEX_WAITING 0xf0
 #define IS_WAITED(entry) (entry->status & WAITED)
+#define IS_MUTEX_WAITING(entry) (entry->status & MUTEX_WAITING)

 #ifndef STACK_SIZE
 #define STACK_SIZE 4096
 #endif

 // Variables used to clean up everything at the end of the processes
+#ifndef HASHMAP_SIZE
+#define HASHMAP_SIZE 16384
+#endif
+
+// Variables used to clean up everything at the end of the process
 static char stack_for_freeing[STACK_SIZE] = {0};
 static int stack_valgrind_id = 0;
 static ucontext_t context_for_freeing;

 struct last_thread_t;
-
+struct mutex_fifo_entry_t;

 struct context_entry {
     TAILQ_ENTRY(context_entry)
@@ -39,6 +44,7 @@ struct context_entry {
     ucontext_t context;
     void *retvalue; // return value or if the thread is waited, the id of the thread that wait for it
     struct last_thread_t *last_waited;
+    struct mutex_fifo_entry_t* mutex_fifo_entry;
     int valgrind_id;
     char status;
     char stack[STACK_SIZE];
@@ -56,6 +62,14 @@ static struct context_entry* running = NULL;
 static TAILQ_HEAD(freed_context_head, context_entry)
     context_to_freed = TAILQ_HEAD_INITIALIZER(context_to_freed);

+
+struct mutex_fifo_entry_t {
+    STAILQ_ENTRY(mutex_fifo_entry_t) link;
+    struct context_entry* thread;
+};
+STAILQ_HEAD(mutex_fifo, mutex_fifo_entry_t) mutex_fifo;
+static struct mutex_fifo* mutex_fifo_hashmap[HASHMAP_SIZE] = {};
+
 int thread_yield(void)
 {
     //TRACE("thread_yield");
@@ -72,7 +86,7 @@
      */
     struct context_entry* first = TAILQ_FIRST(&head);
     TAILQ_REMOVE(&head, first, link);
-    if (!IS_FINISHED(running) && !IS_WAITING(running)) {
+    if (!IS_FINISHED(running) && !IS_WAITING(running) && !IS_MUTEX_WAITING(running)) {
         TAILQ_INSERT_TAIL(&head, running, link);
     }
     TRACE("PICKING %p (previous was %p)", first, running);
@@ -135,6 +149,7 @@ int thread_create(thread_t* newthread, void* (*func)(void*), void* funcarg)
     new_entry->status = ALLOCATED;
     new_entry->retvalue = NULL;
     new_entry->last_waited = NULL;
+    new_entry->mutex_fifo_entry = NULL;

     *newthread = new_entry;

@@ -202,8 +217,10 @@ int thread_join(thread_t thread, void** retval)

     DBG("%p is waiting for %p", running, entry);

-    TAILQ_REMOVE(&head, GET_LAST_WAITED_THREAD(running), link);
-    TAILQ_INSERT_HEAD(&head, GET_LAST_WAITED_THREAD(running), link);
+    if (!IS_MUTEX_WAITING(GET_LAST_WAITED_THREAD(running))) {
+        TAILQ_REMOVE(&head, GET_LAST_WAITED_THREAD(running), link);
+        TAILQ_INSERT_HEAD(&head, GET_LAST_WAITED_THREAD(running), link);
+    }
     do {
         thread_yield();
     } while (!IS_FINISHED(entry));
@@ -264,6 +281,7 @@ void clear_context(void)
     while (!TAILQ_EMPTY(&head)) {
         last = TAILQ_FIRST(&head);
         TAILQ_REMOVE(&head, last, link);
+        free(last->mutex_fifo_entry);
         if (WAS_ALLOCATED(last)) {
             VALGRIND_STACK_DEREGISTER(last->valgrind_id);
         }
@@ -275,12 +293,18 @@
     }
     while (!TAILQ_EMPTY(&context_to_freed)) {
         last = TAILQ_FIRST(&context_to_freed);
+        free(last->mutex_fifo_entry);
         TAILQ_REMOVE(&context_to_freed, last, link);
         if (WAS_ALLOCATED(last)) {
             VALGRIND_STACK_DEREGISTER(last->valgrind_id);
         }
         free(last);
     }
+
+    // Free all the FIFOs that might have been allocated
+    for (int i = 0 ; i < HASHMAP_SIZE ; ++i)
+        free(mutex_fifo_hashmap[i]);
+
     VALGRIND_STACK_DEREGISTER(stack_valgrind_id);
     exit(0);
 }
@@ -320,24 +344,68 @@ void __attribute__((destructor)) clear_last_thread()
     exit(0);
 }

+
 int thread_mutex_init(thread_mutex_t* mutex)
 {
-    return pthread_mutex_init((pthread_mutex_t*)mutex, NULL);
+    long id = ((long)mutex) % HASHMAP_SIZE;
+    if (mutex_fifo_hashmap[id] == NULL)
+    {
+        mutex_fifo_hashmap[id] = malloc(sizeof(mutex_fifo));
+        STAILQ_INIT(mutex_fifo_hashmap[id]);
+    }
+
+    return mutex->dummy = 0;
 }

 int thread_mutex_destroy(thread_mutex_t* mutex)
 {
-    return pthread_mutex_destroy((pthread_mutex_t*)mutex);
+    long id = ((long)mutex) % HASHMAP_SIZE;
+    struct mutex_fifo_entry_t* last = NULL;
+
+    while (!STAILQ_EMPTY(mutex_fifo_hashmap[id])) {
+        last = STAILQ_FIRST(mutex_fifo_hashmap[id]);
+        STAILQ_REMOVE_HEAD(mutex_fifo_hashmap[id], link);
+        free(last);
+    }
+
+    return 0;
 }

 int thread_mutex_lock(thread_mutex_t* mutex)
 {
-    return pthread_mutex_lock((pthread_mutex_t*)mutex);
+    // Add to mutex fifo
+    long id = ((long)mutex) % HASHMAP_SIZE;
+    DBG("Lock mutex %ld\n", id);
+    while (! __sync_bool_compare_and_swap(&mutex->dummy, 0, 1))
+    {
+        DBG("Wait for mutex %ld\n", id);
+        if (running->mutex_fifo_entry == NULL)
+            running->mutex_fifo_entry = malloc(sizeof(struct mutex_fifo_entry_t));
+
+        STAILQ_INSERT_TAIL(mutex_fifo_hashmap[id], running->mutex_fifo_entry, link);
+        // Use status to be in waiting state
+        running->status |= MUTEX_WAITING;
+        running->mutex_fifo_entry->thread = running;
+        thread_yield();
+    }
+
+    mutex->dummy = 1;
+    return 0;
 }

 int thread_mutex_unlock(thread_mutex_t* mutex)
 {
-    return pthread_mutex_unlock((pthread_mutex_t*)mutex);
-}
+    long id = ((long)mutex) % HASHMAP_SIZE;
+    DBG("Unlock mutex %ld\n", id);
+    if (!STAILQ_EMPTY(mutex_fifo_hashmap[id]))
+    {
+        struct mutex_fifo_entry_t* first = STAILQ_FIRST(mutex_fifo_hashmap[id]);
+        STAILQ_REMOVE_HEAD(mutex_fifo_hashmap[id], link);
+        first->thread->status &= ~MUTEX_WAITING;
+        TAILQ_INSERT_TAIL(&head, first->thread, link);
+    }
+
+    return mutex->dummy = 0;
+}
 #endif
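The Makefile change re-enables the 61-mutex and 62-mutex test binaries. For reference, here is a minimal sketch of the kind of program they exercise -- not the project's actual test sources -- assuming thread.h declares thread_create, thread_join, thread_yield and the thread_mutex_* functions shown in the patch:

    /* Hypothetical test in the spirit of 61-mutex: two threads increment a
     * shared counter, yielding inside the critical section so the FIFO wait
     * path in thread_mutex_lock() is actually taken. */
    #include <stdio.h>
    #include "thread.h"

    static thread_mutex_t mutex;
    static int counter = 0;

    static void* worker(void* arg)
    {
        (void)arg;
        for (int i = 0; i < 1000; ++i) {
            thread_mutex_lock(&mutex);
            int local = counter;   /* read the shared value            */
            thread_yield();        /* force a switch while holding it  */
            counter = local + 1;   /* write back; safe thanks to the mutex */
            thread_mutex_unlock(&mutex);
        }
        return NULL;
    }

    int main(void)
    {
        thread_t t1, t2;
        void* ret;
        thread_mutex_init(&mutex);
        thread_create(&t1, worker, NULL);
        thread_create(&t2, worker, NULL);
        thread_join(t1, &ret);
        thread_join(t2, &ret);
        thread_mutex_destroy(&mutex);
        printf("counter = %d (expected 2000)\n", counter);
        return 0;
    }

Yielding inside the critical section makes the second thread hit the contended path: its compare-and-swap on mutex->dummy fails, it enqueues itself on the per-mutex FIFO (selected by hashing the mutex address into mutex_fifo_hashmap), marks itself MUTEX_WAITING so thread_yield() no longer requeues it, and only runs again once thread_mutex_unlock() puts it back on the run queue.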