diff --git a/Makefile b/Makefile
index 2fbc4be..09c7f1e 100644
--- a/Makefile
+++ b/Makefile
@@ -16,8 +16,8 @@
 bins+=31-switch-many
 bins+=32-switch-many-join
 bins+=33-switch-many-cascade
 bins+=51-fibonacci
-#bins+=61-mutex
-#bins+=62-mutex
+bins+=61-mutex
+bins+=62-mutex
 bins+=71-preemption
 bins+=81-deadlock
diff --git a/src/thread/thread.c b/src/thread/thread.c
index d4ead2f..87aa6c6 100644
--- a/src/thread/thread.c
+++ b/src/thread/thread.c
@@ -21,19 +21,25 @@
 #define GET_LAST_WAITED_THREAD(entry) (entry->last_waited ? entry->last_waited->last_thread : NULL)
 #define IS_WAITED_THREAD_FINISHED(entry) (GET_WAITED_THREAD(entry)->status & FINISHED)
 #define WAITED 0x8
+#define MUTEX_WAITING 0xf0
 #define IS_WAITED(entry) (entry->status & WAITED)
+#define IS_MUTEX_WAITING(entry) (entry->status & MUTEX_WAITING)
 
 #ifndef STACK_SIZE
 #define STACK_SIZE 4096
 #endif
 
+#ifndef HASHMAP_SIZE
+#define HASHMAP_SIZE 16384
+#endif
+
 // Variables used to clean up everything at the end of the processus
 static char stack_for_freeing[STACK_SIZE] = {0};
 static int stack_valgrind_id = 0;
 static ucontext_t context_for_freeing;
 
 struct last_thread_t;
-
+struct mutex_fifo_entry_t;
 
 struct context_entry {
 	TAILQ_ENTRY(context_entry)
@@ -43,6 +49,7 @@ struct context_entry {
 	void *waiting; // the thread that the entry is waiting for
 	void *retvalue; // return value or if the thread is waited, the id of the thread that wait for it
 	struct last_thread_t *last_waited;
+	struct mutex_fifo_entry_t* mutex_fifo_entry;
 	int valgrind_id;
 	char status;
 	char stack[STACK_SIZE];
@@ -60,6 +67,14 @@
 static struct context_entry* running = NULL;
 static TAILQ_HEAD(freed_context_head, context_entry)
 	context_to_freed = TAILQ_HEAD_INITIALIZER(context_to_freed);
+
+struct mutex_fifo_entry_t {
+	STAILQ_ENTRY(mutex_fifo_entry_t) link;
+	struct context_entry* thread;
+};
+STAILQ_HEAD(mutex_fifo, mutex_fifo_entry_t) mutex_fifo;
+static struct mutex_fifo* mutex_fifo_hashmap[HASHMAP_SIZE] = {};
+
 int thread_yield(void)
 {
 	//TRACE("thread_yield");
@@ -79,7 +94,7 @@ int thread_yield(void)
 		return -1;
 	}
 	TAILQ_REMOVE(&head, first, link);
-	if (!IS_FINISHED(running) && !(IS_WAITING(running) && !IS_WAITED_THREAD_FINISHED(running))) {
+	if (!IS_FINISHED(running) && !IS_MUTEX_WAITING(running) && !(IS_WAITING(running) && !IS_WAITED_THREAD_FINISHED(running))) {
 		TAILQ_INSERT_TAIL(&head, running, link);
 	}
 	TRACE("PICKING %p (previous was %p)", first->id, running->id);
@@ -144,6 +159,7 @@ int thread_create(thread_t* newthread, void* (*func)(void*), void* funcarg)
 	new_entry->status = ALLOCATED;
 	new_entry->retvalue = NULL;
 	new_entry->last_waited = NULL;
+	new_entry->mutex_fifo_entry = NULL;
 
 	*newthread = new_entry->id;
 
@@ -272,6 +288,7 @@ void clear_context(void)
 	while (!TAILQ_EMPTY(&head)) {
 		last = TAILQ_FIRST(&head);
 		TAILQ_REMOVE(&head, last, link);
+		free(last->mutex_fifo_entry);
 		if (WAS_ALLOCATED(last)) {
 			VALGRIND_STACK_DEREGISTER(last->valgrind_id);
 		}
@@ -283,12 +300,18 @@
 	}
 	while (!TAILQ_EMPTY(&context_to_freed)) {
 		last = TAILQ_FIRST(&context_to_freed);
+		free(last->mutex_fifo_entry);
 		TAILQ_REMOVE(&context_to_freed, last, link);
 		if (WAS_ALLOCATED(last)) {
 			VALGRIND_STACK_DEREGISTER(last->valgrind_id);
 		}
 		free(last);
 	}
+
+	// Free all the fifo that might have been allocated
+	for (int i = 0 ; i < HASHMAP_SIZE ; ++i)
+		free(mutex_fifo_hashmap[i]);
+
 	VALGRIND_STACK_DEREGISTER(stack_valgrind_id);
 	exit(0);
 }
@@ -329,24 +352,68 @@ void __attribute__((destructor)) clear_last_thread()
 	exit(0);
 }
 
+
 int thread_mutex_init(thread_mutex_t* mutex)
 {
-	return pthread_mutex_init((pthread_mutex_t*)mutex, NULL);
+	long id = ((long)mutex) % HASHMAP_SIZE;
+	if (mutex_fifo_hashmap[id] == NULL)
+	{
+		mutex_fifo_hashmap[id] = malloc(sizeof(mutex_fifo));
+		STAILQ_INIT(mutex_fifo_hashmap[id]);
+	}
+
+	return mutex->dummy = 0;
 }
 
 int thread_mutex_destroy(thread_mutex_t* mutex)
 {
-	return pthread_mutex_destroy((pthread_mutex_t*)mutex);
+	long id = ((long)mutex) % HASHMAP_SIZE;
+	struct mutex_fifo_entry_t* last = NULL;
+
+	while (!STAILQ_EMPTY(mutex_fifo_hashmap[id])) {
+		last = STAILQ_FIRST(mutex_fifo_hashmap[id]);
+		STAILQ_REMOVE_HEAD(mutex_fifo_hashmap[id], link);
+		free(last);
+	}
+
+	return 0;
 }
 
 int thread_mutex_lock(thread_mutex_t* mutex)
 {
-	return pthread_mutex_lock((pthread_mutex_t*)mutex);
+	// Add to mutex fifo
+	long id = ((long)mutex) % HASHMAP_SIZE;
+	DBG("Lock mutex %ld\n", id);
+	while (! __sync_bool_compare_and_swap(&mutex->dummy, 0, 1))
+	{
+		DBG("Wait for mutex %ld\n", id);
+		if (running->mutex_fifo_entry == NULL)
+			running->mutex_fifo_entry = malloc(sizeof(struct mutex_fifo_entry_t));
+
+		STAILQ_INSERT_TAIL(mutex_fifo_hashmap[id], running->mutex_fifo_entry, link);
+		// Use status to be in waiting state
+		running->status |= MUTEX_WAITING;
+		running->mutex_fifo_entry->thread = running;
+		thread_yield();
+	}
+
+	mutex->dummy = 1;
+	return 0;
 }
 
 int thread_mutex_unlock(thread_mutex_t* mutex)
 {
-	return pthread_mutex_unlock((pthread_mutex_t*)mutex);
-}
+	long id = ((long)mutex) % HASHMAP_SIZE;
+	DBG("Unlock mutex %ld\n", id);
+	if (!STAILQ_EMPTY(mutex_fifo_hashmap[id]))
+	{
+		struct mutex_fifo_entry_t* first = STAILQ_FIRST(mutex_fifo_hashmap[id]);
+		STAILQ_REMOVE_HEAD(mutex_fifo_hashmap[id], link);
+		first->thread->status &= ~MUTEX_WAITING;
+		TAILQ_INSERT_TAIL(&head, first->thread, link);
+	}
+
+	return mutex->dummy = 0;
+}
 
 #endif
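
Below is a minimal usage sketch, not part of the patch, of the mutex API it implements: two user-level threads incrementing a shared counter under one lock, in the spirit of the re-enabled 61-mutex/62-mutex binaries. It assumes the project's thread.h declares thread_t, thread_create, a thread_join(thread_t, void**) prototype and the thread_mutex_* functions patched above; names and signatures outside the diff are illustrative only.

/* Hypothetical example (not from the repository): a contended counter.
 * Losers of the __sync_bool_compare_and_swap race are parked in the
 * per-mutex FIFO and re-queued by thread_mutex_unlock. */
#include <stdio.h>
#include "thread.h"

static thread_mutex_t lock;
static long counter = 0;

static void* worker(void* arg)
{
	(void)arg;
	for (int i = 0; i < 1000; ++i) {
		thread_mutex_lock(&lock);   /* may yield until the holder unlocks */
		++counter;                  /* critical section */
		thread_mutex_unlock(&lock); /* wakes the first queued waiter, if any */
	}
	return NULL;
}

int main(void)
{
	thread_t t1, t2;
	thread_mutex_init(&lock);
	thread_create(&t1, worker, NULL);
	thread_create(&t2, worker, NULL);
	thread_join(t1, NULL);
	thread_join(t2, NULL);
	thread_mutex_destroy(&lock);
	printf("counter = %ld\n", counter); /* expected: 2000 */
	return 0;
}

Design-wise, thread_mutex_lock retries a compare-and-swap on mutex->dummy but yields between attempts with MUTEX_WAITING set, so thread_yield stops re-queuing the parked thread; thread_mutex_unlock clears the flag on the first FIFO entry and puts that thread back on the run queue.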