feat: use ufd_t instead of last_thread_t
parent da2b87ad05
commit 2df4b8b662

 Makefile | 4 ++--

--- a/Makefile
+++ b/Makefile
@@ -97,10 +97,10 @@ ${check_targets}: check_%: ${build_dir}/${tst_dir}/%
 ${bins_target}: ${build_dir}/%: ${objs} ${build_dir}/%.o
 	${CC} -o $@ $^ ${CFLAGS} ./lib/libmimalloc ${LDFLAGS}
 
-${build_dir}/${tst_dir}/51-fibonacci: ${build_dir}/src/thread/thread_fibo.o ${build_dir}/${tst_dir}/51-fibonacci.o
+${build_dir}/${tst_dir}/51-fibonacci: ${build_dir}/src/thread/thread_fibo.o ${build_dir}/${tst_dir}/51-fibonacci.o ${build_dir}/src/utils/ufd.o
 	${CC} -o $@ $^ ${CFLAGS} ./lib/libmimalloc ${LDFLAGS}
 
-${build_dir}/${tst_dir}/71-preemption: ${build_dir}/src/thread/thread_preempt.o ${build_dir}/${tst_dir}/71-preemption.o
+${build_dir}/${tst_dir}/71-preemption: ${build_dir}/src/thread/thread_preempt.o ${build_dir}/${tst_dir}/71-preemption.o ${build_dir}/src/utils/ufd.o
 	${CC} -o $@ $^ ${CFLAGS} ./lib/libmimalloc ${LDFLAGS}
 
 ${build_dir}/libthread.so: ${objs}
@@ -7,6 +7,7 @@
 #include <sys/queue.h>
 #include <ucontext.h>
 #include <valgrind/valgrind.h>
+#include "ufd.h"
 #include <errno.h>
 
 #define HAS_STATUS(entry, value) ((entry->status) & (value))
@@ -18,7 +19,6 @@
 #define WAS_ALLOCATED(entry) (HAS_STATUS(entry, ALLOCATED))
 #define WAITING (1 << 2)
 #define IS_WAITING(entry) (HAS_STATUS(entry, WAITING))
-#define GET_LAST_WAITED_THREAD(entry) (entry->last_waited ? entry->last_waited->last_thread : NULL)
 #define WAITED (1 << 3)
 #define MUTEX_WAITING (1 << 4)
 #define MUTEX_LOCKING (1 << 5)
@@ -42,7 +42,6 @@ static char stack_for_freeing[STACK_SIZE] = {0};
 static int stack_valgrind_id = 0;
 static ucontext_t context_for_freeing;
 
-struct last_thread_t;
 struct mutex_fifo_entry_t;
 
 struct context_entry_t {
@@ -50,7 +49,7 @@ struct context_entry_t {
     link; // Use to navigate inside the list
     ucontext_t context;
     void *retvalue; // return value or if the thread is waited, the id of the thread that wait for it
-    struct last_thread_t *last_waited;
+    struct ufd_t waited_threads;
     struct mutex_fifo_entry_t mutex_fifo_entry;
     int valgrind_id;
     int status;
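The ufd.h header itself is not part of this diff. Judging from the calls that follow (ufd__init, ufd__find, ufd__join, ufd__delete) and the ->thread access in thread_join, ufd_t reads as a per-thread union-find (disjoint-set) node embedded in each context_entry_t. Below is a minimal sketch of what such a header could declare; only struct ufd_t, the thread member and the four function names come from the diff, the parent field and the exact prototypes are assumptions, not the repository's actual ufd.h.

// Hypothetical ufd.h sketch (parent-pointer union-find node per thread).
#ifndef UFD_H
#define UFD_H

struct context_entry_t;

struct ufd_t {
    struct ufd_t *parent;            // assumed: parent pointer, a root points to itself
    struct context_entry_t *thread;  // back-pointer to the owning thread entry
};

// Make `node` a singleton set owned by `thread`.
void ufd__init(struct ufd_t *node, struct context_entry_t *thread);

// Return the representative of the set containing `node`.
struct ufd_t *ufd__find(struct ufd_t *node);

// Merge the waiter's set into the waited thread's set.
void ufd__join(struct ufd_t *waiter, struct ufd_t *waited);

// Remove a finished thread's node from its set.
void ufd__delete(struct ufd_t *node);

#endif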
@@ -68,18 +67,6 @@ static struct scheduler_fifo_t scheduler_fifo = TAILQ_HEAD_INITIALIZER(scheduler
 TAILQ_HEAD(ctx_to_free_fifo_t, context_entry_t);
 static struct ctx_to_free_fifo_t context_to_freed = TAILQ_HEAD_INITIALIZER(context_to_freed);
 
-// Last thread_t types and fifo
-// Used to optimize thread_join
-struct last_thread_t {
-    STAILQ_ENTRY(last_thread_t)
-    link;
-    struct context_entry_t * last_thread;
-    int ref; // number of reference to this struct (for free)
-};
-STAILQ_HEAD(last_thread_fifo_t, last_thread_t);
-struct last_thread_fifo_t last_thread_freed = STAILQ_HEAD_INITIALIZER(last_thread_freed);
-
-
 int thread_yield(void)
 {
     //TRACE("thread_yield");
@@ -166,7 +153,7 @@ int thread_create(thread_t* newthread, void* (*func)(void*), void* funcarg)
     TRACE("ALLOCATED %p", new_entry);
     new_entry->status = ALLOCATED;
     new_entry->retvalue = NULL;
-    new_entry->last_waited = NULL;
+    ufd__init(&new_entry->waited_threads, new_entry);
     new_entry->mutex_prio = MUTEX_MAXPRIO;
 
     *newthread = new_entry;
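With this change every freshly created thread starts out as a singleton waiting group whose representative is the thread itself. A sketch of what ufd__init presumably does under the parent-pointer assumption above; this is an illustration, not the repository's ufd.c:

void ufd__init(struct ufd_t *node, struct context_entry_t *thread)
{
    node->parent = node;    // assumed: a fresh node is its own root
    node->thread = thread;  // ufd__find(...)->thread then resolves back to this entry
}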
@@ -191,11 +178,11 @@ int thread_join(thread_t thread, void** retval)
     TRACE("Join thread %p", thread);
     struct context_entry_t* entry = thread;
     // Check if the target is not already waited by another
-    if (IS_WAITED(entry)) {
+    if (IS_WAITED(entry))
         return -1;
-    }
 
-    if(GET_LAST_WAITED_THREAD(entry) == running) {
+    struct context_entry_t* entry_last_waited = ufd__find(&entry->waited_threads)->thread;
+    if (entry_last_waited == running) {
         TRACE("Deadlock detected");
         return EDEADLK;
     }
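The deadlock check now asks the union-find for the representative of the target's waiting group: if that representative is `running`, the target is already (transitively) waiting on the caller, and joining it would close a cycle. A sketch of ufd__find under the same assumed layout; the path-halving step is an optimisation guess, not taken from ufd.c:

struct ufd_t *ufd__find(struct ufd_t *node)
{
    // Follow parent pointers to the root; the root's ->thread is interpreted
    // here as the thread the whole group is ultimately waiting on.
    while (node->parent != node) {
        node->parent = node->parent->parent; // path halving (assumed)
        node = node->parent;
    }
    return node;
}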
@@ -203,70 +190,34 @@ int thread_join(thread_t thread, void** retval)
     if (!IS_FINISHED(entry)) {
         // Use status to be in waiting state
         SET_STATUS(running, WAITING);
 
         // Mark the waited thread as waited to not be waited by any other thread.
         SET_STATUS(entry, WAITED);
 
         // Use retvalue to share which thread is currently waiting for this thread
         entry->retvalue = running;
 
-        /** Deadlock **/
-        // if the running thread is solo (not waited by anyone)
-        if(!IS_WAITED(running)) {
-            // if the thread that we want to join is already in a "group" of waiting threads
-            if (IS_WAITING(entry)) {
-                // give the last thread waited to the running thread
-                running->last_waited = entry->last_waited;
-            }
-            else { // the thread we want to join is solo
-                if (STAILQ_EMPTY(&last_thread_freed)) {
-                    running->last_waited = malloc(sizeof(struct last_thread_t));
-                } else {
-                    running->last_waited = STAILQ_FIRST(&last_thread_freed);
-                    STAILQ_REMOVE_HEAD(&last_thread_freed, link);
-                }
-                running->last_waited->ref = 0 ;
-                entry->last_waited = running->last_waited;
-                running->last_waited->last_thread = entry;
-            }
-            running->last_waited->ref++;
-
-        } else { // the running thread is already part of a groupe of waiting threads
-            if (IS_WAITING(entry)) { // the thread we want to join is part of a groupe of waiting threads
-                // release the last_waited of this entry
-                running->last_waited->last_thread = GET_LAST_WAITED_THREAD(entry);
-            }
-            else { // the thread we want to join is solo and has no last_waited allocated
-                running->last_waited->last_thread = entry;
-                entry->last_waited = running->last_waited;
-                entry->last_waited->ref ++;
-            }
-
-        }
-
+        ufd__join(&running->waited_threads, &entry->waited_threads);
+        struct context_entry_t* running_last_waited = ufd__find(&running->waited_threads)->thread;
 
         DBG("%p is waiting for %p", running, entry);
-        DBG("MUTEX WAITING %d", IS_MUTEX_WAITING(GET_LAST_WAITED_THREAD(running)));
+        DBG("MUTEX WAITING %d %p", IS_MUTEX_WAITING(running_last_waited));
 #ifdef FIBO_STRAT
 #else
-        if (!IS_MUTEX_WAITING(GET_LAST_WAITED_THREAD(running))) {
-            TAILQ_REMOVE(&scheduler_fifo, GET_LAST_WAITED_THREAD(running), link);
-            TAILQ_INSERT_HEAD(&scheduler_fifo, GET_LAST_WAITED_THREAD(running), link);
+        if (!IS_MUTEX_WAITING(running_last_waited)) {
+            TAILQ_REMOVE(&scheduler_fifo, running_last_waited, link);
+            TAILQ_INSERT_HEAD(&scheduler_fifo, running_last_waited, link);
         }
 #endif
         do {
             thread_yield();
         } while (!IS_FINISHED(entry));
 
+        ufd__delete(&entry->waited_threads);
 
         // Exit from waiting state
         UNSET_STATUS(running, WAITING);
 
-        if (running->last_waited) {
-            // Release the last waited thread if no one use it anymore
-            DBG("Last waited ref : %d", running->last_waited->ref);
-            if (--running->last_waited->ref == 0) {
-                STAILQ_INSERT_TAIL(&last_thread_freed, running->last_waited, link);
-                // free(running->last_waited);
-            }
-            running->last_waited = NULL;
-        }
     }
 
     // Save returned value if needed
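All of the removed refcounted last_thread_t bookkeeping collapses into a single ufd__join call: the caller's waiting group is merged into the target's, so ufd__find on the caller now yields the thread at the end of the wait chain (used for the scheduler boost above), and ufd__delete presumably detaches the finished thread's node once the wait is over. A sketch of a join consistent with that usage; the union direction is inferred from how thread_join uses it, not from the repository's ufd.c:

void ufd__join(struct ufd_t *waiter, struct ufd_t *waited)
{
    struct ufd_t *waiter_root = ufd__find(waiter);
    struct ufd_t *waited_root = ufd__find(waited);
    if (waiter_root != waited_root)
        waiter_root->parent = waited_root; // the waited group's root stays the representative
}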
@@ -335,13 +286,6 @@ void clear_context(void)
         free(last);
     }
 
-    struct last_thread_t* last_thread;
-    while (!STAILQ_EMPTY(&last_thread_freed)) {
-        last_thread = STAILQ_FIRST(&last_thread_freed);
-        STAILQ_REMOVE_HEAD(&last_thread_freed, link);
-        free(last_thread);
-    }
-
     VALGRIND_STACK_DEREGISTER(stack_valgrind_id);
     exit(0);
 }
@@ -365,7 +309,7 @@ void __attribute__((constructor)) setup_main_thread()
     new_entry->valgrind_id = VALGRIND_STACK_REGISTER(
         new_entry->context.uc_stack.ss_sp,
         new_entry->context.uc_stack.ss_sp + new_entry->context.uc_stack.ss_size);
-    new_entry->last_waited = NULL;
+    ufd__init(&new_entry->waited_threads, new_entry);
     new_entry->retvalue = NULL;
     new_entry->status = 0;
     TAILQ_INSERT_TAIL(&context_to_freed, new_entry, link);
@@ -377,7 +321,7 @@ void __attribute__((constructor)) setup_main_thread()
     main->status = 0;
     main->valgrind_id = 0;
     main->retvalue = NULL;
-    main->last_waited = NULL;
+    ufd__init(&main->waited_threads, main);
     running = main;
 
     // Create a context with static stack to clean everything at the end.