2016-04-22 23:53:51 +01:00
|
|
|
/*
|
|
|
|
* This file is part of the MicroPython project, http://micropython.org/
|
|
|
|
*
|
|
|
|
* The MIT License (MIT)
|
|
|
|
*
|
|
|
|
* Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
* of this software and associated documentation files (the "Software"), to deal
|
|
|
|
* in the Software without restriction, including without limitation the rights
|
|
|
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
|
|
* copies of the Software, and to permit persons to whom the Software is
|
|
|
|
* furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
|
|
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
|
|
* THE SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
2016-05-04 11:23:21 +01:00
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
2016-04-22 23:53:51 +01:00
|
|
|
#include <errno.h>
|
|
|
|
|
2016-10-07 04:05:15 +01:00
|
|
|
#include "py/runtime.h"
|
2016-04-22 23:53:51 +01:00
|
|
|
#include "py/mpthread.h"
|
2016-05-04 11:23:21 +01:00
|
|
|
#include "py/gc.h"
|
2016-04-22 23:53:51 +01:00
|
|
|
|
|
|
|
#if MICROPY_PY_THREAD
|
|
|
|
|
2019-02-05 21:13:36 +00:00
|
|
|
#include <fcntl.h>
|
2016-05-04 11:23:21 +01:00
|
|
|
#include <signal.h>
|
|
|
|
#include <sched.h>
|
2019-01-02 15:44:52 +00:00
|
|
|
#include <semaphore.h>
|
2016-05-04 11:23:21 +01:00
|
|
|
|
2021-07-09 05:19:15 +01:00
|
|
|
#include "shared/runtime/gchelper.h"
|
2020-04-23 07:12:55 +01:00
|
|
|
|
2020-02-18 18:24:23 +00:00
|
|
|
// Some platforms don't have SIGRTMIN but if we do have it, use it to avoid
|
|
|
|
// potential conflict with other uses of the more commonly used SIGUSR1.
|
|
|
|
#ifdef SIGRTMIN
|
2020-02-15 19:19:58 +00:00
|
|
|
#define MP_THREAD_GC_SIGNAL (SIGRTMIN + 5)
|
2020-02-18 18:24:23 +00:00
|
|
|
#else
|
|
|
|
#define MP_THREAD_GC_SIGNAL (SIGUSR1)
|
|
|
|
#endif
|
2020-02-15 19:19:58 +00:00
|
|
|
|
2020-03-28 11:39:01 +00:00
|
|
|
// This value seems to be about right for both 32-bit and 64-bit builds.
|
|
|
|
#define THREAD_STACK_OVERFLOW_MARGIN (8192)
|
|
|
|
|
2016-05-04 11:23:21 +01:00
|
|
|
// this structure forms a linked list, one node per active thread;
// nodes are malloc'd/free'd by the mp_thread_* functions below and the
// list head (the global `thread`) always points at the newest entry
typedef struct _thread_t {
    pthread_t id; // system id of thread
    int ready; // whether the thread is ready and running
    void *arg; // thread Python args, a GC root pointer
    struct _thread_t *next; // next node in the list; NULL terminates
} thread_t;
|
|
|
|
|
2016-04-22 23:53:51 +01:00
|
|
|
// TLS key used to store each thread's mp_state_thread_t pointer
// (set in mp_thread_init / mp_thread_set_state, read in mp_thread_get_state).
STATIC pthread_key_t tls_key;

// The mutex is used for any code in this port that needs to be thread safe.
// Specifically for thread management, access to the linked list is one example.
// But also, e.g. scheduler state.
STATIC pthread_mutex_t thread_mutex;

// Head of the linked list of all active threads (newest first).
STATIC thread_t *thread;
|
|
|
|
|
|
|
|
// this is used to synchronise the signal handler of the thread
// it's needed because we can't use any pthread calls in a signal handler
#if defined(__APPLE__)
// On macOS a named semaphore is used (created in mp_thread_init via sem_open);
// the name buffer holds "micropython_sem_<tid>".
STATIC char thread_signal_done_name[25];
STATIC sem_t *thread_signal_done_p;
#else
// Elsewhere an unnamed semaphore (sem_init) suffices.
STATIC sem_t thread_signal_done;
#endif
|
2016-05-04 11:23:21 +01:00
|
|
|
|
2020-04-03 04:07:34 +01:00
|
|
|
// Enter this port's global critical section.  The underlying mutex is
// recursive (see mp_thread_init), so nested begin/end pairs are allowed,
// emulating BEGIN_ATOMIC_SECTION on bare-metal ports.
void mp_thread_unix_begin_atomic_section(void) {
    pthread_mutex_lock(&thread_mutex);
}
|
|
|
|
|
|
|
|
// Leave the critical section entered by mp_thread_unix_begin_atomic_section.
// Must be called once for each (possibly nested) begin.
void mp_thread_unix_end_atomic_section(void) {
    pthread_mutex_unlock(&thread_mutex);
}
|
|
|
|
|
2016-05-04 11:23:21 +01:00
|
|
|
// this signal handler is used to scan the regs and stack of a thread;
// it runs in the context of the signalled thread (sent MP_THREAD_GC_SIGNAL
// by mp_thread_gc_others) and must only use async-signal-safe operations,
// which is why completion is reported via sem_post rather than a pthread call
STATIC void mp_thread_gc(int signo, siginfo_t *info, void *context) {
    (void)info; // unused
    (void)context; // unused
    if (signo == MP_THREAD_GC_SIGNAL) {
        gc_helper_collect_regs_and_stack();
        // We have access to the context (regs, stack) of the thread but it seems
        // that we don't need the extra information, enough is captured by the
        // gc_collect_regs_and_stack function above
        // gc_collect_root((void**)context, sizeof(ucontext_t) / sizeof(uintptr_t));
        #if MICROPY_ENABLE_PYSTACK
        // also scan this thread's pystack region for root pointers
        void **ptrs = (void **)(void *)MP_STATE_THREAD(pystack_start);
        gc_collect_root(ptrs, (MP_STATE_THREAD(pystack_cur) - MP_STATE_THREAD(pystack_start)) / sizeof(void *));
        #endif
        // tell the GC-initiating thread that this thread has finished scanning
        #if defined(__APPLE__)
        sem_post(thread_signal_done_p);
        #else
        sem_post(&thread_signal_done);
        #endif
    }
}
|
|
|
|
|
2016-04-22 23:53:51 +01:00
|
|
|
// Initialise threading support; must be called once at startup by the main
// thread, before any other mp_thread_* function is used.
void mp_thread_init(void) {
    // create the TLS key and point it at the main thread's state
    pthread_key_create(&tls_key, NULL);
    pthread_setspecific(tls_key, &mp_state_ctx.thread);

    // Needs to be a recursive mutex to emulate the behavior of
    // BEGIN_ATOMIC_SECTION on bare metal.
    pthread_mutexattr_t thread_mutex_attr;
    pthread_mutexattr_init(&thread_mutex_attr);
    pthread_mutexattr_settype(&thread_mutex_attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&thread_mutex, &thread_mutex_attr);

    // create first entry in linked list of all threads
    thread = malloc(sizeof(thread_t));
    thread->id = pthread_self();
    thread->ready = 1;
    thread->arg = NULL;
    thread->next = NULL;

    #if defined(__APPLE__)
    // create a named semaphore, unique to this process via the thread id
    snprintf(thread_signal_done_name, sizeof(thread_signal_done_name), "micropython_sem_%ld", (long)thread->id);
    thread_signal_done_p = sem_open(thread_signal_done_name, O_CREAT | O_EXCL, 0666, 0);
    #else
    sem_init(&thread_signal_done, 0, 0);
    #endif

    // enable signal handler for garbage collection
    struct sigaction sa;
    sa.sa_flags = SA_SIGINFO;
    sa.sa_sigaction = mp_thread_gc;
    sigemptyset(&sa.sa_mask);
    sigaction(MP_THREAD_GC_SIGNAL, &sa, NULL);
}
|
|
|
|
|
2019-01-02 15:05:17 +00:00
|
|
|
// Tear down threading support at interpreter shutdown: cancel all other
// threads, release the GC semaphore (macOS) and free the thread list.
// Must be called from the main thread.
void mp_thread_deinit(void) {
    mp_thread_unix_begin_atomic_section();
    // cancel and free every entry except the last one, which is the main
    // thread (it was created first and new entries are prepended)
    while (thread->next != NULL) {
        thread_t *th = thread;
        thread = thread->next;
        pthread_cancel(th->id);
        free(th);
    }
    mp_thread_unix_end_atomic_section();
    #if defined(__APPLE__)
    sem_close(thread_signal_done_p);
    sem_unlink(thread_signal_done_name);
    #endif
    // only the main thread's entry should remain
    assert(thread->id == pthread_self());
    free(thread);
}
|
|
|
|
|
2016-05-04 11:23:21 +01:00
|
|
|
// This function scans all pointers that are external to the current thread.
// It does this by signalling all other threads and getting them to scan their
// own registers and stack. Note that there may still be some edge cases left
// with race conditions and root-pointer scanning: a given thread may manipulate
// the global root pointers (in mp_state_ctx) while another thread is doing a
// garbage collection and tracing these pointers.
void mp_thread_gc_others(void) {
    mp_thread_unix_begin_atomic_section();
    for (thread_t *th = thread; th != NULL; th = th->next) {
        // each thread's Python args are a GC root
        gc_collect_root(&th->arg, 1);
        if (th->id == pthread_self()) {
            // the current thread's regs/stack are scanned by the caller
            continue;
        }
        if (!th->ready) {
            // not yet running Python code, nothing more to scan
            continue;
        }
        // ask the thread to scan its own regs/stack (see mp_thread_gc) and
        // wait for it to signal completion before moving to the next one
        pthread_kill(th->id, MP_THREAD_GC_SIGNAL);
        #if defined(__APPLE__)
        sem_wait(thread_signal_done_p);
        #else
        sem_wait(&thread_signal_done);
        #endif
    }
    mp_thread_unix_end_atomic_section();
}
|
|
|
|
|
|
|
|
mp_state_thread_t *mp_thread_get_state(void) {
|
|
|
|
return (mp_state_thread_t *)pthread_getspecific(tls_key);
|
|
|
|
}
|
|
|
|
|
2020-01-24 17:37:53 +00:00
|
|
|
// Associate the given MicroPython state with the calling thread; retrieve it
// later with mp_thread_get_state.
void mp_thread_set_state(mp_state_thread_t *state) {
    pthread_setspecific(tls_key, state);
}
|
|
|
|
|
2016-05-04 11:23:21 +01:00
|
|
|
void mp_thread_start(void) {
|
2019-01-02 15:05:17 +00:00
|
|
|
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
|
2020-04-03 04:07:34 +01:00
|
|
|
mp_thread_unix_begin_atomic_section();
|
2016-05-04 11:23:21 +01:00
|
|
|
for (thread_t *th = thread; th != NULL; th = th->next) {
|
|
|
|
if (th->id == pthread_self()) {
|
|
|
|
th->ready = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2020-04-03 04:07:34 +01:00
|
|
|
mp_thread_unix_end_atomic_section();
|
2016-05-04 11:23:21 +01:00
|
|
|
}
|
|
|
|
|
2016-05-30 16:56:51 +01:00
|
|
|
void mp_thread_create(void *(*entry)(void *), void *arg, size_t *stack_size) {
|
2016-07-11 15:59:47 +01:00
|
|
|
// default stack size is 8k machine-words
|
2016-05-30 16:56:51 +01:00
|
|
|
if (*stack_size == 0) {
|
2021-02-04 05:39:09 +00:00
|
|
|
*stack_size = 8192 * sizeof(void *);
|
2016-07-11 15:59:47 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// minimum stack size is set by pthreads
|
|
|
|
if (*stack_size < PTHREAD_STACK_MIN) {
|
|
|
|
*stack_size = PTHREAD_STACK_MIN;
|
2016-04-25 10:02:47 +01:00
|
|
|
}
|
|
|
|
|
2020-03-28 11:39:01 +00:00
|
|
|
// ensure there is enough stack to include a stack-overflow margin
|
|
|
|
if (*stack_size < 2 * THREAD_STACK_OVERFLOW_MARGIN) {
|
|
|
|
*stack_size = 2 * THREAD_STACK_OVERFLOW_MARGIN;
|
|
|
|
}
|
|
|
|
|
2016-04-25 10:02:47 +01:00
|
|
|
// set thread attributes
|
|
|
|
pthread_attr_t attr;
|
|
|
|
int ret = pthread_attr_init(&attr);
|
|
|
|
if (ret != 0) {
|
|
|
|
goto er;
|
|
|
|
}
|
2016-05-30 16:56:51 +01:00
|
|
|
ret = pthread_attr_setstacksize(&attr, *stack_size);
|
2016-04-25 10:02:47 +01:00
|
|
|
if (ret != 0) {
|
|
|
|
goto er;
|
|
|
|
}
|
|
|
|
|
2019-01-02 15:31:36 +00:00
|
|
|
ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
|
|
|
|
if (ret != 0) {
|
|
|
|
goto er;
|
|
|
|
}
|
|
|
|
|
2020-04-03 04:07:34 +01:00
|
|
|
mp_thread_unix_begin_atomic_section();
|
2016-05-04 11:23:21 +01:00
|
|
|
|
2016-04-25 10:02:47 +01:00
|
|
|
// create thread
|
2016-04-22 23:53:51 +01:00
|
|
|
pthread_t id;
|
2016-04-25 10:02:47 +01:00
|
|
|
ret = pthread_create(&id, &attr, entry, arg);
|
|
|
|
if (ret != 0) {
|
2020-04-03 04:07:34 +01:00
|
|
|
mp_thread_unix_end_atomic_section();
|
2016-04-25 10:02:47 +01:00
|
|
|
goto er;
|
|
|
|
}
|
|
|
|
|
2016-05-30 16:56:51 +01:00
|
|
|
// adjust stack_size to provide room to recover from hitting the limit
|
2020-03-28 11:39:01 +00:00
|
|
|
*stack_size -= THREAD_STACK_OVERFLOW_MARGIN;
|
2016-05-30 16:56:51 +01:00
|
|
|
|
2016-05-04 11:23:21 +01:00
|
|
|
// add thread to linked list of all threads
|
|
|
|
thread_t *th = malloc(sizeof(thread_t));
|
|
|
|
th->id = id;
|
|
|
|
th->ready = 0;
|
|
|
|
th->arg = arg;
|
|
|
|
th->next = thread;
|
|
|
|
thread = th;
|
|
|
|
|
2020-04-03 04:07:34 +01:00
|
|
|
mp_thread_unix_end_atomic_section();
|
2016-05-04 11:23:21 +01:00
|
|
|
|
2016-04-25 10:02:47 +01:00
|
|
|
return;
|
|
|
|
|
|
|
|
er:
|
2016-10-07 04:05:15 +01:00
|
|
|
mp_raise_OSError(ret);
|
2016-04-22 23:53:51 +01:00
|
|
|
}
|
|
|
|
|
2016-05-04 11:23:21 +01:00
|
|
|
// Called by a thread when it finishes running Python code: unlink the calling
// thread's entry from the linked list of active threads and free it.
void mp_thread_finish(void) {
    mp_thread_unix_begin_atomic_section();
    thread_t *prev = NULL;
    for (thread_t *th = thread; th != NULL; th = th->next) {
        if (th->id == pthread_self()) {
            // unlink this entry; it may be the list head
            if (prev == NULL) {
                thread = th->next;
            } else {
                prev->next = th->next;
            }
            free(th);
            break;
        }
        prev = th;
    }
    mp_thread_unix_end_atomic_section();
}
|
|
|
|
|
2016-04-25 12:21:48 +01:00
|
|
|
// Initialise a MicroPython mutex as a pthread mutex with default attributes.
void mp_thread_mutex_init(mp_thread_mutex_t *mutex) {
    pthread_mutex_init(mutex, NULL);
}
|
|
|
|
|
|
|
|
// Acquire the given mutex.  With wait true, block until acquired and return 1.
// With wait false, try once: return 1 on success, 0 if the mutex is busy.
// Any other pthread failure is reported as the negated error code.
int mp_thread_mutex_lock(mp_thread_mutex_t *mutex, int wait) {
    int err = wait ? pthread_mutex_lock(mutex) : pthread_mutex_trylock(mutex);
    if (err == 0) {
        return 1; // acquired
    }
    if (!wait && err == EBUSY) {
        return 0; // already held, caller chose not to block
    }
    return -err; // negated pthread error code
}
|
|
|
|
|
|
|
|
// Release a mutex previously acquired with mp_thread_mutex_lock.
void mp_thread_mutex_unlock(mp_thread_mutex_t *mutex) {
    pthread_mutex_unlock(mutex);
    // TODO check return value
}
|
|
|
|
|
2016-04-22 23:53:51 +01:00
|
|
|
#endif // MICROPY_PY_THREAD
|